/*
** MIPS IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate a register or RID_ZERO. */
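/* Integer zero constants need no allocatable GPR: the hardwired MIPS
** zero register (RID_ZERO) can be used as the operand directly.
*/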
static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(as, ref) == 0)
      return RID_ZERO;
    r = ra_allocref(as, ref, allow);
  } else {
    ra_noweak(as, r);
  }
  return r;
}

/* Allocate two source registers for three-operand instructions. */
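/* Both registers are packed into one Reg: left in bits 0-7, right in
** bits 8-15. Callers unpack them with: right = (left >> 8); left &= 255;
*/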
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_alloc1z(as, ir->op2, allow);
    left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_alloc1z(as, ir->op1, allow);
    right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}

/* -- Guard handling ------------------------------------------------------ */

/* Need some spare long-range jump slots, for out-of-range branches. */
#define MIPS_SPAREJUMP		4
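/* A conditional branch only reaches +-128 KB, but the J instruction can
** target anywhere in the same aligned 256 MB region. Each spare slot is
** two MCode words: the long-range jump and its delay-slot instruction.
*/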

/* Setup spare long-range jump slots per mcarea. */
static void asm_sparejump_setup(ASMState *as)
{
  MCode *mxp = as->mctop;
  if ((char *)mxp == (char *)as->J->mcarea + as->J->szmcarea) {
    mxp -= MIPS_SPAREJUMP*2;
    lj_assertA(MIPSI_NOP == 0, "bad NOP");
    memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode));
    as->mctop = mxp;
  }
}

static MCode *asm_sparejump_use(MCode *mcarea, MCode tjump)
{
  MCode *mxp = (MCode *)((char *)mcarea + ((MCLink *)mcarea)->size);
  int slot = MIPS_SPAREJUMP;
  while (slot--) {
    mxp -= 2;
    if (*mxp == tjump) {
      return mxp;
    } else if (*mxp == MIPSI_NOP) {
      *mxp = tjump;
      return mxp;
    }
  }
  return NULL;
}

/* Setup exit stub after the end of each trace. */
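/* Machine code is emitted backwards (*--mxp), so the stub below reads in
** execution order: sw; j; li. The li ends up in the branch delay slot of
** the j, so RID_TMP holds the trace number on entry to the exit handler.
*/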
static void asm_exitstub_setup(ASMState *as)
{
  MCode *mxp = as->mctop;
  /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
  *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
  *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
  lj_assertA(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0,
	     "branch target out of range");
  *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
  as->mctop = mxp;
}

/* Keep this in-sync with exitstub_trace_addr(). */
#define asm_exitstub_addr(as)	((as)->mctop)

/* Emit conditional branch to exit for guard. */
static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
{
  MCode *target = asm_exitstub_addr(as);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->invmcp = NULL;
    as->loopinv = 1;
    as->mcp = p+1;
#if !LJ_TARGET_MIPSR6
    mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u);  /* Invert cond. */
#else
    mi = mi ^ ((mi>>28) == 1 ? 0x04000000u :
	       (mi>>28) == 4 ? 0x00800000u : 0x00010000u);  /* Invert cond. */
#endif
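    /* Each XOR flips the single bit separating a branch from its
    ** complement: bit 26 toggles BEQ<->BNE and BLEZ<->BGTZ, bit 16
    ** toggles the rt/tf field for BLTZ<->BGEZ and BC1F<->BC1T, and on
    ** R6 bit 23 toggles BC1EQZ<->BC1NEZ.
    */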
    target = p;  /* Patch target later in asm_loop_fixup. */
  }
  emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
  emit_branch(as, mi, rs, rt, target);
}

/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

/* Fuse array/hash/upvalue reference into register+offset operand. */
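/* RID_JGL holds J2G(as->J)+32768, so any global_State field in the first
** 64 KB is reachable with a signed 16-bit offset from it.
*/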
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
	if (irref_isk(ir->op2)) {
	  IRRef tab = IR(ir->op1)->op1;
	  int32_t ofs = asm_fuseabase(as, tab);
	  IRRef refa = ofs ? tab : ir->op1;
	  ofs += 8*IR(ir->op2)->i;
	  if (checki16(ofs)) {
	    *ofsp = ofs;
	    return ra_alloc1(as, refa, allow);
	  }
	}
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
	int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
	if (checki16(ofs)) {
	  *ofsp = ofs;
	  return ra_alloc1(as, ir->op1, allow);
	}
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
	GCfunc *fn = ir_kfunc(IR(ir->op1));
	intptr_t ofs = (intptr_t)&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv;
	intptr_t jgl = (intptr_t)J2G(as->J);
	if ((uintptr_t)(ofs-jgl) < 65536) {
	  *ofsp = ofs-jgl-32768;
	  return RID_JGL;
	} else {
	  *ofsp = (int16_t)ofs;
	  return ra_allock(as, ofs-(int16_t)ofs, allow);
	}
      }
    } else if (ir->o == IR_TMPREF) {
      *ofsp = (int32_t)(offsetof(global_State, tmptv)-32768);
      return RID_JGL;
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
			 RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    if (ir->o == IR_ADD) {
      intptr_t ofs2;
      if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(as, ir->op2),
				 checki16(ofs2))) {
	ref = ir->op1;
	ofs = (int32_t)ofs2;
      }
    } else if (ir->o == IR_STRREF) {
      intptr_t ofs2 = 65536;
      lj_assertA(ofs == 0, "bad usage");
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
	ofs2 = ofs + get_kval(as, ir->op2);
	ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
	ofs2 = ofs + get_kval(as, ir->op1);
	ref = ir->op2;
      }
      if (!checki16(ofs2)) {
	/* NYI: Fuse ADD with constant. */
	Reg right, left = ra_alloc2(as, ir, allow);
	right = (left >> 8); left &= 255;
	emit_hsi(as, mi, rt, RID_TMP, ofs);
	emit_dst(as, MIPSI_AADDU, RID_TMP, left, right);
	return;
      }
      ofs = ofs2;
    }
  }
  base = ra_alloc1(as, ref, allow);
  emit_hsi(as, mi, rt, base, ofs);
}

/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = LJ_32 ? 16 : 0;
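  /* The o32 ABI reserves a 16 byte home area for a0-a3 below the outgoing
  ** stack arguments; the n64 ABI has none, so the stack offset starts at 0.
  */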
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func, 1);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    if (ref) {
      IRIns *ir = IR(ref);
#if !LJ_SOFTFP
      if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
	  !(ci->flags & CCI_VARARG)) {
	lj_assertA(rset_test(as->freeset, fpr),
		   "reg %d not free", fpr);  /* Already evicted. */
	ra_leftov(as, fpr, ref);
	fpr += LJ_32 ? 2 : 1;
	gpr += (LJ_32 && irt_isnum(ir->t)) ? 2 : 1;
      } else
#endif
      {
#if LJ_32 && !LJ_SOFTFP
	fpr = REGARG_LASTFPR+1;
#endif
	if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
	if (gpr <= REGARG_LASTGPR) {
	  lj_assertA(rset_test(as->freeset, gpr),
		     "reg %d not free", gpr);  /* Already evicted. */
#if !LJ_SOFTFP
	  if (irt_isfp(ir->t)) {
	    RegSet of = as->freeset;
	    Reg r;
	    /* Workaround to protect argument GPRs from being used for remat. */
	    as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
	    r = ra_alloc1(as, ref, RSET_FPR);
	    as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
	    if (irt_isnum(ir->t)) {
#if LJ_32
	      emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
	      emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
	      lj_assertA(rset_test(as->freeset, gpr+1),
			 "reg %d not free", gpr+1);  /* Already evicted. */
	      gpr += 2;
#else
	      emit_tg(as, MIPSI_DMFC1, gpr, r);
	      gpr++; fpr++;
#endif
	    } else if (irt_isfloat(ir->t)) {
	      emit_tg(as, MIPSI_MFC1, gpr, r);
	      gpr++;
#if LJ_64
	      fpr++;
#endif
	    }
	  } else
#endif
	  {
	    ra_leftov(as, gpr, ref);
	    gpr++;
#if LJ_64 && !LJ_SOFTFP
	    fpr++;
#endif
	  }
	} else {
	  Reg r = ra_alloc1z(as, ref, !LJ_SOFTFP && irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
#if LJ_32
	  if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
	  emit_spstore(as, ir, r, ofs);
	  ofs += irt_isnum(ir->t) ? 8 : 4;
#else
	  emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_isfp(ir->t) && !irt_is64(ir->t)) ? 4 : 0));
	  ofs += 8;
#endif
	}
      }
    } else {
#if !LJ_SOFTFP
      fpr = REGARG_LASTFPR+1;
#endif
      if (gpr <= REGARG_LASTGPR) {
	gpr++;
#if LJ_64 && !LJ_SOFTFP
	fpr++;
#endif
      } else {
	ofs += LJ_32 ? 4 : 8;
      }
    }
    checkmclim(as);
  }
}

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
#if !LJ_SOFTFP
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
#endif
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lj_assertA(!irt_ispri(ir->t), "PRI dest");
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if ((ci->flags & CCI_CASTU64)) {
	int32_t ofs = sps_scale(ir->s);
	Reg dest = ir->r;
	if (ra_hasreg(dest)) {
	  ra_free(as, dest);
	  ra_modified(as, dest);
#if LJ_32
	  emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
	  emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
#else
	  emit_tg(as, MIPSI_DMTC1, RID_RET, dest);
#endif
	}
	if (ofs) {
#if LJ_32
	  emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
	  emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
#else
	  emit_tsi(as, MIPSI_SD, RID_RET, RID_SP, ofs);
#endif
	}
      } else {
	ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)get_kval(as, func);
  } else {  /* Need specific register for indirect calls. */
    Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
    MCode *p = as->mcp;
    if (r == RID_CFUNCADDR)
      *--p = MIPSI_NOP;
    else
      *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
    *--p = MIPSI_JALR | MIPSF_S(r);
    as->mcp = p;
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

#if !LJ_SOFTFP
static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
		RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR)
#if LJ_TARGET_MIPSR6
		|RID2RSET(RID_F21)
#endif
		;
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_FPRET);
  emit_call(as, (void *)lj_ir_callinfo[id].func, 0);
  ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
}
#endif

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guard(as, MIPSI_BNE, RID_TMP,
	    ra_allock(as, igcptr(pc), rset_exclude(RSET_GPR, base)));
  emit_tsi(as, MIPSI_AL, RID_TMP, base, -8);
}

/* -- Buffer operations --------------------------------------------------- */

#if LJ_HASBUFFER
static void asm_bufhdr_write(ASMState *as, Reg sb)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  IRIns irgc;
  irgc.ot = IRT(0, IRT_PGC);  /* GC type. */
  emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
  if ((as->flags & JIT_F_MIPSXXR2)) {
    emit_tsml(as, LJ_64 ? MIPSI_DINS : MIPSI_INS, RID_TMP, tmp,
	      lj_fls(SBUF_MASK_FLAG), 0);
  } else {
    emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
    emit_tsi(as, MIPSI_ANDI, tmp, tmp, SBUF_MASK_FLAG);
  }
  emit_getgl(as, RID_TMP, cur_L);
  emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
}
#endif

/* -- Type conversions ---------------------------------------------------- */

#if !LJ_SOFTFP
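/* Checked number-to-int conversion: convert to int, convert back and
** compare with the original; the guard fails unless the value was an
** exact 32 bit integer.
*/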
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
#if !LJ_TARGET_MIPSR6
  asm_guard(as, MIPSI_BC1F, 0, 0);
  emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
#else
  asm_guard(as, MIPSI_BC1EQZ, 0, (tmp&31));
  emit_fgh(as, MIPSI_CMP_EQ_D, tmp, tmp, left);
#endif
  emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fg(as, MIPSI_CVT_W_D, tmp, left);
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  emit_tg(as, MIPSI_MFC1, dest, tmp);
  emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
}
#elif LJ_64  /* && LJ_SOFTFP */
static void asm_tointg(ASMState *as, IRIns *ir, Reg r)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(REGARG_FIRSTGPR)|RID2RSET(RID_RET)|RID2RSET(RID_RET+1)|
		RID2RSET(RID_R1)|RID2RSET(RID_R12);
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  ra_evictset(as, drop);
  /* Return values are in RID_RET (converted value) and RID_RET+1 (status). */
  ra_destreg(as, ir, RID_RET);
  asm_guard(as, MIPSI_BNE, RID_RET+1, RID_ZERO);
  emit_call(as, (void *)lj_ir_callinfo[IRCALL_lj_vm_tointg].func, 0);
  if (r == RID_NONE)
    ra_leftov(as, REGARG_FIRSTGPR, ir->op1);
  else if (r != REGARG_FIRSTGPR)
    emit_move(as, REGARG_FIRSTGPR, r);
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dta(as, MIPSI_SLL, dest, dest, 0);
  asm_callid(as, ir, IRCALL_lj_vm_tobit);
}
#endif

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP32
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
#if LJ_64
  int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
#endif
  IRRef lref = ir->op1;
#if LJ_32
  /* 64 bit integer conversions are handled by SPLIT. */
  lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
	     "IR %04d has unsplit 64 bit type",
	     (int)(ir - as->ir) - REF_BIAS);
#endif
#if LJ_SOFTFP32
  /* FP conversions are handled by SPLIT. */
  lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
	     "IR %04d has FP type",
	     (int)(ir - as->ir) - REF_BIAS);
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
#if !LJ_SOFTFP
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
	      dest, ra_alloc1(as, lref, RSET_FPR));
    } else if (st == IRT_U32) {  /* U32 to FP conversion. */
      /* y = (x ^ 0x80000000) + 2147483648.0 */
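      /* Interpreted as a signed 32 bit integer, x ^ 0x80000000 is x - 2^31.
      ** Converting that exactly to a double and adding 2^31 back yields
      ** the original unsigned value.
      */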
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
	emit_fg(as, MIPSI_CVT_S_D, dest, dest);
      /* Must perform arithmetic with doubles to keep the precision. */
      emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
      emit_fg(as, MIPSI_CVT_D_W, dest, dest);
      emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		 (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
      emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
      emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
      emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
#if LJ_64
    } else if (st == IRT_U64) {  /* U64 to FP conversion. */
      /* if (x >= 1u<<63) y = (double)(int64_t)(x&(1u<<63)-1) + pow(2.0, 63) */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      MCLabel l_end = emit_label(as);
      if (irt_isfloat(ir->t)) {
	emit_fgh(as, MIPSI_ADD_S, dest, dest, tmp);
	emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_2P63],
		   rset_exclude(RSET_GPR, left));
	emit_fg(as, MIPSI_CVT_S_L, dest, dest);
      } else {
	emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
	emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P63],
		   rset_exclude(RSET_GPR, left));
	emit_fg(as, MIPSI_CVT_D_L, dest, dest);
      }
      emit_branch(as, MIPSI_BGEZ, left, RID_ZERO, l_end);
      emit_tg(as, MIPSI_DMTC1, RID_TMP, dest);
      emit_tsml(as, MIPSI_DEXTM, RID_TMP, left, 30, 0);
#endif
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
#if LJ_32
      emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
	      dest, dest);
      emit_tg(as, MIPSI_MTC1, left, dest);
#else
      MIPSIns mi = irt_isfloat(ir->t) ?
	(st64 ? MIPSI_CVT_S_L : MIPSI_CVT_S_W) :
	(st64 ? MIPSI_CVT_D_L : MIPSI_CVT_D_W);
      emit_fg(as, mi, dest, dest);
      emit_tg(as, st64 ? MIPSI_DMTC1 : MIPSI_MTC1, left, dest);
#endif
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
		 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      if (irt_isu32(ir->t)) {  /* FP to U32 conversion. */
	/* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
	emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
	emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
		tmp, tmp);
	emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
		 tmp, left, tmp);
	if (st == IRT_FLOAT)
	  emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
		     (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
	else
	  emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		     (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR);
#if LJ_64
      } else if (irt_isu64(ir->t)) {  /* FP to U64 conversion. */
	MCLabel l_end;
	emit_tg(as, MIPSI_DMFC1, dest, tmp);
	l_end = emit_label(as);
	/* For inputs >= 2^63 add -2^64 and convert again. */
	if (st == IRT_NUM) {
	  emit_fg(as, MIPSI_TRUNC_L_D, tmp, tmp);
	  emit_fgh(as, MIPSI_ADD_D, tmp, left, tmp);
	  emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		     (void *)&as->J->k64[LJ_K64_M2P64],
		     rset_exclude(RSET_GPR, dest));
	  emit_fg(as, MIPSI_TRUNC_L_D, tmp, left);  /* Delay slot. */
#if !LJ_TARGET_MIPSR6
	  emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
	  emit_fgh(as, MIPSI_C_OLT_D, 0, left, tmp);
#else
	  emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
	  emit_fgh(as, MIPSI_CMP_LT_D, left, left, tmp);
#endif
	  emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
		     (void *)&as->J->k64[LJ_K64_2P63],
		     rset_exclude(RSET_GPR, dest));
	} else {
	  emit_fg(as, MIPSI_TRUNC_L_S, tmp, tmp);
	  emit_fgh(as, MIPSI_ADD_S, tmp, left, tmp);
	  emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
		     (void *)&as->J->k32[LJ_K32_M2P64],
		     rset_exclude(RSET_GPR, dest));
	  emit_fg(as, MIPSI_TRUNC_L_S, tmp, left);  /* Delay slot. */
#if !LJ_TARGET_MIPSR6
	  emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
	  emit_fgh(as, MIPSI_C_OLT_S, 0, left, tmp);
#else
	  emit_branch(as, MIPSI_BC1NEZ, 0, (left&31), l_end);
	  emit_fgh(as, MIPSI_CMP_LT_S, left, left, tmp);
#endif
	  emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
		     (void *)&as->J->k32[LJ_K32_2P63],
		     rset_exclude(RSET_GPR, dest));
	}
#endif
      } else {
#if LJ_32
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
		tmp, left);
#else
	MIPSIns mi = irt_is64(ir->t) ?
	  (st == IRT_NUM ? MIPSI_TRUNC_L_D : MIPSI_TRUNC_L_S) :
	  (st == IRT_NUM ? MIPSI_TRUNC_W_D : MIPSI_TRUNC_W_S);
	emit_tg(as, irt_is64(ir->t) ? MIPSI_DMFC1 : MIPSI_MFC1, dest, left);
	emit_fg(as, mi, left, left);
#endif
      }
    }
  } else
#else
  if (irt_isfp(ir->t)) {
#if LJ_64 && LJ_HASFFI
    if (stfp) {  /* FP to FP conversion. */
      asm_callid(as, ir, irt_isnum(ir->t) ? IRCALL_softfp_f2d :
					    IRCALL_softfp_d2f);
    } else {  /* Integer to FP conversion. */
      IRCallID cid = ((IRT_IS64 >> st) & 1) ?
	(irt_isnum(ir->t) ?
	 (st == IRT_I64 ? IRCALL_fp64_l2d : IRCALL_fp64_ul2d) :
	 (st == IRT_I64 ? IRCALL_fp64_l2f : IRCALL_fp64_ul2f)) :
	(irt_isnum(ir->t) ?
	 (st == IRT_INT ? IRCALL_softfp_i2d : IRCALL_softfp_ui2d) :
	 (st == IRT_INT ? IRCALL_softfp_i2f : IRCALL_softfp_ui2f));
      asm_callid(as, ir, cid);
    }
#else
    asm_callid(as, ir, IRCALL_softfp_i2d);
#endif
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
		 "bad type for checked CONV");
      asm_tointg(as, ir, RID_NONE);
    } else {
      IRCallID cid = irt_is64(ir->t) ?
	((st == IRT_NUM) ?
	 (irt_isi64(ir->t) ? IRCALL_fp64_d2l : IRCALL_fp64_d2ul) :
	 (irt_isi64(ir->t) ? IRCALL_fp64_f2l : IRCALL_fp64_f2ul)) :
	((st == IRT_NUM) ?
	 (irt_isint(ir->t) ? IRCALL_softfp_d2i : IRCALL_softfp_d2ui) :
	 (irt_isint(ir->t) ? IRCALL_softfp_f2i : IRCALL_softfp_f2ui));
      asm_callid(as, ir, cid);
    }
  } else
#endif
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
      lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
      if ((ir->op2 & IRCONV_SEXT)) {
	if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
	  emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
	} else {
	  uint32_t shift = st == IRT_I8 ? 24 : 16;
	  emit_dta(as, MIPSI_SRA, dest, dest, shift);
	  emit_dta(as, MIPSI_SLL, dest, left, shift);
	}
      } else {
	emit_tsi(as, MIPSI_ANDI, dest, left,
		 (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
      }
    } else {  /* 32/64 bit integer conversions. */
#if LJ_32
      /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
#else
      if (irt_is64(ir->t)) {
	if (st64) {
	  /* 64/64 bit no-op (cast). */
	  ra_leftov(as, dest, lref);
	} else {
	  Reg left = ra_alloc1(as, lref, RSET_GPR);
	  if ((ir->op2 & IRCONV_SEXT)) {  /* 32 to 64 bit sign extension. */
	    emit_dta(as, MIPSI_SLL, dest, left, 0);
	  } else {  /* 32 to 64 bit zero extension. */
	    emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
	  }
	}
      } else {
	if (st64 && !(ir->op2 & IRCONV_NONE)) {
	  /* This is either a 32 bit reg/reg mov which zeroes the hiword
	  ** or a load of the loword from a 64 bit address.
	  */
	  Reg left = ra_alloc1(as, lref, RSET_GPR);
	  emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0);
	} else {  /* 32/32 bit no-op (cast). */
	  /* Do nothing, but may need to move regs. */
	  ra_leftov(as, dest, lref);
	}
      }
#endif
    }
  }
}

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  int32_t ofs = 0;
#if LJ_SOFTFP32
  ra_evictset(as, RSET_SCRATCH);
  if (ra_used(ir)) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
	(ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
	Reg r = (ir+i)->r;
	if (ra_hasreg(r)) {
	  ra_free(as, r);
	  ra_modified(as, r);
	  emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
	}
      }
      ofs = sps_scale(ir->s & ~1);
    } else {
      Reg rhi = ra_dest(as, ir+1, RSET_GPR);
      Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
      emit_tsi(as, MIPSI_LW, rhi, RID_SP, ofs+(LJ_BE?0:4));
      emit_tsi(as, MIPSI_LW, rlo, RID_SP, ofs+(LJ_BE?4:0));
    }
  }
#else
  RegSet drop = RSET_SCRATCH;
  if (ra_hasreg(ir->r)) rset_set(drop, ir->r);  /* Spill dest reg (if any). */
  ra_evictset(as, drop);
  ofs = sps_scale(ir->s);
#endif
  asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1),
	   RID_SP, ofs);
}

/* -- Memory references --------------------------------------------------- */

#if LJ_64
/* Store tagged value for ref at base+ofs. */
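/* LJ_GC64 tagged values keep the payload in the low 47 bits with the type
** code above it, hence the shifts by 47 here and the DEXTM/DSRA32 pairs
** used elsewhere to strip or extract the tag.
*/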
static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
{
  RegSet allow = rset_exclude(RSET_GPR, base);
  IRIns *ir = IR(ref);
  lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
	     "store of IR type %d", irt_type(ir->t));
  if (irref_isk(ref)) {
    TValue k;
    lj_ir_kvalue(as->J->L, &k, ir);
    emit_tsi(as, MIPSI_SD, ra_allock(as, (int64_t)k.u64, allow), base, ofs);
  } else {
    Reg src = ra_alloc1(as, ref, allow);
    Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47,
			 rset_exclude(allow, src));
    emit_tsi(as, MIPSI_SD, RID_TMP, base, ofs);
    if (irt_isinteger(ir->t)) {
      emit_dst(as, MIPSI_DADDU, RID_TMP, RID_TMP, type);
      emit_tsml(as, MIPSI_DEXT, RID_TMP, src, 31, 0);
    } else {
      emit_dst(as, MIPSI_DADDU, RID_TMP, src, type);
    }
  }
}
#endif

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
{
  int32_t tmpofs = (int32_t)(offsetof(global_State, tmptv)-32768);
  if ((mode & IRTMPREF_IN1)) {
    IRIns *ir = IR(ref);
    if (irt_isnum(ir->t)) {
      if ((mode & IRTMPREF_OUT1)) {
#if LJ_SOFTFP
	emit_tsi(as, MIPSI_AADDIU, dest, RID_JGL, tmpofs);
#if LJ_64
	emit_setgl(as, ra_alloc1(as, ref, RSET_GPR), tmptv.u64);
#else
	lj_assertA(irref_isk(ref), "unsplit FP op");
	emit_setgl(as,
		   ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, RSET_GPR),
		   tmptv.u32.lo);
	emit_setgl(as,
		   ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, RSET_GPR),
		   tmptv.u32.hi);
#endif
#else
	Reg src = ra_alloc1(as, ref, RSET_FPR);
	emit_tsi(as, MIPSI_AADDIU, dest, RID_JGL, tmpofs);
	emit_tsi(as, MIPSI_SDC1, (src & 31), RID_JGL, tmpofs);
#endif
      } else if (irref_isk(ref)) {
	/* Use the number constant itself as a TValue. */
	ra_allockreg(as, igcptr(ir_knum(ir)), dest);
      } else {
#if LJ_SOFTFP32
	lj_assertA(0, "unsplit FP op");
#else
	/* Otherwise force a spill and use the spill slot. */
	emit_tsi(as, MIPSI_AADDIU, dest, RID_SP, ra_spill(as, ir));
#endif
      }
    } else {
      /* Otherwise use g->tmptv to hold the TValue. */
#if LJ_32
      Reg type;
      emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, tmpofs);
      if (!irt_ispri(ir->t)) {
	Reg src = ra_alloc1(as, ref, RSET_GPR);
	emit_setgl(as, src, tmptv.gcr);
      }
      if (LJ_SOFTFP && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t))
	type = ra_alloc1(as, ref+1, RSET_GPR);
      else
	type = ra_allock(as, (int32_t)irt_toitype(ir->t), RSET_GPR);
      emit_setgl(as, type, tmptv.it);
#else
      asm_tvstore64(as, dest, 0, ref);
      emit_tsi(as, MIPSI_DADDIU, dest, RID_JGL, tmpofs);
#endif
    }
  } else {
    emit_tsi(as, MIPSI_AADDIU, dest, RID_JGL, tmpofs);
  }
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    ofs += 8*IR(ir->op2)->i;
    if (checki16(ofs)) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_tsi(as, MIPSI_AADDIU, dest, base, ofs);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
#if !LJ_TARGET_MIPSR6
  emit_dst(as, MIPSI_AADDU, dest, RID_TMP, base);
  emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
#else
  emit_dst(as, MIPSI_ALSA | MIPSF_A(3-1), dest, idx, base);
#endif
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
#if LJ_64
  Reg cmp64 = RID_NONE;
#endif
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  int isk = irref_isk(refkey);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  rset_clear(allow, tab);
  if (!LJ_SOFTFP && irt_isnum(kt)) {
    key = ra_alloc1(as, refkey, RSET_FPR);
    tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
  } else {
    if (!irt_ispri(kt)) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
#if LJ_32
    if (LJ_SOFTFP && irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
	type = tmpnum = (irkey+1)->r;
	tmp1 = ra_scratch(as, allow);
	rset_clear(allow, tmp1);
	ra_noweak(as, tmpnum);
      } else {
	type = tmpnum = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, tmpnum);
    } else {
      type = ra_allock(as, (int32_t)irt_toitype(kt), allow);
      rset_clear(allow, type);
    }
#endif
  }
  tmp2 = ra_scratch(as, allow);
  rset_clear(allow, tmp2);
#if LJ_64
  if (LJ_SOFTFP || !irt_isnum(kt)) {
    /* Allocate cmp64 register used for 64-bit comparisons */
    if (LJ_SOFTFP && irt_isnum(kt)) {
      cmp64 = key;
    } else if (!isk && irt_isaddr(kt)) {
      cmp64 = tmp2;
    } else {
      int64_t k;
      if (isk && irt_isaddr(kt)) {
	k = ((int64_t)irt_toitype(kt) << 47) | irkey[1].tv.u64;
      } else {
	lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
	k = ~((int64_t)~irt_toitype(kt) << 47);
      }
      cmp64 = ra_allock(as, k, allow);
      rset_clear(allow, cmp64);
    }
  }
#endif

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));
  /* Follow hash chain until the end. */
  emit_move(as, dest, tmp1);
  l_loop = --as->mcp;
  emit_tsi(as, MIPSI_AL, tmp1, dest, (int32_t)offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ) {  /* Must match asm_guard(). */
    emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
    l_end = asm_exitstub_addr(as);
  }
  if (!LJ_SOFTFP && irt_isnum(kt)) {
#if !LJ_TARGET_MIPSR6
    emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
    emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
#else
    emit_branch(as, MIPSI_BC1NEZ, 0, (tmpnum&31), l_end);
    emit_fgh(as, MIPSI_CMP_EQ_D, tmpnum, tmpnum, key);
#endif
    *--as->mcp = MIPSI_NOP;  /* Avoid NaN comparison overhead. */
    emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
    emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
#if LJ_32
    emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
  } else {
    if (irt_ispri(kt)) {
      emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
    } else {
      emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
      emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
      emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
    }
  }
  emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
  *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
#else
    emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 15);
    emit_tg(as, MIPSI_DMTC1, tmp1, tmpnum);
    emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
  } else {
    emit_branch(as, MIPSI_BEQ, tmp1, cmp64, l_end);
    emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64));
  }
  *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
  if (!isk && irt_isaddr(kt)) {
    type = ra_allock(as, (int64_t)irt_toitype(kt) << 47, allow);
    emit_dst(as, MIPSI_DADDU, tmp2, key, type);
    rset_clear(allow, type);
  }
#endif

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    Reg tmphash = tmp1;
    if (isk)
      tmphash = ra_allock(as, khash, allow);
    emit_dst(as, MIPSI_AADDU, dest, dest, tmp1);
    lj_assertA(sizeof(Node) == 24, "bad Node size");
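    /* Scale the masked hash by sizeof(Node) == 24 as (h<<5) - (h<<3).
    ** Code is emitted backwards, so the shifts below execute first.
    */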
    emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
    emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
    emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
    emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
    emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node));
    emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
    if (isk) {
      /* Nothing to do. */
    } else if (irt_isstr(kt)) {
      emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, sid));
    } else {  /* Must match with hash*() in lj_tab.c. */
      emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
      emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
      emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
      emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
#if LJ_32
      if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
	emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
	if ((as->flags & JIT_F_MIPSXXR2)) {
	  emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
	} else {
	  emit_dst(as, MIPSI_OR, dest, dest, tmp1);
	  emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
	  emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
	}
	emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
#if LJ_SOFTFP
	emit_ds(as, MIPSI_MOVE, tmp1, type);
	emit_ds(as, MIPSI_MOVE, tmp2, key);
#else
	emit_tg(as, MIPSI_MFC1, tmp2, key);
	emit_tg(as, MIPSI_MFC1, tmp1, key+1);
#endif
      } else {
	emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
	emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
	emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
      }
#else
      emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
      emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
      if (irt_isnum(kt)) {
	emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
	emit_dta(as, MIPSI_DSRA32, tmp1, LJ_SOFTFP ? key : tmp1, 0);
	emit_dta(as, MIPSI_SLL, tmp2, LJ_SOFTFP ? key : tmp1, 0);
#if !LJ_SOFTFP
	emit_tg(as, MIPSI_DMFC1, tmp1, key);
#endif
      } else {
	checkmclim(as);
	emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 0);
	emit_dta(as, MIPSI_SLL, tmp2, key, 0);
	emit_dst(as, MIPSI_DADDU, tmp1, key, type);
      }
#endif
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  RegSet allow = rset_exclude(RSET_GPR, node);
  Reg idx = node;
#if LJ_32
  Reg key = RID_NONE, type = RID_TMP;
  int32_t lo, hi;
#else
  Reg key = ra_scratch(as, allow);
  int64_t k;
#endif
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
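  /* 32736 is the largest Node-aligned offset for which the key (and, on
  ** 32 bit targets, its high word) is still reachable with a signed 16 bit
  ** displacement; larger offsets are added via a register instead.
  */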
1165   if (ofs > 32736) {
1166     idx = dest;
1167     rset_clear(allow, dest);
1168     kofs = (int32_t)offsetof(Node, key);
1169   } else if (ra_hasreg(dest)) {
1170     emit_tsi(as, MIPSI_AADDIU, dest, node, ofs);
1171   }
1172 #if LJ_32
1173   if (!irt_ispri(irkey->t)) {
1174     key = ra_scratch(as, allow);
1175     rset_clear(allow, key);
1176   }
1177   if (irt_isnum(irkey->t)) {
1178     lo = (int32_t)ir_knum(irkey)->u32.lo;
1179     hi = (int32_t)ir_knum(irkey)->u32.hi;
1180   } else {
1181     lo = irkey->i;
1182     hi = irt_toitype(irkey->t);
1183     if (!ra_hasreg(key))
1184       goto nolo;
1185   }
1186   asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
1187 nolo:
1188   asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
1189   if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
1190   emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
1191 #else
1192   if (irt_ispri(irkey->t)) {
1193     lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
1194     k = ~((int64_t)~irt_toitype(irkey->t) << 47);
1195   } else if (irt_isnum(irkey->t)) {
1196     k = (int64_t)ir_knum(irkey)->u64;
1197   } else {
1198     k = ((int64_t)irt_toitype(irkey->t) << 47) | (int64_t)ir_kgc(irkey);
1199   }
1200   asm_guard(as, MIPSI_BNE, key, ra_allock(as, k, allow));
1201   emit_tsi(as, MIPSI_LD, key, idx, kofs);
1202 #endif
1203   if (ofs > 32736)
1204     emit_tsi(as, MIPSI_AADDU, dest, node, ra_allock(as, ofs, allow));
1205 }
1206 
asm_uref(ASMState * as,IRIns * ir)1207 static void asm_uref(ASMState *as, IRIns *ir)
1208 {
1209   Reg dest = ra_dest(as, ir, RSET_GPR);
1210   if (irref_isk(ir->op1)) {
1211     GCfunc *fn = ir_kfunc(IR(ir->op1));
1212     MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
1213     emit_lsptr(as, MIPSI_AL, dest, v, RSET_GPR);
1214   } else {
1215     Reg uv = ra_scratch(as, RSET_GPR);
1216     Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
1217     if (ir->o == IR_UREFC) {
1218       asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
1219       emit_tsi(as, MIPSI_AADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
1220       emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
1221     } else {
1222       emit_tsi(as, MIPSI_AL, dest, uv, (int32_t)offsetof(GCupval, v));
1223     }
1224     emit_tsi(as, MIPSI_AL, uv, func, (int32_t)offsetof(GCfuncL, uvptr) +
1225 	     (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
1226   }
1227 }
1228 
asm_fref(ASMState * as,IRIns * ir)1229 static void asm_fref(ASMState *as, IRIns *ir)
1230 {
1231   UNUSED(as); UNUSED(ir);
1232   lj_assertA(!ra_used(ir), "unfused FREF");
1233 }
1234 
asm_strref(ASMState * as,IRIns * ir)1235 static void asm_strref(ASMState *as, IRIns *ir)
1236 {
1237 #if LJ_32
1238   Reg dest = ra_dest(as, ir, RSET_GPR);
1239   IRRef ref = ir->op2, refk = ir->op1;
1240   int32_t ofs = (int32_t)sizeof(GCstr);
1241   Reg r;
1242   if (irref_isk(ref)) {
1243     IRRef tmp = refk; refk = ref; ref = tmp;
1244   } else if (!irref_isk(refk)) {
1245     Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
1246     IRIns *irr = IR(ir->op2);
1247     if (ra_hasreg(irr->r)) {
1248       ra_noweak(as, irr->r);
1249       right = irr->r;
1250     } else if (mayfuse(as, irr->op2) &&
1251 	       irr->o == IR_ADD && irref_isk(irr->op2) &&
1252 	       checki16(ofs + IR(irr->op2)->i)) {
1253       ofs += IR(irr->op2)->i;
1254       right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
1255     } else {
1256       right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
1257     }
1258     emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
1259     emit_dst(as, MIPSI_ADDU, dest, left, right);
1260     return;
1261   }
1262   r = ra_alloc1(as, ref, RSET_GPR);
1263   ofs += IR(refk)->i;
1264   if (checki16(ofs))
1265     emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
1266   else
1267     emit_dst(as, MIPSI_ADDU, dest, r,
1268 	     ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
1269 #else
1270   RegSet allow = RSET_GPR;
1271   Reg dest = ra_dest(as, ir, allow);
1272   Reg base = ra_alloc1(as, ir->op1, allow);
1273   IRIns *irr = IR(ir->op2);
1274   int32_t ofs = sizeof(GCstr);
1275   rset_clear(allow, base);
1276   if (irref_isk(ir->op2) && checki16(ofs + irr->i)) {
1277     emit_tsi(as, MIPSI_DADDIU, dest, base, ofs + irr->i);
1278   } else {
1279     emit_tsi(as, MIPSI_DADDIU, dest, dest, ofs);
1280     emit_dst(as, MIPSI_DADDU, dest, base, ra_alloc1(as, ir->op2, allow));
1281   }
1282 #endif
1283 }
1284 
1285 /* -- Loads and stores ---------------------------------------------------- */
1286 
asm_fxloadins(ASMState * as,IRIns * ir)1287 static MIPSIns asm_fxloadins(ASMState *as, IRIns *ir)
1288 {
1289   UNUSED(as);
1290   switch (irt_type(ir->t)) {
1291   case IRT_I8: return MIPSI_LB;
1292   case IRT_U8: return MIPSI_LBU;
1293   case IRT_I16: return MIPSI_LH;
1294   case IRT_U16: return MIPSI_LHU;
1295   case IRT_NUM:
1296     lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
1297     if (!LJ_SOFTFP) return MIPSI_LDC1;
1298   /* fallthrough */
1299   case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1;
1300   /* fallthrough */
1301   default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_LD : MIPSI_LW;
1302   }
1303 }
1304 
asm_fxstoreins(ASMState * as,IRIns * ir)1305 static MIPSIns asm_fxstoreins(ASMState *as, IRIns *ir)
1306 {
1307   UNUSED(as);
1308   switch (irt_type(ir->t)) {
1309   case IRT_I8: case IRT_U8: return MIPSI_SB;
1310   case IRT_I16: case IRT_U16: return MIPSI_SH;
1311   case IRT_NUM:
1312     lj_assertA(!LJ_SOFTFP32, "unsplit FP op");
1313     if (!LJ_SOFTFP) return MIPSI_SDC1;
1314   /* fallthrough */
1315   case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1;
1316   /* fallthrough */
1317   default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_SD : MIPSI_SW;
1318   }
1319 }
1320 
asm_fload(ASMState * as,IRIns * ir)1321 static void asm_fload(ASMState *as, IRIns *ir)
1322 {
1323   Reg dest = ra_dest(as, ir, RSET_GPR);
1324   MIPSIns mi = asm_fxloadins(as, ir);
1325   Reg idx;
1326   int32_t ofs;
1327   if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
1328     idx = RID_JGL;
1329     ofs = (ir->op2 << 2) - 32768 - GG_OFS(g);
1330   } else {
1331     idx = ra_alloc1(as, ir->op1, RSET_GPR);
1332     if (ir->op2 == IRFL_TAB_ARRAY) {
1333       ofs = asm_fuseabase(as, ir->op1);
1334       if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
1335 	emit_tsi(as, MIPSI_AADDIU, dest, idx, ofs);
1336 	return;
1337       }
1338     }
1339     ofs = field_ofs[ir->op2];
1340   }
1341   lj_assertA(!irt_isfp(ir->t), "bad FP FLOAD");
1342   emit_tsi(as, mi, dest, idx, ofs);
1343 }
1344 
asm_fstore(ASMState * as,IRIns * ir)1345 static void asm_fstore(ASMState *as, IRIns *ir)
1346 {
1347   if (ir->r != RID_SINK) {
1348     Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
1349     IRIns *irf = IR(ir->op1);
1350     Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
1351     int32_t ofs = field_ofs[irf->op2];
1352     MIPSIns mi = asm_fxstoreins(as, ir);
1353     lj_assertA(!irt_isfp(ir->t), "bad FP FSTORE");
1354     emit_tsi(as, mi, src, idx, ofs);
1355   }
1356 }
1357 
asm_xload(ASMState * as,IRIns * ir)1358 static void asm_xload(ASMState *as, IRIns *ir)
1359 {
1360   Reg dest = ra_dest(as, ir,
1361     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
1362   lj_assertA(LJ_TARGET_UNALIGNED || !(ir->op2 & IRXLOAD_UNALIGNED),
1363 	     "unaligned XLOAD");
1364   asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
1365 }
1366 
asm_xstore_(ASMState * as,IRIns * ir,int32_t ofs)1367 static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
1368 {
1369   if (ir->r != RID_SINK) {
1370     Reg src = ra_alloc1z(as, ir->op2,
1371       (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
1372     asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
1373 		 rset_exclude(RSET_GPR, src), ofs);
1374   }
1375 }
1376 
1377 #define asm_xstore(as, ir)	asm_xstore_(as, ir, 0)
1378 
asm_ahuvload(ASMState * as,IRIns * ir)1379 static void asm_ahuvload(ASMState *as, IRIns *ir)
1380 {
1381   int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
1382   Reg dest = RID_NONE, type = RID_TMP, idx;
1383   RegSet allow = RSET_GPR;
1384   int32_t ofs = 0;
1385   IRType1 t = ir->t;
1386   if (hiop) {
1387     t.irt = IRT_NUM;
1388     if (ra_used(ir+1)) {
1389       type = ra_dest(as, ir+1, allow);
1390       rset_clear(allow, type);
1391     }
1392   }
1393   if (ra_used(ir)) {
1394     lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
1395 	       irt_isint(ir->t) || irt_isaddr(ir->t),
1396 	       "bad load type %d", irt_type(ir->t));
1397     dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
1398     rset_clear(allow, dest);
1399 #if LJ_64
1400     if (irt_isaddr(t))
1401       emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
1402     else if (irt_isint(t))
1403       emit_dta(as, MIPSI_SLL, dest, dest, 0);
1404 #endif
1405   }
1406   idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1407   if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
1408   rset_clear(allow, idx);
1409   if (irt_isnum(t)) {
1410     asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
1411     emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
1412   } else {
1413     asm_guard(as, MIPSI_BNE, type,
1414 	      ra_allock(as, (int32_t)irt_toitype(t), allow));
1415   }
1416 #if LJ_32
1417   if (ra_hasreg(dest)) {
1418     if (!LJ_SOFTFP && irt_isnum(t))
1419       emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
1420     else
1421       emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
1422   }
1423   emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
1424 #else
1425   if (ra_hasreg(dest)) {
1426     if (!LJ_SOFTFP && irt_isnum(t)) {
1427       emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
1428       dest = type;
1429     }
1430   } else {
1431     dest = type;
1432   }
1433   emit_dta(as, MIPSI_DSRA32, type, dest, 15);
1434   emit_tsi(as, MIPSI_LD, dest, idx, ofs);
1435 #endif
1436 }
1437 
asm_ahustore(ASMState * as,IRIns * ir)1438 static void asm_ahustore(ASMState *as, IRIns *ir)
1439 {
1440   RegSet allow = RSET_GPR;
1441   Reg idx, src = RID_NONE, type = RID_NONE;
1442   int32_t ofs = 0;
1443   if (ir->r == RID_SINK)
1444     return;
1445   if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
1446     src = ra_alloc1(as, ir->op2, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
1447     idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1448     emit_hsi(as, LJ_SOFTFP ? MIPSI_SD : MIPSI_SDC1, src, idx, ofs);
1449   } else {
1450 #if LJ_32
1451     if (!irt_ispri(ir->t)) {
1452       src = ra_alloc1(as, ir->op2, allow);
1453       rset_clear(allow, src);
1454     }
1455     if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
1456       type = ra_alloc1(as, (ir+1)->op2, allow);
1457     else
1458       type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
1459     rset_clear(allow, type);
1460     idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1461     if (ra_hasreg(src))
1462       emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
1463     emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
1464 #else
1465     Reg tmp = RID_TMP;
1466     if (irt_ispri(ir->t)) {
1467       tmp = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow);
1468       rset_clear(allow, tmp);
1469     } else {
1470       src = ra_alloc1(as, ir->op2, allow);
1471       rset_clear(allow, src);
1472       type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
1473       rset_clear(allow, type);
1474     }
1475     idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1476     emit_tsi(as, MIPSI_SD, tmp, idx, ofs);
1477     if (ra_hasreg(src)) {
1478       if (irt_isinteger(ir->t)) {
1479 	emit_dst(as, MIPSI_DADDU, tmp, tmp, type);
1480 	emit_tsml(as, MIPSI_DEXT, tmp, src, 31, 0);
1481       } else {
1482 	emit_dst(as, MIPSI_DADDU, tmp, src, type);
1483       }
1484     }
1485 #endif
1486   }
1487 }
1488 
asm_sload(ASMState * as,IRIns * ir)1489 static void asm_sload(ASMState *as, IRIns *ir)
1490 {
1491   Reg dest = RID_NONE, type = RID_NONE, base;
1492   RegSet allow = RSET_GPR;
1493   IRType1 t = ir->t;
1494 #if LJ_32
1495   int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
1496   int hiop = (LJ_SOFTFP32 && (ir+1)->o == IR_HIOP);
1497   if (hiop)
1498     t.irt = IRT_NUM;
1499 #else
1500   int32_t ofs = 8*((int32_t)ir->op1-2);
1501 #endif
  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
	     "bad parent SLOAD");  /* Handled by asm_head_side(). */
  lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
	     "inconsistent SLOAD variant");
#if LJ_SOFTFP32
  lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
	     "unsplit SLOAD convert");  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    dest = ra_scratch(as, LJ_SOFTFP ? allow : RSET_FPR);
    asm_tointg(as, ir, dest);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    lj_assertA((LJ_SOFTFP32 ? 0 : irt_isnum(ir->t)) ||
	       irt_isint(ir->t) || irt_isaddr(ir->t),
	       "bad SLOAD type %d", irt_type(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    rset_clear(allow, base);
    if (!LJ_SOFTFP32 && (ir->op2 & IRSLOAD_CONVERT)) {
      if (irt_isint(t)) {
	Reg tmp = ra_scratch(as, LJ_SOFTFP ? RSET_GPR : RSET_FPR);
#if LJ_SOFTFP
	ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
	ra_destreg(as, ir, RID_RET);
	emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_d2i].func, 0);
	if (tmp != REGARG_FIRSTGPR)
	  emit_move(as, REGARG_FIRSTGPR, tmp);
#else
	emit_tg(as, MIPSI_MFC1, dest, tmp);
	emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp);
#endif
	dest = tmp;
	t.irt = IRT_NUM;  /* Check for original type. */
      } else {
	Reg tmp = ra_scratch(as, RSET_GPR);
#if LJ_SOFTFP
	ra_evictset(as, rset_exclude(RSET_SCRATCH, dest));
	ra_destreg(as, ir, RID_RET);
	emit_call(as, (void *)lj_ir_callinfo[IRCALL_softfp_i2d].func, 0);
	emit_dta(as, MIPSI_SLL, REGARG_FIRSTGPR, tmp, 0);
#else
	emit_fg(as, MIPSI_CVT_D_W, dest, dest);
	emit_tg(as, MIPSI_MTC1, tmp, dest);
#endif
	dest = tmp;
	t.irt = IRT_INT;  /* Check for original type. */
      }
    }
#if LJ_64
    else if (irt_isaddr(t)) {
      /* Clear type from pointers. */
      emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0);
    } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
      /* Sign-extend integers. */
      emit_dta(as, MIPSI_SLL, dest, dest, 0);
    }
#endif
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
  rset_clear(allow, base);
dotypecheck:
#if LJ_32
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type))
      type = RID_TMP;
    if (irt_isnum(t)) {
      asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM);
    } else {
      Reg ktype = ra_allock(as, irt_toitype(t), allow);
      asm_guard(as, MIPSI_BNE, type, ktype);
    }
  }
  if (ra_hasreg(dest)) {
    if (!LJ_SOFTFP && irt_isnum(t))
      emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
    else
      emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
  }
  if (ra_hasreg(type))
    emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
#else
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    type = dest < RID_MAX_GPR ? dest : RID_TMP;
    if (irt_ispri(t)) {
      asm_guard(as, MIPSI_BNE, type,
		ra_allock(as, ~((int64_t)~irt_toitype(t) << 47), allow));
    } else {
      if (irt_isnum(t)) {
	asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
	emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
	if (!LJ_SOFTFP && ra_hasreg(dest))
	  emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
      } else {
	asm_guard(as, MIPSI_BNE, RID_TMP,
		  ra_allock(as, (int32_t)irt_toitype(t), allow));
      }
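      /* DSRA32 with sa = 15 shifts by 32+15 = 47, leaving just the
      ** sign-extended type tag in RID_TMP for the checks above (the
      ** assembler emits backwards, so the LD below executes first).
      */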
      emit_dta(as, MIPSI_DSRA32, RID_TMP, type, 15);
    }
    emit_tsi(as, MIPSI_LD, type, base, ofs);
  } else if (ra_hasreg(dest)) {
    if (!LJ_SOFTFP && irt_isnum(t))
      emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
    else
      emit_tsi(as, irt_isint(t) ? MIPSI_LW : MIPSI_LD, dest, base,
	       ofs ^ ((LJ_BE && irt_isint(t)) ? 4 : 0));
  }
#endif
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet drop = RSET_SCRATCH;
  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
	     "bad CNEW/CNEWI operands");

  as->gcsteps++;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
#if LJ_32
    int32_t ofs = sizeof(GCcdata);
    if (sz == 8) {
      ofs += 4;
      lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
      if (LJ_LE) ir++;
    }
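    /* Store the 32 bit halves in memory order: ir starts at the half
    ** for the highest address, then steps up or down depending on
    ** endianness while ofs descends back to sizeof(GCcdata).
    */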
    for (;;) {
      Reg r = ra_alloc1z(as, ir->op2, allow);
      emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; if (LJ_BE) ir++; else ir--;
    }
#else
    emit_tsi(as, sz == 8 ? MIPSI_SD : MIPSI_SW, ra_alloc1(as, ir->op2, allow),
	     RID_RET, sizeof(GCcdata));
#endif
    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
  emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
  emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
  emit_ti(as, MIPSI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
	       ra_releasetmp(as, ASMREF_TMP1));
}
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg link = RID_TMP;
  MCLabel l_end = emit_label(as);
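  /* Emitted bottom-up. At runtime this loads tab->marked, skips to
  ** l_end if the black bit is clear, otherwise clears the bit and
  ** links the table into g->gc.grayagain. The XOR sits in the branch
  ** delay slot and is harmless when the branch is taken.
  */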
  emit_tsi(as, MIPSI_AS, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, link, gc.grayagain);
  emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP);  /* Clear black bit. */
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
  emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
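  /* Runtime order is the reverse: load val->marked and obj->marked,
  ** then fall through to the call only if val is white and the
  ** upvalue is black; otherwise branch to l_end.
  */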
  emit_branch(as, MIPSI_BEQ, tmp, RID_ZERO, l_end);  /* Not black? Skip. */
  emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
  emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_tsi(as, MIPSI_LBU, tmp, obj,
	   (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

#if !LJ_SOFTFP
static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
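  /* ra_alloc2() packs both source registers into one value; unpack
  ** them below. The same idiom recurs throughout this file.
  */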
  right = (left >> 8); left &= 255;
  emit_fgh(as, mi, dest, left, right);
}

static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_fg(as, mi, dest, left);
}
#endif

#if !LJ_SOFTFP32
static void asm_fpmath(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (ir->op2 <= IRFPM_TRUNC)
    asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
  else if (ir->op2 == IRFPM_SQRT)
    asm_fpunary(as, ir, MIPSI_SQRT_D);
  else
#endif
    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
}
#endif

#if !LJ_SOFTFP
#define asm_fpadd(as, ir)	asm_fparith(as, ir, MIPSI_ADD_D)
#define asm_fpsub(as, ir)	asm_fparith(as, ir, MIPSI_SUB_D)
#define asm_fpmul(as, ir)	asm_fparith(as, ir, MIPSI_MUL_D)
#elif LJ_64  /* && LJ_SOFTFP */
#define asm_fpadd(as, ir)	asm_callid(as, ir, IRCALL_softfp_add)
#define asm_fpsub(as, ir)	asm_callid(as, ir, IRCALL_softfp_sub)
#define asm_fpmul(as, ir)	asm_callid(as, ir, IRCALL_softfp_mul)
#endif

static void asm_add(ASMState *as, IRIns *ir)
{
  IRType1 t = ir->t;
#if !LJ_SOFTFP32
  if (irt_isnum(t)) {
    asm_fpadd(as, ir);
  } else
#endif
  {
    /* TODO MIPSR6: Fuse ADD(BSHL(a,1-4),b) or ADD(ADD(a,a),b) to MIPSI_ALSA. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    if (irref_isk(ir->op2)) {
      intptr_t k = get_kval(as, ir->op2);
      if (checki16(k)) {
	emit_tsi(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDIU : MIPSI_ADDIU, dest,
		 left, k);
	return;
      }
    }
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dst(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDU : MIPSI_ADDU, dest,
	     left, right);
  }
}

static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP32
  if (irt_isnum(ir->t)) {
    asm_fpsub(as, ir);
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
	     left, right);
  }
}

static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP32
  if (irt_isnum(ir->t)) {
    asm_fpmul(as, ir);
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (LJ_64 && irt_is64(ir->t)) {
#if !LJ_TARGET_MIPSR6
      emit_dst(as, MIPSI_MFLO, dest, 0, 0);
      emit_dst(as, MIPSI_DMULT, 0, left, right);
#else
      emit_dst(as, MIPSI_DMUL, dest, left, right);
#endif
    } else {
      emit_dst(as, MIPSI_MUL, dest, left, right);
    }
  }
}

#if !LJ_SOFTFP32
static void asm_fpdiv(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  asm_fparith(as, ir, MIPSI_DIV_D);
#else
  asm_callid(as, ir, IRCALL_softfp_div);
#endif
}
#endif

static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, MIPSI_NEG_D);
  } else
#elif LJ_64  /* && LJ_SOFTFP */
  if (irt_isnum(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    emit_dst(as, MIPSI_XOR, dest, left,
	    ra_allock(as, 0x8000000000000000ll, rset_exclude(RSET_GPR, dest)));
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest,
	     RID_ZERO, left);
  }
}

#if !LJ_SOFTFP
#define asm_abs(as, ir)		asm_fpunary(as, ir, MIPSI_ABS_D)
#elif LJ_64   /* && LJ_SOFTFP */
static void asm_abs(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_tsml(as, MIPSI_DEXTM, dest, left, 30, 0);
}
#endif

static void asm_arithov(ASMState *as, IRIns *ir)
{
  /* TODO MIPSR6: bovc/bnvc. Caveat: no delay slot to load RID_TMP. */
  Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
  lj_assertA(!irt_is64(ir->t), "bad usage");
  if (irref_isk(ir->op2)) {
    int k = IR(ir->op2)->i;
    if (ir->o == IR_SUBOV) k = -k;
    if (checki16(k)) {  /* (dest < left) == (k >= 0 ? 1 : 0) */
      left = ra_alloc1(as, ir->op1, RSET_GPR);
      asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      if (dest == left) emit_move(as, RID_TMP, left);
      return;
    }
  }
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
						 right), dest));
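  /* Sign-rule overflow check; the ADDU/SUBU below executes first.
  ** Example: 0x7fffffff + 1 gives dest = 0x80000000, so both
  ** dest^left and dest^right have the sign bit set and the guard
  ** exits.
  */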
  asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
  emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
  if (ir->o == IR_ADDOV) {  /* ((dest^left) & (dest^right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
  } else {  /* ((dest^left) & (dest^~right)) < 0 */
    emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
    emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
  }
  emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
  emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
  if (dest == left || dest == right)
    emit_move(as, RID_TMP, dest == left ? left : right);
}

#define asm_addov(as, ir)	asm_arithov(as, ir)
#define asm_subov(as, ir)	asm_arithov(as, ir)

static void asm_mulov(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
						 right), dest));
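  /* A 32x32->64 bit multiply overflows iff the high word differs from
  ** the sign-extension of the low word, i.e. hi != (lo >> 31).
  */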
  asm_guard(as, MIPSI_BNE, RID_TMP, tmp);
  emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31);
#if !LJ_TARGET_MIPSR6
  emit_dst(as, MIPSI_MFHI, tmp, 0, 0);
  emit_dst(as, MIPSI_MFLO, dest, 0, 0);
  emit_dst(as, MIPSI_MULT, 0, left, right);
#else
  emit_dst(as, MIPSI_MUL, dest, left, right);
  emit_dst(as, MIPSI_MUH, tmp, left, right);
#endif
}

#if LJ_32 && LJ_HASFFI
static void asm_add64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
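  /* Called on the HIOP: emit the hiword add (plus carry in RID_TMP)
  ** first, then step back to the loword op, whose SLTU computes the
  ** carry as (lo result unsigned< lo operand).
  */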
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
      goto loarith;
    } else if (checki16(k)) {
      emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      goto loarith;
    }
  }
  emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dst(as, MIPSI_ADDU, dest, left, right);
loarith:
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    if (k == 0) {
      if (dest != left)
	emit_move(as, dest, left);
      return;
    } else if (checki16(k)) {
      if (dest == left) {
	Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
	emit_move(as, dest, tmp);
	dest = tmp;
      }
      emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
      emit_tsi(as, MIPSI_ADDIU, dest, left, k);
      return;
    }
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (dest == left && dest == right) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
  emit_dst(as, MIPSI_ADDU, dest, left, right);
}

static void asm_sub64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  if (dest == left) {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
    emit_move(as, dest, tmp);
    dest = tmp;
  }
  emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
  emit_dst(as, MIPSI_SUBU, dest, left, right);
}

static void asm_neg64(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
  ir--;
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
  emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
}
#endif

static void asm_bnot(ASMState *as, IRIns *ir)
{
  Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
  IRIns *irl = IR(ir->op1);
  if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
    left = ra_alloc2(as, irl, RSET_GPR);
    right = (left >> 8); left &= 255;
  } else {
    left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
    right = RID_ZERO;
  }
  emit_dst(as, MIPSI_NOR, dest, left, right);
}

static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
#if LJ_32
  if ((as->flags & JIT_F_MIPSXXR2)) {
    emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
    emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
  } else {
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
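    /* Pre-R2 fallback, emitted bottom-up. In execution order: tmp
    ** collects byte 0<<24 and byte 3>>24, dest collects bytes 1 and 2
    ** shifted into place, and the final ORs merge all four.
    */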
    emit_dst(as, MIPSI_OR, dest, dest, tmp);
    emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
    emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
    emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
    emit_dta(as, MIPSI_SRL, dest, left, 8);
    emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
    emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
    emit_dta(as, MIPSI_SRL, tmp, left, 24);
    emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
  }
#else
  if (irt_is64(ir->t)) {
    emit_dst(as, MIPSI_DSHD, dest, 0, RID_TMP);
    emit_dst(as, MIPSI_DSBH, RID_TMP, 0, left);
  } else {
    emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
    emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
  }
#endif
}

static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  if (irref_isk(ir->op2)) {
    intptr_t k = get_kval(as, ir->op2);
    if (checku16(k)) {
      emit_tsi(as, mik, dest, left, k);
      return;
    }
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dst(as, mi, dest, left, right);
}

#define asm_band(as, ir)	asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI)
#define asm_bor(as, ir)		asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI)
#define asm_bxor(as, ir)	asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI)

static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    uint32_t shift = (uint32_t)IR(ir->op2)->i;
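    /* For 64 bit shifts, bit 5 of the count selects the +32 opcode
    ** variant (e.g. DSLL32) and the low 5 bits go into the sa field.
    */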
    if (LJ_64 && irt_is64(ir->t)) mik |= (shift & 32) ? MIPSI_D32 : MIPSI_D;
    emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR),
	     (shift & 31));
  } else {
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (LJ_64 && irt_is64(ir->t)) mi |= MIPSI_DV;
    emit_dst(as, mi, dest, right, left);  /* Shift amount is in rs. */
  }
}

#define asm_bshl(as, ir)	asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL)
#define asm_bshr(as, ir)	asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL)
#define asm_bsar(as, ir)	asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA)
#define asm_brol(as, ir)	lj_assertA(0, "unexpected BROL")

static void asm_bror(ASMState *as, IRIns *ir)
{
  if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) {
    asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (irref_isk(ir->op2)) {  /* Constant shifts. */
      uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
      Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
      emit_rotr(as, dest, left, RID_TMP, shift);
    } else {
      Reg right, left = ra_alloc2(as, ir, RSET_GPR);
      right = (left >> 8); left &= 255;
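      /* No ROTR before R2: compose (left >> n) | (left << (32-n)).
      ** Bottom-up again, so the SUBU computing 32-n executes first.
      */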
      emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
      emit_dst(as, MIPSI_SRLV, dest, right, left);
      emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
      emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
    }
  }
}

#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir)
{
  CCallInfo ci = lj_ir_callinfo[(IROp)ir->o == IR_MIN ? IRCALL_lj_vm_sfmin : IRCALL_lj_vm_sfmax];
#if LJ_64
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
#else
  IRRef args[4];
  args[0^LJ_BE] = ir->op1;
  args[1^LJ_BE] = (ir+1)->op1;
  args[2^LJ_BE] = ir->op2;
  args[3^LJ_BE] = (ir+1)->op2;
#endif
  asm_setupresult(as, ir, &ci);
  emit_call(as, (void *)ci.func, 0);
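  /* emit_call() above already emitted the call; ci.func is cleared
  ** below so that asm_gencall() only sets up the arguments.
  */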
  ci.func = NULL;
  asm_gencall(as, &ci, args);
}
#endif

static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
{
  if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
#if LJ_SOFTFP
    asm_sfpmin_max(as, ir);
#else
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
#if !LJ_TARGET_MIPSR6
    if (dest == left) {
      emit_fg(as, MIPSI_MOVF_D, dest, right);
    } else {
      emit_fg(as, MIPSI_MOVT_D, dest, left);
      if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
    }
    emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? right : left, ismax ? left : right);
#else
    emit_fgh(as, ismax ? MIPSI_MAX_D : MIPSI_MIN_D, dest, left, right);
#endif
#endif
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (left == right) {
      if (dest != left) emit_move(as, dest, left);
    } else {
#if !LJ_TARGET_MIPSR6
      if (dest == left) {
	emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
      } else {
	emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
	if (dest != right) emit_move(as, dest, right);
      }
#else
      emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
      if (dest != right) {
	emit_dst(as, MIPSI_SELNEZ, RID_TMP, right, RID_TMP);
	emit_dst(as, MIPSI_SELEQZ, dest, left, RID_TMP);
      } else {
	emit_dst(as, MIPSI_SELEQZ, RID_TMP, left, RID_TMP);
	emit_dst(as, MIPSI_SELNEZ, dest, right, RID_TMP);
      }
#endif
      emit_dst(as, MIPSI_SLT, RID_TMP,
	       ismax ? left : right, ismax ? right : left);
    }
  }
}

#define asm_min(as, ir)		asm_min_max(as, ir, 0)
#define asm_max(as, ir)		asm_min_max(as, ir, 1)

/* -- Comparisons --------------------------------------------------------- */

#if LJ_SOFTFP
/* SFP comparisons. */
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
#if LJ_64
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
#else
  IRRef args[4];
  args[LJ_LE ? 0 : 1] = ir->op1; args[LJ_LE ? 1 : 0] = (ir+1)->op1;
  args[LJ_LE ? 2 : 3] = ir->op2; args[LJ_LE ? 3 : 2] = (ir+1)->op2;
#endif
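  /* The comparison helper appears to return -1/0/1 for lt/eq/gt and 2
  ** for unordered operands; each guard below exits the trace for every
  ** result value that falsifies the IR comparison.
  */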

  for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+(LJ_64?1:3); r++) {
    if (!rset_test(as->freeset, r) &&
	regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
      rset_clear(drop, r);
  }
  ra_evictset(as, drop);

  asm_setupresult(as, ir, ci);

  switch ((IROp)ir->o) {
  case IR_LT:
    asm_guard(as, MIPSI_BGEZ, RID_RET, 0);
    break;
  case IR_ULT:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 1);
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO);
    break;
  case IR_GE:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 2);
    asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
    break;
  case IR_LE:
    asm_guard(as, MIPSI_BGTZ, RID_RET, 0);
    break;
  case IR_GT:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 2);
    asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
    break;
  case IR_UGE:
    asm_guard(as, MIPSI_BLTZ, RID_RET, 0);
    break;
  case IR_ULE:
    asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP);
    emit_loadi(as, RID_TMP, 1);
    break;
  case IR_UGT: case IR_ABC:
    asm_guard(as, MIPSI_BLEZ, RID_RET, 0);
    break;
  case IR_EQ: case IR_NE:
    asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_RET, RID_ZERO);
  default:
    break;
  }
  asm_gencall(as, ci, args);
}
#endif

static void asm_comp(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT  ULT UGE ULE UGT. */
  IROp op = ir->o;
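  /* The guard exits when the comparison is false. For the integer
  ** path below, bit 2 of the op selects SLTU vs. SLT, bit 1 swaps the
  ** operands and the parity of bits 0/1 picks BNE vs. BEQ.
  */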
  if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
#if LJ_SOFTFP
    asm_sfpcomp(as, ir);
#else
#if !LJ_TARGET_MIPSR6
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
#else
    Reg tmp, right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
    asm_guard(as, (op&1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
    emit_fgh(as, MIPSI_CMP_LT_D + ((op&3) ^ ((op>>2)&1)), tmp, left, right);
#endif
#endif
  } else {
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    if (op == IR_ABC) op = IR_UGT;
    if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(as, ir->op2) == 0) {
      MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
			    ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
      asm_guard(as, mi, left, 0);
    } else {
      if (irref_isk(ir->op2)) {
	intptr_t k = get_kval(as, ir->op2);
	if ((op&2)) k++;
	if (checki16(k)) {
	  asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
	  emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
		   RID_TMP, left, k);
	  return;
	}
      }
      right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
      asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
      emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
	       RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
    }
  }
}

static void asm_equal(ASMState *as, IRIns *ir)
{
  Reg right, left = ra_alloc2(as, ir, (!LJ_SOFTFP && irt_isnum(ir->t)) ?
				       RSET_FPR : RSET_GPR);
  right = (left >> 8); left &= 255;
  if (!LJ_SOFTFP32 && irt_isnum(ir->t)) {
#if LJ_SOFTFP
    asm_sfpcomp(as, ir);
#elif !LJ_TARGET_MIPSR6
    asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
    emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
#else
    Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_FPR, left), right));
    asm_guard(as, (ir->o & 1) ? MIPSI_BC1NEZ : MIPSI_BC1EQZ, 0, (tmp&31));
    emit_fgh(as, MIPSI_CMP_EQ_D, tmp, left, right);
#endif
  } else {
    asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
  }
}

#if LJ_32 && LJ_HASFFI
/* 64 bit integer comparisons. */
static void asm_comp64(ASMState *as, IRIns *ir)
{
  /* ORDER IR: LT GE LE GT  ULT UGE ULE UGT. */
  IROp op = (ir-1)->o;
  MCLabel l_end;
  Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
  righthi = (lefthi >> 8); lefthi &= 255;
  leftlo = ra_alloc2(as, ir-1,
		     rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
  rightlo = (leftlo >> 8); leftlo &= 255;
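  /* The loword SLTU sits in the delay slot of the hiword equality
  ** branch: when the hiwords are equal it supplies the result,
  ** otherwise the hiword SLT overwrites RID_TMP before the guard.
  */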
  asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
  l_end = emit_label(as);
  if (lefthi != righthi)
    emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
	     (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
  emit_dst(as, MIPSI_SLTU, RID_TMP,
	   (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
  if (lefthi != righthi)
    emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
}

static void asm_comp64eq(ASMState *as, IRIns *ir)
{
  Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
  right = (left >> 8); left &= 255;
  asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
  tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
  emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
  emit_dst(as, MIPSI_XOR, tmp, left, right);
  left = ra_alloc2(as, ir-1, RSET_GPR);
  right = (left >> 8); left &= 255;
  emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
}
#endif

/* -- Split register ops -------------------------------------------------- */

/* Hiword op of a split 32/32 or 64/64 bit op. Previous op is the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
#if LJ_32 && (LJ_HASFFI || LJ_SOFTFP)
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    as->curins--;  /* Always skip the CONV. */
#if LJ_HASFFI && !LJ_SOFTFP
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o < IR_EQ) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_comp64(as, ir);
#endif
    return;
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_comp64eq(as, ir);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    as->curins--;  /* Handle both stores here. */
    if ((ir-1)->r != RID_SINK) {
      asm_xstore_(as, ir, LJ_LE ? 4 : 0);
      asm_xstore_(as, ir-1, LJ_LE ? 0 : 4);
    }
    return;
  }
#endif
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_32 && LJ_HASFFI
  case IR_ADD: as->curins--; asm_add64(as, ir); break;
  case IR_SUB: as->curins--; asm_sub64(as, ir); break;
  case IR_NEG: as->curins--; asm_neg64(as, ir); break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
#if LJ_32 && LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: case IR_TMPREF:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
  case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
  }
}

/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
  emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE);
  emit_lsglptr(as, MIPSI_LBU, RID_TMP,
	       (int32_t)offsetof(global_State, hookmask));
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
static void asm_stack_check(ASMState *as, BCReg topslot,
			    IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
  Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
  ExitNo oldsnap = as->snapno;
  rset_clear(allow, pbase);
#if LJ_32
  tmp = allow ? rset_pickbot(allow) :
		(pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
#else
  tmp = allow ? rset_pickbot(allow) : RID_RET;
#endif
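  /* Computes L->maxstack - BASE and exits to the fallback handler when
  ** fewer than topslot*8 bytes remain. Bottom-up as usual: the loads
  ** below execute before the SLTIU and branch above them.
  */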
  as->snapno = exitno;
  asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
  as->snapno = oldsnap;
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_tsi(as, MIPSI_AL, tmp, RID_SP, 0);
  else
    ra_modified(as, tmp);
  emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
  emit_dst(as, MIPSI_ASUBU, RID_TMP, tmp, pbase);
  emit_tsi(as, MIPSI_AL, tmp, tmp, offsetof(lua_State, maxstack));
  if (pbase == RID_TMP)
    emit_getgl(as, RID_TMP, jit_base);
  emit_getgl(as, tmp, cur_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_tsi(as, MIPSI_AS, tmp, RID_SP, 0);
}

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
#if LJ_32 || defined(LUA_USE_ASSERT)
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
#endif
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP32
      Reg tmp;
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      /* LJ_SOFTFP: must be a number constant. */
      lj_assertA(irref_isk(ref), "unsplit FP op");
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
      emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
      if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
      emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
#elif LJ_SOFTFP  /* && LJ_64 */
      Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
      emit_tsi(as, MIPSI_SD, src, RID_BASE, ofs);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
#endif
    } else {
#if LJ_32
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      Reg type;
      lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
		 "restore of IR type %d", irt_type(ir->t));
      if (!irt_ispri(ir->t)) {
	Reg src = ra_alloc1(as, ref, allow);
	rset_clear(allow, src);
	emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
	if (s == 0) continue;  /* Do not overwrite link to previous frame. */
	type = ra_allock(as, (int32_t)(*flinks--), allow);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
	type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
#endif
      } else if ((sn & SNAP_KEYINDEX)) {
	type = ra_allock(as, (int32_t)LJ_KEYINDEX, allow);
      } else {
	type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      }
      emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
#else
      asm_tvstore64(as, RID_BASE, ofs, ref);
#endif
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define MIPS_NOPATCH_GC_CHECK	MIPSI_OR

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  /* Assumes asm_snap_prep() already done. */
  asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  l_end[-3] = MIPS_NOPATCH_GC_CHECK;  /* Replace the nop after the call. */
  emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  tmp = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
  emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
  emit_getgl(as, tmp, gc.threshold);
  emit_getgl(as, RID_TMP, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
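  /* p[-1] is always the delay slot NOP; the loop branch itself is the
  ** J at p[-2], or the already-inverted conditional at p[-3].
  */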
  p[-1] = MIPSI_NOP;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guard already inverted the cond branch. Only patch the target. */
    p[-3] |= ((target-p+2) & 0x0000ffffu);
  } else {
    p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
  }
}

/* Fixup the tail of the loop. */
static void asm_loop_tail_fixup(ASMState *as)
{
  if (as->loopinv) as->mctop--;
}

/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_move(as, r, RID_BASE);
  }
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      rset_clear(allow, r);  /* Mark same BASE register as coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      rset_clear(allow, irp->r);
      emit_move(as, r, irp->r);  /* Move from coalesced parent reg. */
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
  int32_t spadj = as->T->spadjust;
  MCode *p = as->mctop-1;
  *p = spadj ? (MIPSI_AADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
  p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  as->mcp = as->mctop-2;  /* Leave room for branch plus nop or stack adj. */
  as->invmcp = as->loopref ? as->mcp : NULL;
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
#if LJ_32
  int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
#else
  int nslots = 0, ngpr = REGARG_NUMGPR;
#endif
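  /* Model the calling convention: o32 reserves four outgoing arg
  ** slots and lets FP args consume GPR pairs; n64 counts two spill
  ** slots per stack-passed argument once the arg registers are used
  ** up.
  */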
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
#if LJ_32
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t) &&
	nfpr > 0 && !(ci->flags & CCI_VARARG)) {
      nfpr--;
      ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else if (!LJ_SOFTFP && args[i] && irt_isnum(IR(args[i])->t)) {
      nfpr = 0;
      ngpr = ngpr & ~1;
      if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
    } else {
      nfpr = 0;
      if (ngpr > 0) ngpr--; else nslots++;
    }
#else
    if (ngpr > 0) ngpr--; else nslots += 2;
#endif
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  asm_sparejump_setup(as);
  asm_exitstub_setup(as);
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *px = exitstub_trace_addr(T, exitno);
  MCode *cstart = NULL, *cstop = NULL;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
  MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
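  /* Scan the trace for a `li TMP, exitno` paired with a preceding
  ** branch to this exit's stub. In-range branches are retargeted in
  ** place (16 bit word offset); out-of-range ones are routed through
  ** a spare jump slot in the mcarea.
  */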
  for (p++; p < pe; p++) {
    if (*p == exitload) {  /* Look for load of exit number. */
      /* Look for exitstub branch. Yes, this covers all used branch variants. */
      if (((p[-1] ^ (px-p)) & 0xffffu) == 0 &&
	  ((p[-1] & 0xf0000000u) == MIPSI_BEQ ||
	   (p[-1] & 0xfc1e0000u) == MIPSI_BLTZ ||
#if !LJ_TARGET_MIPSR6
	   (p[-1] & 0xffe00000u) == MIPSI_BC1F
#else
	   (p[-1] & 0xff600000u) == MIPSI_BC1EQZ
#endif
	  ) && p[-2] != MIPS_NOPATCH_GC_CHECK) {
	ptrdiff_t delta = target - p;
	if (((delta + 0x8000) >> 16) == 0) {  /* Patch in-range branch. */
	patchbranch:
	  p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
	  *p = MIPSI_NOP;  /* Replace the load of the exit number. */
	  cstop = p+1;
	  if (!cstart) cstart = p-1;
	} else {  /* Branch out of range. Use spare jump slot in mcarea. */
	  MCode *mcjump = asm_sparejump_use(mcarea, tjump);
	  if (mcjump) {
	    lj_mcode_sync(mcjump, mcjump+1);
	    delta = mcjump - p;
	    if (((delta + 0x8000) >> 16) == 0) {
	      goto patchbranch;
	    } else {
	      lj_assertJ(0, "spare jump out of range: -Osizemcode too big");
	    }
	  }
	  /* Ignore jump slot overflow. Child trace is simply not attached. */
	}
      } else if (p+1 == pe) {
	/* Patch NOP after code for inverted loop branch. Use of J is ok. */
	lj_assertJ(p[1] == MIPSI_NOP, "expected NOP");
	p[1] = tjump;
	*p = MIPSI_NOP;  /* Replace the load of the exit number. */
	cstop = p+2;
	if (!cstart) cstart = p+1;
      }
    }
  }
  if (cstart) lj_mcode_sync(cstart, cstop);
  lj_mcode_patch(J, mcarea, 1);
}