/*
** IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2014 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_asm_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#endif
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_ircall.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_target.h"

#ifdef LUA_USE_ASSERT
#include <stdio.h>
#endif

/* -- Assembler state and common macros ----------------------------------- */

/* Assembler state. */
typedef struct ASMState {
  RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */

  MCode *mcp;		/* Current MCode pointer (grows down). */
  MCode *mclim;		/* Lower limit for MCode memory + red zone. */
#ifdef LUA_USE_ASSERT
  MCode *mcp_prev;	/* Red zone overflow check. */
#endif

  IRIns *ir;		/* Copy of pointer to IR instructions/constants. */
  jit_State *J;		/* JIT compiler state. */

#if LJ_TARGET_X86ORX64
  x86ModRM mrm;		/* Fused x86 address operand. */
#endif

  RegSet freeset;	/* Set of free registers. */
  RegSet modset;	/* Set of registers modified inside the loop. */
  RegSet weakset;	/* Set of weakly referenced registers. */
  RegSet phiset;	/* Set of PHI registers. */

  uint32_t flags;	/* Copy of JIT compiler flags. */
  int loopinv;		/* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */

  int32_t evenspill;	/* Next even spill slot. */
  int32_t oddspill;	/* Next odd spill slot (or 0). */

  IRRef curins;		/* Reference of current instruction. */
  IRRef stopins;	/* Stop assembly before hitting this instruction. */
  IRRef orignins;	/* Original T->nins. */

  IRRef snapref;	/* Current snapshot is active after this reference. */
  IRRef snaprename;	/* Rename highwater mark for snapshot check. */
  SnapNo snapno;	/* Current snapshot number. */
  SnapNo loopsnapno;	/* Loop snapshot number. */

  IRRef fuseref;	/* Fusion limit (loopref, 0 or FUSE_DISABLED). */
  IRRef sectref;	/* Section base reference (loopref or 0). */
  IRRef loopref;	/* Reference of LOOP instruction (or 0). */

  BCReg topslot;	/* Number of slots for stack check (unless 0). */
  int32_t gcsteps;	/* Accumulated number of GC steps (per section). */

  GCtrace *T;		/* Trace to assemble. */
  GCtrace *parent;	/* Parent trace (or NULL). */

  MCode *mcbot;		/* Bottom of reserved MCode. */
  MCode *mctop;		/* Top of generated MCode. */
  MCode *mcloop;	/* Pointer to loop MCode (or NULL). */
  MCode *invmcp;	/* Points to invertible loop branch (or NULL). */
  MCode *flagmcp;	/* Pending opportunity to merge flag setting ins. */
  MCode *realign;	/* Realign loop if not NULL. */

#ifdef RID_NUM_KREF
  int32_t krefk[RID_NUM_KREF];
#endif
  IRRef1 phireg[RID_MAX];  /* PHI register references. */
  uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent instruction to RegSP map. */
} ASMState;

#define IR(ref)			(&as->ir[(ref)])

#define ASMREF_TMP1		REF_TRUE	/* Temp. register. */
#define ASMREF_TMP2		REF_FALSE	/* Temp. register. */
#define ASMREF_L		REF_NIL		/* Stores register for L. */

/* Check for variant to invariant references. */
#define iscrossref(as, ref)	((ref) < as->sectref)

/* Inhibit memory op fusion from variant to invariant references. */
#define FUSE_DISABLED		(~(IRRef)0)
#define mayfuse(as, ref)	((ref) > as->fuseref)
#define neverfuse(as)		(as->fuseref == FUSE_DISABLED)
#define canfuse(as, ir)		(!neverfuse(as) && !irt_isphi((ir)->t))
#define opisfusableload(o) \
  ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
   (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)

/* Sparse limit checks using a red zone before the actual limit. */
#define MCLIM_REDZONE	64
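
/* Note: callers are expected to emit at most MCLIM_REDZONE bytes of code
** between successive checkmclim() calls; the LUA_USE_ASSERT branch below
** verifies exactly this, while the red zone keeps the unchecked emission
** from running past the real buffer limit.
*/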

static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
{
  lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
}

static LJ_AINLINE void checkmclim(ASMState *as)
{
#ifdef LUA_USE_ASSERT
  if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
    IRIns *ir = IR(as->curins+1);
    fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d  %02d %04d %04d\n", as->mcp,
	    as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
    lua_assert(0);
  }
#endif
  if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
#ifdef LUA_USE_ASSERT
  as->mcp_prev = as->mcp;
#endif
}

#ifdef RID_NUM_KREF
#define ra_iskref(ref)		((ref) < RID_NUM_KREF)
#define ra_krefreg(ref)		((Reg)(RID_MIN_KREF + (Reg)(ref)))
#define ra_krefk(as, ref)	(as->krefk[(ref)])

static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k)
{
  IRRef ref = (IRRef)(r - RID_MIN_KREF);
  as->krefk[ref] = k;
  as->cost[r] = REGCOST(ref, ref);
}

#else
#define ra_iskref(ref)		0
#define ra_krefreg(ref)		RID_MIN_GPR
#define ra_krefk(as, ref)	0
#endif

/* Arch-specific field offsets. */
static const uint8_t field_ofs[IRFL__MAX+1] = {
#define FLOFS(name, ofs)	(uint8_t)(ofs),
IRFLDEF(FLOFS)
#undef FLOFS
  0
};

/* -- Target-specific instruction emitter --------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"
#elif LJ_TARGET_ARM
#include "lj_emit_arm.h"
#elif LJ_TARGET_PPC
#include "lj_emit_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_emit_mips.h"
#else
#error "Missing instruction emitter for target CPU"
#endif

/* -- Register allocator debugging ---------------------------------------- */

/* #define LUAJIT_DEBUG_RA */

#ifdef LUAJIT_DEBUG_RA

#include <stdio.h>
#include <stdarg.h>

#define RIDNAME(name)	#name,
static const char *const ra_regname[] = {
  GPRDEF(RIDNAME)
  FPRDEF(RIDNAME)
  VRIDDEF(RIDNAME)
  NULL
};
#undef RIDNAME

static char ra_dbg_buf[65536];
static char *ra_dbg_p;
static char *ra_dbg_merge;
static MCode *ra_dbg_mcp;

static void ra_dstart(void)
{
  ra_dbg_p = ra_dbg_buf;
  ra_dbg_merge = NULL;
  ra_dbg_mcp = NULL;
}

static void ra_dflush(void)
{
  fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  ra_dstart();
}

static void ra_dprintf(ASMState *as, const char *fmt, ...)
{
  char *p;
  va_list argp;
  va_start(argp, fmt);
  p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  ra_dbg_mcp = NULL;
  p += sprintf(p, "%08x  \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
  for (;;) {
    const char *e = strchr(fmt, '$');
    if (e == NULL) break;
    memcpy(p, fmt, (size_t)(e-fmt));
    p += e-fmt;
    if (e[1] == 'r') {
      Reg r = va_arg(argp, Reg) & RID_MASK;
      if (r <= RID_MAX) {
	const char *q;
	for (q = ra_regname[r]; *q; q++)
	  *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
      } else {
	*p++ = '?';
	lua_assert(0);
      }
    } else if (e[1] == 'f' || e[1] == 'i') {
      IRRef ref;
      if (e[1] == 'f')
	ref = va_arg(argp, IRRef);
      else
	ref = va_arg(argp, IRIns *) - as->ir;
      if (ref >= REF_BIAS)
	p += sprintf(p, "%04d", ref - REF_BIAS);
      else
	p += sprintf(p, "K%03d", REF_BIAS - ref);
    } else if (e[1] == 's') {
      uint32_t slot = va_arg(argp, uint32_t);
      p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
    } else if (e[1] == 'x') {
      p += sprintf(p, "%08x", va_arg(argp, int32_t));
    } else {
      lua_assert(0);
    }
    fmt = e+2;
  }
  va_end(argp);
  while (*fmt)
    *p++ = *fmt++;
  *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
    fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
    p = ra_dbg_buf;
  }
  ra_dbg_p = p;
}

#define RA_DBG_START()	ra_dstart()
#define RA_DBG_FLUSH()	ra_dflush()
#define RA_DBG_REF() \
  do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
       ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
#define RA_DBGX(x)	ra_dprintf x

#else
#define RA_DBG_START()	((void)0)
#define RA_DBG_FLUSH()	((void)0)
#define RA_DBG_REF()	((void)0)
#define RA_DBGX(x)	((void)0)
#endif

/* -- Register allocator -------------------------------------------------- */

#define ra_free(as, r)		rset_set(as->freeset, (r))
#define ra_modified(as, r)	rset_set(as->modset, (r))
#define ra_weak(as, r)		rset_set(as->weakset, (r))
#define ra_noweak(as, r)	rset_clear(as->weakset, (r))

#define ra_used(ir)		(ra_hasreg((ir)->r) || ra_hasspill((ir)->s))

/* Setup register allocator. */
static void ra_setup(ASMState *as)
{
  Reg r;
  /* Initially all regs (except the stack pointer) are free for use. */
  as->freeset = RSET_INIT;
  as->modset = RSET_EMPTY;
  as->weakset = RSET_EMPTY;
  as->phiset = RSET_EMPTY;
  memset(as->phireg, 0, sizeof(as->phireg));
  for (r = RID_MIN_GPR; r < RID_MAX; r++)
    as->cost[r] = REGCOST(~0u, 0u);
}

/* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
{
  IRIns *ir;
  Reg r;
  if (ra_iskref(ref)) {
    r = ra_krefreg(ref);
    lua_assert(!rset_test(as->freeset, r));
    ra_free(as, r);
    ra_modified(as, r);
    emit_loadi(as, r, ra_krefk(as, ref));
    return r;
  }
  ir = IR(ref);
  r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;  /* Do not keep any hint. */
  RA_DBGX((as, "remat     $i $r", ir, r));
#if !LJ_SOFTFP
  if (ir->o == IR_KNUM) {
    emit_loadn(as, r, ir_knum(ir));
  } else
#endif
  if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
    ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
    emit_getgl(as, r, jit_base);
  } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
    lua_assert(irt_isnil(ir->t));  /* REF_NIL stores ASMREF_L register. */
    emit_getgl(as, r, jit_L);
#if LJ_64
  } else if (ir->o == IR_KINT64) {
    emit_loadu64(as, r, ir_kint64(ir)->u64);
#endif
  } else {
    lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
	       ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
    emit_loadi(as, r, ir->i);
  }
  return r;
}

/* Force a spill. Allocate a new spill slot if needed. */
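/* Slots are handed out in even/odd pairs: a 64-bit value always takes the
** next even pair, while a 32-bit value first reuses a pending odd slot and
** otherwise takes an even slot, leaving its odd partner for a later 32-bit
** spill. At most one odd slot is pending at any time.
*/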
static int32_t ra_spill(ASMState *as, IRIns *ir)
{
  int32_t slot = ir->s;
  if (!ra_hasspill(slot)) {
    if (irt_is64(ir->t)) {
      slot = as->evenspill;
      as->evenspill += 2;
    } else if (as->oddspill) {
      slot = as->oddspill;
      as->oddspill = 0;
    } else {
      slot = as->evenspill;
      as->oddspill = slot+1;
      as->evenspill += 2;
    }
    if (as->evenspill > 256)
      lj_trace_err(as->J, LJ_TRERR_SPILLOV);
    ir->s = (uint8_t)slot;
  }
  return sps_scale(slot);
}

/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  Reg r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;
  return r;
}

/* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
{
  if (emit_canremat(ref)) {
    return ra_rematk(as, ref);
  } else {
    IRIns *ir = IR(ref);
    int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
    Reg r = ir->r;
    lua_assert(ra_hasreg(r));
    ra_sethint(ir->r, r);  /* Keep hint. */
    ra_free(as, r);
    if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
      ra_modified(as, r);
      RA_DBGX((as, "restore   $i $r", ir, r));
      emit_spload(as, ir, r, ofs);
    }
    return r;
  }
}

/* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
{
  RA_DBGX((as, "save      $i $r", ir, r));
  emit_spstore(as, ir, r, sps_scale(ir->s));
}

#define MINCOST(name) \
  if (rset_test(RSET_ALL, RID_##name) && \
      LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
    cost = as->cost[RID_##name];

/* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
{
  IRRef ref;
  RegCost cost = ~(RegCost)0;
  lua_assert(allow != RSET_EMPTY);
  if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
    GPRDEF(MINCOST)
  } else {
    FPRDEF(MINCOST)
  }
  ref = regcost_ref(cost);
  lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
  /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  if (!irref_isk(ref) && (as->weakset & allow)) {
    IRIns *ir = IR(ref);
    if (!rset_test(as->weakset, ir->r))
      ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  }
  return ra_restore(as, ref);
}

/* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
{
  RegSet pick = as->freeset & allow;
  if (!pick)
    return ra_evict(as, allow);
  else
    return rset_picktop(pick);
}

/* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
{
  Reg r = ra_pick(as, allow);
  ra_modified(as, r);
  RA_DBGX((as, "scratch        $r", r));
  return r;
}

/* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
{
  RegSet work;
  as->modset |= drop;
#if !LJ_SOFTFP
  work = (drop & ~as->freeset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = (drop & ~as->freeset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
}

/* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
{
  RegSet work;
#if !LJ_SOFTFP
  work = ~as->freeset & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
#endif
  work = ~as->freeset & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}

#ifdef RID_NUM_KREF
/* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
{
  /* First try to find a register which already holds the same constant. */
  RegSet pick, work = ~as->freeset & RSET_GPR;
  Reg r;
  while (work) {
    IRRef ref;
    r = rset_pickbot(work);
    ref = regcost_ref(as->cost[r]);
    if (ref < ASMREF_L &&
	k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
      return r;
    rset_clear(work, r);
  }
  pick = as->freeset & allow;
  if (pick) {
    /* Constants should preferably get unmodified registers. */
    if ((pick & ~as->modset))
      pick &= ~as->modset;
    r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  } else {
    r = ra_evict(as, allow);
  }
  RA_DBGX((as, "allock    $x $r", k, r));
  ra_setkref(as, r, k);
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  return r;
}

/* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, int32_t k, Reg r)
{
  Reg kr = ra_allock(as, k, RID2RSET(r));
  if (kr != r) {
    IRIns irdummy;
    irdummy.t.irt = IRT_INT;
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, &irdummy, r, kr);
  }
}
#else
#define ra_allockreg(as, k, r)		emit_loadi(as, (r), (k))
#endif

/* Allocate a register for ref from the allowed set of registers.
** Note: this function assumes the ref does NOT have a register yet!
** Picks an optimal register, sets the cost and marks the register as non-free.
*/
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  RegSet pick = as->freeset & allow;
  Reg r;
  lua_assert(ra_noreg(ir->r));
  if (pick) {
    /* First check register hint from propagation or PHI. */
    if (ra_hashint(ir->r)) {
      r = ra_gethint(ir->r);
      if (rset_test(pick, r))  /* Use hint register if possible. */
	goto found;
      /* Rematerialization is cheaper than missing a hint. */
      if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
	ra_rematk(as, regcost_ref(as->cost[r]));
	goto found;
      }
      RA_DBGX((as, "hintmiss  $f $r", ref, r));
    }
    /* Invariants should preferably get unmodified registers. */
    if (ref < as->loopref && !irt_isphi(ir->t)) {
      if ((pick & ~as->modset))
	pick &= ~as->modset;
      r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
    } else {
      /* We've got plenty of regs, so get callee-save regs if possible. */
      if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
	pick &= ~RSET_SCRATCH;
      r = rset_picktop(pick);
    }
  } else {
    r = ra_evict(as, allow);
  }
found:
  RA_DBGX((as, "alloc     $f $r", ref, r));
  ir->r = (uint8_t)r;
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  return r;
}

/* Allocate a register on-demand. */
static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  /* Note: allow is ignored if the register is already allocated. */
  if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
  ra_noweak(as, r);
  return r;
}

/* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
{
  IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]);
  IRIns *ir = IR(ref);
  ir->r = (uint8_t)up;
  as->cost[down] = 0;
  lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
  lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
  ra_free(as, down);  /* 'down' is free ... */
  ra_modified(as, down);
  rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  ra_noweak(as, up);
  RA_DBGX((as, "rename    $f $r $r", regcost_ref(as->cost[up]), down, up));
  emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
    lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno);
    ren = tref_ref(lj_ir_emit(as->J));
    as->ir = as->T->ir;  /* The IR may have been reallocated. */
    IR(ren)->r = (uint8_t)down;
    IR(ren)->s = SPS_NONE;
  }
}

/* Pick a destination register (marked as free).
** Caveat: allow is ignored if there's already a destination register.
** Use ra_destreg() to get a specific register.
*/
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
{
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
  } else {
    if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
      dest = ra_gethint(dest);
      ra_modified(as, dest);
      RA_DBGX((as, "dest           $r", dest));
    } else {
      dest = ra_scratch(as, allow);
    }
    ir->r = dest;
  }
  if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  return dest;
}

/* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
{
  Reg dest = ra_dest(as, ir, RID2RSET(r));
  if (dest != r) {
    lua_assert(rset_test(as->freeset, r));
    ra_modified(as, r);
    emit_movrr(as, ir, dest, r);
  }
}

#if LJ_TARGET_X86ORX64
/* Propagate dest register to left reference. Emit moves as needed.
** This is a required fixup step for all 2-operand machine instructions.
*/
static void ra_left(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    if (irref_isk(lref)) {
      if (ir->o == IR_KNUM) {
	cTValue *tv = ir_knum(ir);
	/* FP remat needs a load except for +0. Still better than eviction. */
	if (tvispzero(tv) || !(as->freeset & RSET_FPR)) {
	  emit_loadn(as, dest, tv);
	  return;
	}
#if LJ_64
      } else if (ir->o == IR_KINT64) {
	emit_loadu64(as, dest, ir_kint64(ir)->u64);
	return;
#endif
      } else {
	lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
		   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
	emit_loadi(as, dest, ir->i);
	return;
      }
    }
    if (!ra_hashint(left) && !iscrossref(as, lref))
      ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#else
/* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref,
		       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#endif

#if !LJ_64
/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
{
  Reg destlo = ir->r, desthi = (ir+1)->r;
  /* First spill unrelated refs blocking the destination registers. */
  if (!rset_test(as->freeset, RID_RETLO) &&
      destlo != RID_RETLO && desthi != RID_RETLO)
    ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  if (!rset_test(as->freeset, RID_RETHI) &&
      destlo != RID_RETHI && desthi != RID_RETHI)
    ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  /* Next free the destination registers (if any). */
  if (ra_hasreg(destlo)) {
    ra_free(as, destlo);
    ra_modified(as, destlo);
  } else {
    destlo = RID_RETLO;
  }
  if (ra_hasreg(desthi)) {
    ra_free(as, desthi);
    ra_modified(as, desthi);
  } else {
    desthi = RID_RETHI;
  }
  /* Check for conflicts and shuffle the registers as needed. */
  if (destlo == RID_RETHI) {
    if (desthi == RID_RETLO) {
#if LJ_TARGET_X86
      *--as->mcp = XI_XCHGa + RID_RETHI;
#else
      emit_movrr(as, ir, RID_RETHI, RID_TMP);
      emit_movrr(as, ir, RID_RETLO, RID_RETHI);
      emit_movrr(as, ir, RID_TMP, RID_RETLO);
#endif
    } else {
      emit_movrr(as, ir, RID_RETHI, RID_RETLO);
      if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    }
  } else if (desthi == RID_RETLO) {
    emit_movrr(as, ir, RID_RETLO, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  } else {
    if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  }
  /* Restore spill slots (if any). */
  if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
}
#endif

/* -- Snapshot handling ---------------------------------------------------- */

/* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
{
  Reg r;
  for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
    if (irref_isk(regcost_ref(as->cost[r])))
      return 1;
  return 0;
}

/* Check whether a sunk store corresponds to an allocation. */
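/* For sunk stores, irs->s caches the IR distance from the allocation ira to
** the store (255 if out of range), so the common case below is a single
** pointer comparison. Otherwise the store's key chain is walked back to
** the allocation.
*/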
static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
{
  if (irs->s == 255) {
    if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
	irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
      IRIns *irk = IR(irs->op1);
      if (irk->o == IR_AREF || irk->o == IR_HREFK)
	irk = IR(irk->op1);
      return (IR(irk->op1) == ira);
    }
    return 0;
  } else {
    return (ira + irs->s == irs);  /* Quick check. */
  }
}

/* Allocate register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) {
    if (ir->r == RID_SINK) {
      ir->r = RID_SUNK;
#if LJ_HASFFI
      if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
	asm_snap_alloc1(as, ir->op2);
	if (LJ_32 && (ir+1)->o == IR_HIOP)
	  asm_snap_alloc1(as, (ir+1)->op2);
      } else
#endif
      {  /* Allocate stored values for TNEW, TDUP and CNEW. */
	IRIns *irs;
	lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW);
	for (irs = IR(as->snapref-1); irs > ir; irs--)
	  if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
	    lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
		       irs->o == IR_FSTORE || irs->o == IR_XSTORE);
	    asm_snap_alloc1(as, irs->op2);
	    if (LJ_32 && (irs+1)->o == IR_HIOP)
	      asm_snap_alloc1(as, (irs+1)->op2);
	  }
      }
    } else {
      RegSet allow;
      if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
	IRIns *irc;
	for (irc = IR(as->curins); irc > ir; irc--)
	  if ((irc->op1 == ref || irc->op2 == ref) &&
	      !(irc->r == RID_SINK || irc->r == RID_SUNK))
	    goto nosink;  /* Don't sink conversion if result is used. */
	asm_snap_alloc1(as, ir->op1);
	return;
      }
    nosink:
      allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
      if ((as->freeset & allow) ||
	       (allow == RSET_FPR && asm_snap_canremat(as))) {
	/* Get a weak register if we have a free one or can rematerialize. */
	Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
	if (!irt_isphi(ir->t))
	  ra_weak(as, r);  /* But mark it as weakly referenced. */
	checkmclim(as);
	RA_DBGX((as, "snapreg   $f $r", ref, ir->r));
      } else {
	ra_spill(as, ir);  /* Otherwise force a spill slot. */
	RA_DBGX((as, "snapspill $f $s", ref, ir->s));
      }
    }
  }
}

/* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as)
{
  SnapShot *snap = &as->T->snap[as->snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (!irref_isk(ref)) {
      asm_snap_alloc1(as, ref);
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
	lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
	asm_snap_alloc1(as, ref+1);
      }
    }
  }
}

/* All guards for a snapshot use the same exitno. This is currently the
** same as the snapshot number. Since the exact origin of the exit cannot
** be determined, all guards for the same snapshot must exit with the same
** RegSP mapping.
** A renamed ref which has been used in a prior guard for the same snapshot
** would cause an inconsistency. The easy way out is to force a spill slot.
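**
** Example: if a ref were renamed between two guards that share the same
** snapshot, the two guards would expect the value in different registers,
** yet both restore state from the same RegSP map. Spilling the ref makes
** all guards of the snapshot agree.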
*/
static int asm_snap_checkrename(ASMState *as, IRRef ren)
{
  SnapShot *snap = &as->T->snap[as->snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
      IRIns *ir = IR(ref);
      ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
      RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
      return 1;  /* Found. */
    }
  }
  return 0;  /* Not found. */
}

/* Prepare snapshot for next guard instruction. */
static void asm_snap_prep(ASMState *as)
{
  if (as->curins < as->snapref) {
    do {
      if (as->snapno == 0) return;  /* Called by sunk stores before snap #0. */
      as->snapno--;
      as->snapref = as->T->snap[as->snapno].ref;
    } while (as->curins < as->snapref);
    asm_snap_alloc(as);
    as->snaprename = as->T->nins;
  } else {
    /* Process any renames above the highwater mark. */
    for (; as->snaprename < as->T->nins; as->snaprename++) {
      IRIns *ir = IR(as->snaprename);
      if (asm_snap_checkrename(as, ir->op1))
	ir->op2 = REF_BIAS-1;  /* Kill rename. */
    }
  }
}

/* -- Miscellaneous helpers ----------------------------------------------- */

/* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
			    const CCallInfo *ci, IRRef *args)
{
  uint32_t n = CCI_NARGS(ci);
  lua_assert(n <= CCI_NARGS_MAX*2);  /* Account for split args. */
  if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  while (n-- > 1) {
    ir = IR(ir->op1);
    lua_assert(ir->o == IR_CARG);
    args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  }
  args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  lua_assert(IR(ir->op1)->o != IR_CARG);
}

/* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
{
  uint32_t nargs = 0;
  if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
    IRIns *ira = IR(ir->op1);
    nargs++;
    while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  }
#if LJ_HASFFI
  if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
    CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
    CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
    nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
#if LJ_TARGET_X86
    nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
#endif
  }
#endif
  return (nargs | (ir->t.irt << CCI_OTSHIFT));
}

/* Calculate stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
{
  if (as->evenspill <= SPS_FIXED)
    return 0;
  return sps_scale(sps_align(as->evenspill));
}

/* Must match with hash*() in lj_tab.c. */
static uint32_t ir_khash(IRIns *ir)
{
  uint32_t lo, hi;
  if (irt_isstr(ir->t)) {
    return ir_kstr(ir)->hash;
  } else if (irt_isnum(ir->t)) {
    lo = ir_knum(ir)->u32.lo;
    hi = ir_knum(ir)->u32.hi << 1;
  } else if (irt_ispri(ir->t)) {
    lua_assert(!irt_isnil(ir->t));
    return irt_type(ir->t)-IRT_FALSE;
  } else {
    lua_assert(irt_isgcv(ir->t));
    lo = u32ptr(ir_kgc(ir));
    hi = lo + HASH_BIAS;
  }
  return hashrot(lo, hi);
}

/* -- Allocations --------------------------------------------------------- */

static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);

static void asm_snew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  IRRef args[3];
  args[0] = ASMREF_L;  /* lua_State *L    */
  args[1] = ir->op1;   /* const char *str */
  args[2] = ir->op2;   /* size_t len      */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

static void asm_tnew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  IRRef args[2];
  args[0] = ASMREF_L;     /* lua_State *L    */
  args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
  ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
}

static void asm_tdup(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  IRRef args[2];
  args[0] = ASMREF_L;  /* lua_State *L    */
  args[1] = ir->op1;   /* const GCtab *kt */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
}

static void asm_gc_check(ASMState *as);

/* Explicit GC step. */
static void asm_gcstep(ASMState *as, IRIns *ir)
{
  IRIns *ira;
  for (ira = IR(as->stopins+1); ira < ir; ira++)
    if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
	 (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
	ra_used(ira))
      as->gcsteps++;
  if (as->gcsteps)
    asm_gc_check(as);
  as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
}

/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
			  RegSet allow)
{
  RegSet candidates = blocked & allow;
  if (candidates) {  /* If this register file has candidates. */
    /* Note: the set for ra_pick cannot be empty, since each register file
    ** has some registers never allocated to PHIs.
    */
    Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
    if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
      candidates = candidates & ~blockedby;
    down = rset_picktop(candidates);  /* Pick candidate PHI register. */
    ra_rename(as, down, up);  /* And rename it to the free register. */
  }
}

/* PHI register shuffling.
**
** The allocator tries hard to preserve PHI register assignments across
** the loop body. Most of the time this loop does nothing, since there
** are no register mismatches.
**
** If a register mismatch is detected and ...
** - the register is currently free: rename it.
** - the register is blocked by an invariant: restore/remat and rename it.
** - Otherwise the register is used by another PHI, so mark it as blocked.
**
** The renames are order-sensitive, so just retry the loop if a register
** is marked as blocked, but has been freed in the meantime. A cycle is
** detected if all of the blocked registers are allocated. To break the
** cycle rename one of them to a free register and retry.
**
** Note that PHI spill slots are kept in sync and don't need to be shuffled.
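**
** Cycle example: PHI x is assigned r1 but its left value currently lives
** in r2, while PHI y is assigned r2 with its left value in r1. Neither
** rename can go first, so asm_phi_break() renames one of the blocked
** registers to a free register, after which the remaining renames resolve.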
*/
static void asm_phi_shuffle(ASMState *as)
{
  RegSet work;

  /* Find and resolve PHI register mismatches. */
  for (;;) {
    RegSet blocked = RSET_EMPTY;
    RegSet blockedby = RSET_EMPTY;
    RegSet phiset = as->phiset;
    while (phiset) {  /* Check all left PHI operand registers. */
      Reg r = rset_pickbot(phiset);
      IRIns *irl = IR(as->phireg[r]);
      Reg left = irl->r;
      if (r != left) {  /* Mismatch? */
	if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
	  IRRef ref = regcost_ref(as->cost[r]);
	  /* Blocked by other PHI (w/reg)? */
	  if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
	    rset_set(blocked, r);
	    if (ra_hasreg(left))
	      rset_set(blockedby, left);
	    left = RID_NONE;
	  } else {  /* Otherwise grab register from invariant. */
	    ra_restore(as, ref);
	    checkmclim(as);
	  }
	}
	if (ra_hasreg(left)) {
	  ra_rename(as, left, r);
	  checkmclim(as);
	}
      }
      rset_clear(phiset, r);
    }
    if (!blocked) break;  /* Finished. */
    if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
      asm_phi_break(as, blocked, blockedby, RSET_GPR);
      if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
      checkmclim(as);
    }  /* Else retry some more renames. */
  }

  /* Restore/remat invariants whose registers are modified inside the loop. */
#if !LJ_SOFTFP
  work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = as->modset & ~(as->freeset | as->phiset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }

  /* Allocate and save all unsaved PHI regs and clear marks. */
  work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
      irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
      ra_alloc1(as, lref, RID2RSET(r));
      ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}

/* Copy unsynced left/right PHI spill slots. Rarely needed. */
static void asm_phi_copyspill(ASMState *as)
{
  int need = 0;
  IRIns *ir;
  for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
    if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
      need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
  if ((need & 1)) {  /* Copy integer spill slots. */
#if !LJ_TARGET_X86ORX64
    Reg r = RID_TMP;
#else
    Reg r = RID_RET;
    if ((as->freeset & RSET_GPR))
      r = rset_pickbot((as->freeset & RSET_GPR));
    else
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	  checkmclim(as);
	}
      }
    }
#if LJ_TARGET_X86ORX64
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
  }
#if !LJ_SOFTFP
  if ((need & 2)) {  /* Copy FP spill slots. */
#if LJ_TARGET_X86
    Reg r = RID_XMM0;
#else
    Reg r = RID_FPRET;
#endif
    if ((as->freeset & RSET_FPR))
      r = rset_pickbot((as->freeset & RSET_FPR));
    if (!rset_test(as->freeset, r))
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	  checkmclim(as);
	}
      }
    }
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  }
#endif
}

/* Emit renames for left PHIs which are only spilled outside the loop. */
static void asm_phi_fixup(ASMState *as)
{
  RegSet work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (irt_ismarked(ir->t)) {
      irt_clearmark(ir->t);
      /* Left PHI gained a spill slot before the loop? */
      if (ra_hasspill(ir->s)) {
	IRRef ren;
	lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
	ren = tref_ref(lj_ir_emit(as->J));
	as->ir = as->T->ir;  /* The IR may have been reallocated. */
	IR(ren)->r = (uint8_t)r;
	IR(ren)->s = SPS_NONE;
      }
    }
    rset_clear(work, r);
  }
}

/* Setup right PHI reference. */
static void asm_phi(ASMState *as, IRIns *ir)
{
  RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
		 ~as->phiset;
  RegSet afree = (as->freeset & allow);
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  if (ir->r == RID_SINK)  /* Sink PHI. */
    return;
  /* Spill slot shuffling is not implemented yet (but rarely needed). */
  if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
    lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
  if ((afree & (afree-1))) {  /* Two or more free registers? */
    Reg r;
    if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
      r = ra_allocref(as, ir->op2, allow);
    } else {  /* Duplicate right PHI, need a copy (rare). */
      r = ra_scratch(as, allow);
      emit_movrr(as, irr, r, irr->r);
    }
    ir->r = (uint8_t)r;
    rset_set(as->phiset, r);
    as->phireg[r] = (IRRef1)ir->op1;
    irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
    if (ra_noreg(irl->r))
      ra_sethint(irl->r, r); /* Set register hint for left PHI. */
  } else {  /* Otherwise allocate a spill slot. */
    /* This is overly restrictive, but it triggers only on synthetic code. */
    if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
      lj_trace_err(as->J, LJ_TRERR_NYIPHI);
    ra_spill(as, ir);
    irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
  }
}

static void asm_loop_fixup(ASMState *as);

/* Middle part of a loop. */
static void asm_loop(ASMState *as)
{
  MCode *mcspill;
  /* LOOP is a guard, so the snapno is up to date. */
  as->loopsnapno = as->snapno;
  if (as->gcsteps)
    asm_gc_check(as);
  /* LOOP marks the transition from the variant to the invariant part. */
  as->flagmcp = as->invmcp = NULL;
  as->sectref = 0;
  if (!neverfuse(as)) as->fuseref = 0;
  asm_phi_shuffle(as);
  mcspill = as->mcp;
  asm_phi_copyspill(as);
  asm_loop_fixup(as);
  as->mcloop = as->mcp;
  RA_DBGX((as, "===== LOOP ====="));
  if (!as->realign) RA_DBG_FLUSH();
  if (as->mcp != mcspill)
    emit_jmp(as, mcspill);
}

/* -- Target-specific assembler ------------------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_asm_x86.h"
#elif LJ_TARGET_ARM
#include "lj_asm_arm.h"
#elif LJ_TARGET_PPC
#include "lj_asm_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_asm_mips.h"
#else
#error "Missing assembler for target CPU"
#endif

/* -- Head of trace ------------------------------------------------------- */

/* Head of a root trace. */
static void asm_head_root(ASMState *as)
{
  int32_t spadj;
  asm_head_root_base(as);
  emit_setvmstate(as, (int32_t)as->T->traceno);
  spadj = asm_stack_adjust(as);
  as->T->spadjust = (uint16_t)spadj;
  emit_spsub(as, spadj);
  /* Root traces assume a checked stack for the starting proto. */
  as->T->topslot = gcref(as->T->startpt)->pt.framesize;
}

/* Head of a side trace.
**
** The current simplistic algorithm requires that all slots inherited
** from the parent are live in a register between pass 2 and pass 3. This
** avoids the complexity of stack slot shuffling. But of course this may
** overflow the register set in some cases and cause the dreaded error:
** "NYI: register coalescing too complex". A refined algorithm is needed.
*/
static void asm_head_side(ASMState *as)
{
  IRRef1 sloadins[RID_MAX];
  RegSet allow = RSET_ALL;  /* Inverse of all coalesced registers. */
  RegSet live = RSET_EMPTY;  /* Live parent registers. */
  IRIns *irp = &as->parent->ir[REF_BASE];  /* Parent base. */
  int32_t spadj, spdelta;
  int pass2 = 0;
  int pass3 = 0;
  IRRef i;

  allow = asm_head_side_base(as, irp, allow);

  /* Scan all parent SLOADs and collect register dependencies. */
  for (i = as->stopins; i > REF_BASE; i--) {
    IRIns *ir = IR(i);
    RegSP rs;
    lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
	       (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL);
    rs = as->parentmap[i - REF_FIRST];
    if (ra_hasreg(ir->r)) {
      rset_clear(allow, ir->r);
      if (ra_hasspill(ir->s)) {
	ra_save(as, ir, ir->r);
	checkmclim(as);
      }
    } else if (ra_hasspill(ir->s)) {
      irt_setmark(ir->t);
      pass2 = 1;
    }
    if (ir->r == rs) {  /* Coalesce matching registers right now. */
      ra_free(as, ir->r);
    } else if (ra_hasspill(regsp_spill(rs))) {
      if (ra_hasreg(ir->r))
	pass3 = 1;
    } else if (ra_used(ir)) {
      sloadins[rs] = (IRRef1)i;
      rset_set(live, rs);  /* Block live parent register. */
    }
  }

  /* Calculate stack frame adjustment. */
  spadj = asm_stack_adjust(as);
  spdelta = spadj - (int32_t)as->parent->spadjust;
  if (spdelta < 0) {  /* Don't shrink the stack frame. */
    spadj = (int32_t)as->parent->spadjust;
    spdelta = 0;
  }
  as->T->spadjust = (uint16_t)spadj;

  /* Reload spilled target registers. */
  if (pass2) {
    for (i = as->stopins; i > REF_BASE; i--) {
      IRIns *ir = IR(i);
      if (irt_ismarked(ir->t)) {
	RegSet mask;
	Reg r;
	RegSP rs;
	irt_clearmark(ir->t);
	rs = as->parentmap[i - REF_FIRST];
	if (!ra_hasspill(regsp_spill(rs)))
	  ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
	else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
	  continue;  /* Same spill slot, do nothing. */
	mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
	if (mask == RSET_EMPTY)
	  lj_trace_err(as->J, LJ_TRERR_NYICOAL);
	r = ra_allocref(as, i, mask);
	ra_save(as, ir, r);
	rset_clear(allow, r);
	if (r == rs) {  /* Coalesce matching registers right now. */
	  ra_free(as, r);
	  rset_clear(live, r);
	} else if (ra_hasspill(regsp_spill(rs))) {
	  pass3 = 1;
	}
	checkmclim(as);
      }
    }
  }

  /* Store trace number and adjust stack frame relative to the parent. */
  emit_setvmstate(as, (int32_t)as->T->traceno);
  emit_spsub(as, spdelta);

#if !LJ_TARGET_X86ORX64
  /* Restore BASE register from parent spill slot. */
  if (ra_hasspill(irp->s))
    emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
#endif

  /* Restore target registers from parent spill slots. */
  if (pass3) {
    RegSet work = ~as->freeset & RSET_ALL;
    while (work) {
      Reg r = rset_pickbot(work);
      IRRef ref = regcost_ref(as->cost[r]);
      RegSP rs = as->parentmap[ref - REF_FIRST];
      rset_clear(work, r);
      if (ra_hasspill(regsp_spill(rs))) {
	int32_t ofs = sps_scale(regsp_spill(rs));
	ra_free(as, r);
	emit_spload(as, IR(ref), r, ofs);
	checkmclim(as);
      }
    }
  }

  /* Shuffle registers to match up target regs with parent regs. */
  for (;;) {
    RegSet work;

    /* Repeatedly coalesce free live registers by moving to their target. */
    while ((work = as->freeset & live) != RSET_EMPTY) {
      Reg rp = rset_pickbot(work);
      IRIns *ir = IR(sloadins[rp]);
      rset_clear(live, rp);
      rset_clear(allow, rp);
      ra_free(as, ir->r);
      emit_movrr(as, ir, ir->r, rp);
      checkmclim(as);
    }

    /* We're done if no live registers remain. */
    if (live == RSET_EMPTY)
      break;

    /* Break cycles by renaming one target to a temp. register. */
    if (live & RSET_GPR) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
      if (tmpset == RSET_EMPTY)
	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
    }
    if (!LJ_SOFTFP && (live & RSET_FPR)) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
      if (tmpset == RSET_EMPTY)
	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
    }
    checkmclim(as);
    /* Continue with coalescing to fix up the broken cycle(s). */
  }

  /* Inherit top stack slot already checked by parent trace. */
  as->T->topslot = as->parent->topslot;
  if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
#ifdef EXITSTATE_CHECKEXIT
    /* Highest exit + 1 indicates stack check. */
    ExitNo exitno = as->T->nsnap;
#else
    /* Reuse the parent exit in the context of the parent trace. */
    ExitNo exitno = as->J->exitno;
#endif
    as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
    asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
  }
}

/* -- Tail of trace ------------------------------------------------------- */

/* Get base slot for a snapshot. */
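/* This is the slot of the topmost frame link recorded in the snapshot, or 0
** if there is none. asm_tail_link() below rebases BASE by 8 bytes per slot,
** i.e. one TValue per Lua stack slot.
*/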
static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n;
  for (n = snap->nent; n > 0; n--) {
    SnapEntry sn = map[n-1];
    if ((sn & SNAP_FRAME)) {
      *gotframe = 1;
      return snap_slot(sn);
    }
  }
  return 0;
}

/* Link to another trace. */
static void asm_tail_link(ASMState *as)
{
  SnapNo snapno = as->T->nsnap-1;  /* Last snapshot. */
  SnapShot *snap = &as->T->snap[snapno];
  int gotframe = 0;
  BCReg baseslot = asm_baseslot(as, snap, &gotframe);

  as->topslot = snap->topslot;
  checkmclim(as);
  ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));

  if (as->T->link == 0) {
    /* Setup fixed registers for exit to interpreter. */
    const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]);
    int32_t mres;
    if (bc_op(*pc) == BC_JLOOP) {  /* NYI: find a better way to do this. */
      BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
      if (bc_isret(bc_op(*retpc)))
	pc = retpc;
    }
    ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
    ra_allockreg(as, i32ptr(pc), RID_LPC);
    mres = (int32_t)(snap->nslots - baseslot);
    switch (bc_op(*pc)) {
    case BC_CALLM: case BC_CALLMT:
      mres -= (int32_t)(1 + bc_a(*pc) + bc_c(*pc)); break;
    case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
    case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
    default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
    }
    ra_allockreg(as, mres, RID_RET);  /* Return MULTRES or 0. */
  } else if (baseslot) {
    /* Save modified BASE for linking to trace with higher start frame. */
    emit_setgl(as, RID_BASE, jit_base);
  }
  emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);

  /* Sync the interpreter state with the on-trace state. */
  asm_stack_restore(as, snap);

  /* Root traces that add frames need to check the stack at the end. */
  if (!as->parent && gotframe)
    asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
}
1585 
1586 /* -- Trace setup --------------------------------------------------------- */
1587 
1588 /* Clear reg/sp for all instructions and add register hints. */
asm_setup_regsp(ASMState * as)1589 static void asm_setup_regsp(ASMState *as)
1590 {
1591   GCtrace *T = as->T;
1592   int sink = T->sinktags;
1593   IRRef nins = T->nins;
1594   IRIns *ir, *lastir;
1595   int inloop;
1596 #if LJ_TARGET_ARM
1597   uint32_t rload = 0xa6402a64;
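  /* Packed pool of load register hints: each nibble is a register number,
  ** consumed via (rload & 15) and rotated by 4 bits per load below.
  */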
#endif

  ra_setup(as);

  /* Clear reg/sp for constants. */
  for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++)
    ir->prev = REGSP_INIT;

  /* REF_BASE is used for implicit references to the BASE register. */
  lastir->prev = REGSP_HINT(RID_BASE);

  ir = IR(nins-1);
  if (ir->o == IR_RENAME) {
    do { ir--; nins--; } while (ir->o == IR_RENAME);
    T->nins = nins;  /* Remove any renames left over from ASM restart. */
  }
  as->snaprename = nins;
  as->snapref = nins;
  as->snapno = T->nsnap;

  as->stopins = REF_BASE;
  as->orignins = nins;
  as->curins = nins;

  /* Setup register hints for parent link instructions. */
  ir = IR(REF_FIRST);
  if (as->parent) {
    uint16_t *p;
    lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir);
    if (lastir - ir > LJ_MAX_JSLOTS)
      lj_trace_err(as->J, LJ_TRERR_NYICOAL);
    as->stopins = (IRRef)((lastir-1) - as->ir);
    for (p = as->parentmap; ir < lastir; ir++) {
      RegSP rs = ir->prev;
      *p++ = (uint16_t)rs;  /* Copy original parent RegSP to parentmap. */
      if (!ra_hasspill(regsp_spill(rs)))
	ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
      else
	ir->prev = REGSP_INIT;
    }
  }

  inloop = 0;
  as->evenspill = SPS_FIRST;
  for (lastir = IR(nins); ir < lastir; ir++) {
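    /* Sunk instructions are never assembled; RID_SUNK entries stem from a
    ** previous assembly attempt and are first reverted back to RID_SINK.
    */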
    if (sink) {
      if (ir->r == RID_SINK)
	continue;
      if (ir->r == RID_SUNK) {  /* Revert after ASM restart. */
	ir->r = RID_SINK;
	continue;
      }
    }
    switch (ir->o) {
    case IR_LOOP:
      inloop = 1;
      break;
#if LJ_TARGET_ARM
    case IR_SLOAD:
      if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
	break;
      /* fallthrough */
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
      ir->prev = (uint16_t)REGSP_HINT((rload & 15));
      rload = lj_ror(rload, 4);
      continue;
#endif
    case IR_CALLXS: {
      CCallInfo ci;
      ci.flags = asm_callx_flags(as, ir);
      ir->prev = asm_setup_call_slots(as, ir, &ci);
      if (inloop)
	as->modset |= RSET_SCRATCH;
      continue;
      }
    case IR_CALLN: case IR_CALLL: case IR_CALLS: {
      const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
      ir->prev = asm_setup_call_slots(as, ir, ci);
      if (inloop)
	as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
		      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
      continue;
      }
#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
    case IR_HIOP:
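      /* HIOP holds the high half of a two-register result of the previous
      ** instruction; hint both halves into a suitable register pair.
      */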
      switch ((ir-1)->o) {
#if LJ_SOFTFP && LJ_TARGET_ARM
      case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
	if (ra_hashint((ir-1)->r)) {
	  ir->prev = (ir-1)->prev + 1;
	  continue;
	}
	break;
#endif
#if !LJ_SOFTFP && LJ_NEED_FP64
      case IR_CONV:
	if (irt_isfp((ir-1)->t)) {
	  ir->prev = REGSP_HINT(RID_FPRET);
	  continue;
	}
	/* fallthrough */
#endif
      case IR_CALLN: case IR_CALLXS:
#if LJ_SOFTFP
      case IR_MIN: case IR_MAX:
#endif
	(ir-1)->prev = REGSP_HINT(RID_RETLO);
	ir->prev = REGSP_HINT(RID_RETHI);
	continue;
      default:
	break;
      }
      break;
#endif
#if LJ_SOFTFP
    case IR_MIN: case IR_MAX:
      if ((ir+1)->o != IR_HIOP) break;
      /* fallthrough */
#endif
    /* C calls evict all scratch regs and return results in RID_RET. */
    case IR_SNEW: case IR_XSNEW: case IR_NEWREF:
      if (REGARG_NUMGPR < 3 && as->evenspill < 3)
	as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
    case IR_TNEW: case IR_TDUP: case IR_CNEW: case IR_CNEWI: case IR_TOSTR:
      ir->prev = REGSP_HINT(RID_RET);
      if (inloop)
	as->modset = RSET_SCRATCH;
      continue;
    case IR_STRTO: case IR_OBAR:
      if (inloop)
	as->modset = RSET_SCRATCH;
      break;
#if !LJ_TARGET_X86ORX64 && !LJ_SOFTFP
    case IR_ATAN2: case IR_LDEXP:
#endif
    case IR_POW:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) {
#if LJ_TARGET_X86ORX64
	ir->prev = REGSP_HINT(RID_XMM0);
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
#else
	ir->prev = REGSP_HINT(RID_FPRET);
	if (inloop)
	  as->modset |= RSET_SCRATCH;
#endif
	continue;
      }
      /* fallthrough for integer POW */
    case IR_DIV: case IR_MOD:
      if (!irt_isnum(ir->t)) {
	ir->prev = REGSP_HINT(RID_RET);
	if (inloop)
	  as->modset |= (RSET_SCRATCH & RSET_GPR);
	continue;
      }
      break;
    case IR_FPMATH:
#if LJ_TARGET_X86ORX64
      if (ir->op2 == IRFPM_EXP2) {  /* May be joined to lj_vm_pow_sse. */
	ir->prev = REGSP_HINT(RID_XMM0);
#if !LJ_64
	if (as->evenspill < 4)  /* Leave room for 16 byte scratch area. */
	  as->evenspill = 4;
#endif
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
	continue;
      } else if (ir->op2 <= IRFPM_TRUNC && !(as->flags & JIT_F_SSE4_1)) {
	ir->prev = REGSP_HINT(RID_XMM0);
	if (inloop)
	  as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
	continue;
      }
      break;
#else
      ir->prev = REGSP_HINT(RID_FPRET);
      if (inloop)
	as->modset |= RSET_SCRATCH;
      continue;
#endif
#if LJ_TARGET_X86ORX64
    /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
    case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
      if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
	IR(ir->op2)->r = REGSP_HINT(RID_ECX);
	if (inloop)
	  rset_set(as->modset, RID_ECX);
      }
      break;
#endif
    /* Do not propagate hints across type conversions or loads. */
    case IR_TOBIT:
    case IR_XLOAD:
#if !LJ_TARGET_ARM
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
#endif
      break;
    case IR_CONV:
      if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
	  (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
	break;
      /* fallthrough */
    default:
      /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
      if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
	  ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
	ir->prev = IR(ir->op1)->prev;
	continue;
      }
      break;
    }
    ir->prev = REGSP_INIT;
  }
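  /* If the call setup above left an odd number of spill slots, park the odd
  ** slot in oddspill so that evenspill stays evenly aligned.
  */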
  if ((as->evenspill & 1))
    as->oddspill = as->evenspill++;
  else
    as->oddspill = 0;
}

/* -- Assembler core ------------------------------------------------------ */

/* Assemble a trace. */
void lj_asm_trace(jit_State *J, GCtrace *T)
{
  ASMState as_;
  ASMState *as = &as_;
  MCode *origtop;

  /* Ensure an initialized instruction beyond the last one for HIOP checks. */
  J->cur.nins = lj_ir_nextins(J);
  J->cur.ir[J->cur.nins].o = IR_NOP;

  /* Setup initial state. Copy some fields to reduce indirections. */
  as->J = J;
  as->T = T;
  as->ir = T->ir;
  as->flags = J->flags;
  as->loopref = J->loopref;
  as->realign = NULL;
  as->loopinv = 0;
  as->parent = J->parent ? traceref(J, J->parent) : NULL;

  /* Reserve MCode memory. */
  as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
  as->mcp = as->mctop;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  asm_setup_target(as);

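  /* The trace may be assembled more than once: if the loop turns out to
  ** need realignment, as->realign is set and everything is re-emitted.
  */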
  do {
    as->mcp = as->mctop;
#ifdef LUA_USE_ASSERT
    as->mcp_prev = as->mcp;
#endif
    as->curins = T->nins;
    RA_DBG_START();
    RA_DBGX((as, "===== STOP ====="));

    /* General trace setup. Emit tail of trace. */
    asm_tail_prep(as);
    as->mcloop = NULL;
    as->flagmcp = NULL;
    as->topslot = 0;
    as->gcsteps = 0;
    as->sectref = as->loopref;
    as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
    asm_setup_regsp(as);
    if (!as->loopref)
      asm_tail_link(as);

    /* Assemble a trace in linear backwards order. */
    for (as->curins--; as->curins > as->stopins; as->curins--) {
      IRIns *ir = IR(as->curins);
      lua_assert(!(LJ_32 && irt_isint64(ir->t)));  /* Handled by SPLIT. */
      if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
	continue;  /* Dead-code elimination can be soooo easy. */
      if (irt_isguard(ir->t))
	asm_snap_prep(as);
      RA_DBG_REF();
      checkmclim(as);
      asm_ir(as, ir);
    }
  } while (as->realign);  /* Retry in case the MCode needs to be realigned. */

  /* Emit head of trace. */
  RA_DBG_REF();
  checkmclim(as);
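  /* Allocations counted in gcsteps are covered by a GC check at the head
  ** of the trace, guarded by the first snapshot.
  */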
  if (as->gcsteps > 0) {
    as->curins = as->T->snap[0].ref;
    asm_snap_prep(as);  /* The GC check is a guard. */
    asm_gc_check(as);
  }
  ra_evictk(as);
  if (as->parent)
    asm_head_side(as);
  else
    asm_head_root(as);
  asm_phi_fixup(as);

  RA_DBGX((as, "===== START ===="));
  RA_DBG_FLUSH();
  if (as->freeset != RSET_ALL)
    lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */

  /* Set trace entry point before fixing up tail to allow link to self. */
  T->mcode = as->mcp;
  T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
  if (!as->loopref)
    asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
  T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
  lj_mcode_sync(T->mcode, origtop);
}

#undef IR

#endif