1 /*
2 ** IR assembler (SSA IR -> machine code).
3 ** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
4 */
5 
6 #define lj_asm_c
7 #define LUA_CORE
8 
9 #include "lj_obj.h"
10 
11 #if LJ_HASJIT
12 
13 #include "lj_gc.h"
14 #include "lj_str.h"
15 #include "lj_tab.h"
16 #include "lj_frame.h"
17 #if LJ_HASFFI
18 #include "lj_ctype.h"
19 #endif
20 #include "lj_ir.h"
21 #include "lj_jit.h"
22 #include "lj_ircall.h"
23 #include "lj_iropt.h"
24 #include "lj_mcode.h"
25 #include "lj_trace.h"
26 #include "lj_snap.h"
27 #include "lj_asm.h"
28 #include "lj_dispatch.h"
29 #include "lj_vm.h"
30 #include "lj_target.h"
31 
32 #ifdef LUA_USE_ASSERT
33 #include <stdio.h>
34 #endif
35 
36 /* -- Assembler state and common macros ----------------------------------- */
37 
38 /* Assembler state. */
39 typedef struct ASMState {
40   RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */
41 
42   MCode *mcp;		/* Current MCode pointer (grows down). */
43   MCode *mclim;		/* Lower limit for MCode memory + red zone. */
44 #ifdef LUA_USE_ASSERT
45   MCode *mcp_prev;	/* Red zone overflow check. */
46 #endif
47 
48   IRIns *ir;		/* Copy of pointer to IR instructions/constants. */
49   jit_State *J;		/* JIT compiler state. */
50 
51 #if LJ_TARGET_X86ORX64
52   x86ModRM mrm;		/* Fused x86 address operand. */
53 #endif
54 
55   RegSet freeset;	/* Set of free registers. */
56   RegSet modset;	/* Set of registers modified inside the loop. */
57   RegSet weakset;	/* Set of weakly referenced registers. */
58   RegSet phiset;	/* Set of PHI registers. */
59 
60   uint32_t flags;	/* Copy of JIT compiler flags. */
61   int loopinv;		/* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */
62 
63   int32_t evenspill;	/* Next even spill slot. */
64   int32_t oddspill;	/* Next odd spill slot (or 0). */
65 
66   IRRef curins;		/* Reference of current instruction. */
67   IRRef stopins;	/* Stop assembly before hitting this instruction. */
68   IRRef orignins;	/* Original T->nins. */
69 
70   IRRef snapref;	/* Current snapshot is active after this reference. */
71   IRRef snaprename;	/* Rename highwater mark for snapshot check. */
72   SnapNo snapno;	/* Current snapshot number. */
73   SnapNo loopsnapno;	/* Loop snapshot number. */
74 
75   IRRef fuseref;	/* Fusion limit (loopref, 0 or FUSE_DISABLED). */
76   IRRef sectref;	/* Section base reference (loopref or 0). */
77   IRRef loopref;	/* Reference of LOOP instruction (or 0). */
78 
79   BCReg topslot;	/* Number of slots for stack check (unless 0). */
80   int32_t gcsteps;	/* Accumulated number of GC steps (per section). */
81 
82   GCtrace *T;		/* Trace to assemble. */
83   GCtrace *parent;	/* Parent trace (or NULL). */
84 
85   MCode *mcbot;		/* Bottom of reserved MCode. */
86   MCode *mctop;		/* Top of generated MCode. */
87   MCode *mcloop;	/* Pointer to loop MCode (or NULL). */
88   MCode *invmcp;	/* Points to invertible loop branch (or NULL). */
89   MCode *flagmcp;	/* Pending opportunity to merge flag setting ins. */
90   MCode *realign;	/* Realign loop if not NULL. */
91 
92 #ifdef RID_NUM_KREF
93   intptr_t krefk[RID_NUM_KREF];
94 #endif
95   IRRef1 phireg[RID_MAX];  /* PHI register references. */
96   uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent instruction to RegSP map. */
97 } ASMState;
98 
99 #ifdef LUA_USE_ASSERT
100 #define lj_assertA(c, ...)	lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
101 #else
102 #define lj_assertA(c, ...)	((void)as)
103 #endif
104 
105 #define IR(ref)			(&as->ir[(ref)])
106 
107 #define ASMREF_TMP1		REF_TRUE	/* Temp. register. */
108 #define ASMREF_TMP2		REF_FALSE	/* Temp. register. */
109 #define ASMREF_L		REF_NIL		/* Stores register for L. */
110 
111 /* Check for variant to invariant references. */
112 #define iscrossref(as, ref)	((ref) < as->sectref)
113 
114 /* Inhibit memory op fusion from variant to invariant references. */
115 #define FUSE_DISABLED		(~(IRRef)0)
116 #define mayfuse(as, ref)	((ref) > as->fuseref)
117 #define neverfuse(as)		(as->fuseref == FUSE_DISABLED)
118 #define canfuse(as, ir)		(!neverfuse(as) && !irt_isphi((ir)->t))
119 #define opisfusableload(o) \
120   ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
121    (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
122 
123 /* Sparse limit checks using a red zone before the actual limit. */
124 #define MCLIM_REDZONE	64
125 
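/* MCode limit exceeded: report the overflow (does not return). */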
static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
127 {
128   lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
129 }
130 
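/* Check MCode limit with red zone; bail out via asm_mclimit() on overflow. */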
static LJ_AINLINE void checkmclim(ASMState *as)
132 {
133 #ifdef LUA_USE_ASSERT
134   if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
135     IRIns *ir = IR(as->curins+1);
136     lj_assertA(0, "red zone overflow: %p IR %04d  %02d %04d %04d\n", as->mcp,
137       as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
138   }
139 #endif
140   if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
141 #ifdef LUA_USE_ASSERT
142   as->mcp_prev = as->mcp;
143 #endif
144 }
145 
146 #ifdef RID_NUM_KREF
147 #define ra_iskref(ref)		((ref) < RID_NUM_KREF)
148 #define ra_krefreg(ref)		((Reg)(RID_MIN_KREF + (Reg)(ref)))
149 #define ra_krefk(as, ref)	(as->krefk[(ref)])
150 
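/* Bind constant k to constant register r and record its allocation cost. */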
static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
152 {
153   IRRef ref = (IRRef)(r - RID_MIN_KREF);
154   as->krefk[ref] = k;
155   as->cost[r] = REGCOST(ref, ref);
156 }
157 
158 #else
159 #define ra_iskref(ref)		0
160 #define ra_krefreg(ref)		RID_MIN_GPR
161 #define ra_krefk(as, ref)	0
162 #endif
163 
164 /* Arch-specific field offsets. */
165 static const uint8_t field_ofs[IRFL__MAX+1] = {
166 #define FLOFS(name, ofs)	(uint8_t)(ofs),
167 IRFLDEF(FLOFS)
168 #undef FLOFS
169   0
170 };
171 
172 /* -- Target-specific instruction emitter --------------------------------- */
173 
174 #if LJ_TARGET_X86ORX64
175 #include "lj_emit_x86.h"
176 #elif LJ_TARGET_ARM
177 #include "lj_emit_arm.h"
178 #elif LJ_TARGET_ARM64
179 #include "lj_emit_arm64.h"
180 #elif LJ_TARGET_PPC
181 #include "lj_emit_ppc.h"
182 #elif LJ_TARGET_MIPS
183 #include "lj_emit_mips.h"
184 #else
185 #error "Missing instruction emitter for target CPU"
186 #endif
187 
188 /* Generic load/store of register from/to stack slot. */
189 #define emit_spload(as, ir, r, ofs) \
190   emit_loadofs(as, ir, (r), RID_SP, (ofs))
191 #define emit_spstore(as, ir, r, ofs) \
192   emit_storeofs(as, ir, (r), RID_SP, (ofs))
193 
194 /* -- Register allocator debugging ---------------------------------------- */
195 
196 /* #define LUAJIT_DEBUG_RA */
197 
198 #ifdef LUAJIT_DEBUG_RA
199 
200 #include <stdio.h>
201 #include <stdarg.h>
202 
203 #define RIDNAME(name)	#name,
204 static const char *const ra_regname[] = {
205   GPRDEF(RIDNAME)
206   FPRDEF(RIDNAME)
207   VRIDDEF(RIDNAME)
208   NULL
209 };
210 #undef RIDNAME
211 
212 static char ra_dbg_buf[65536];
213 static char *ra_dbg_p;
214 static char *ra_dbg_merge;
215 static MCode *ra_dbg_mcp;
216 
static void ra_dstart(void)
218 {
219   ra_dbg_p = ra_dbg_buf;
220   ra_dbg_merge = NULL;
221   ra_dbg_mcp = NULL;
222 }
223 
static void ra_dflush(void)
225 {
226   fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
227   ra_dstart();
228 }
229 
static void ra_dprintf(ASMState *as, const char *fmt, ...)
231 {
232   char *p;
233   va_list argp;
234   va_start(argp, fmt);
235   p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
236   ra_dbg_mcp = NULL;
237   p += sprintf(p, "%08x  \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
238   for (;;) {
239     const char *e = strchr(fmt, '$');
240     if (e == NULL) break;
241     memcpy(p, fmt, (size_t)(e-fmt));
242     p += e-fmt;
243     if (e[1] == 'r') {
244       Reg r = va_arg(argp, Reg) & RID_MASK;
245       if (r <= RID_MAX) {
246 	const char *q;
247 	for (q = ra_regname[r]; *q; q++)
248 	  *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
249       } else {
250 	*p++ = '?';
251 	lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt);
252       }
253     } else if (e[1] == 'f' || e[1] == 'i') {
254       IRRef ref;
255       if (e[1] == 'f')
256 	ref = va_arg(argp, IRRef);
257       else
258 	ref = va_arg(argp, IRIns *) - as->ir;
259       if (ref >= REF_BIAS)
260 	p += sprintf(p, "%04d", ref - REF_BIAS);
261       else
262 	p += sprintf(p, "K%03d", REF_BIAS - ref);
263     } else if (e[1] == 's') {
264       uint32_t slot = va_arg(argp, uint32_t);
265       p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
266     } else if (e[1] == 'x') {
267       p += sprintf(p, "%08x", va_arg(argp, int32_t));
268     } else {
269       lj_assertA(0, "bad debug format code");
270     }
271     fmt = e+2;
272   }
273   va_end(argp);
274   while (*fmt)
275     *p++ = *fmt++;
276   *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
277   if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
278     fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
279     p = ra_dbg_buf;
280   }
281   ra_dbg_p = p;
282 }
283 
284 #define RA_DBG_START()	ra_dstart()
285 #define RA_DBG_FLUSH()	ra_dflush()
286 #define RA_DBG_REF() \
287   do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
288        ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
289 #define RA_DBGX(x)	ra_dprintf x
290 
291 #else
292 #define RA_DBG_START()	((void)0)
293 #define RA_DBG_FLUSH()	((void)0)
294 #define RA_DBG_REF()	((void)0)
295 #define RA_DBGX(x)	((void)0)
296 #endif
297 
298 /* -- Register allocator -------------------------------------------------- */
299 
300 #define ra_free(as, r)		rset_set(as->freeset, (r))
301 #define ra_modified(as, r)	rset_set(as->modset, (r))
302 #define ra_weak(as, r)		rset_set(as->weakset, (r))
303 #define ra_noweak(as, r)	rset_clear(as->weakset, (r))
304 
305 #define ra_used(ir)		(ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
306 
307 /* Setup register allocator. */
static void ra_setup(ASMState *as)
309 {
310   Reg r;
311   /* Initially all regs (except the stack pointer) are free for use. */
312   as->freeset = RSET_INIT;
313   as->modset = RSET_EMPTY;
314   as->weakset = RSET_EMPTY;
315   as->phiset = RSET_EMPTY;
316   memset(as->phireg, 0, sizeof(as->phireg));
317   for (r = RID_MIN_GPR; r < RID_MAX; r++)
318     as->cost[r] = REGCOST(~0u, 0u);
319 }
320 
321 /* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
323 {
324   IRIns *ir;
325   Reg r;
326   if (ra_iskref(ref)) {
327     r = ra_krefreg(ref);
328     lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r);
329     ra_free(as, r);
330     ra_modified(as, r);
331 #if LJ_64
332     emit_loadu64(as, r, ra_krefk(as, ref));
333 #else
334     emit_loadi(as, r, ra_krefk(as, ref));
335 #endif
336     return r;
337   }
338   ir = IR(ref);
339   r = ir->r;
340   lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref);
341   lj_assertA(!ra_hasspill(ir->s),
342 	     "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s);
343   ra_free(as, r);
344   ra_modified(as, r);
345   ir->r = RID_INIT;  /* Do not keep any hint. */
346   RA_DBGX((as, "remat     $i $r", ir, r));
347 #if !LJ_SOFTFP32
348   if (ir->o == IR_KNUM) {
349     emit_loadk64(as, r, ir);
350   } else
351 #endif
352   if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
353     ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
354     emit_getgl(as, r, jit_base);
355   } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
356     /* REF_NIL stores ASMREF_L register. */
357     lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L");
358     emit_getgl(as, r, cur_L);
359 #if LJ_64
360   } else if (ir->o == IR_KINT64) {
361     emit_loadu64(as, r, ir_kint64(ir)->u64);
362 #if LJ_GC64
363   } else if (ir->o == IR_KGC) {
364     emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
365   } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
366     emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
367 #endif
368 #endif
369   } else {
370     lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
371 	       ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
372 	       "rematk of bad IR op %d", ir->o);
373     emit_loadi(as, r, ir->i);
374   }
375   return r;
376 }
377 
378 /* Force a spill. Allocate a new spill slot if needed. */
static int32_t ra_spill(ASMState *as, IRIns *ir)
380 {
381   int32_t slot = ir->s;
382   lj_assertA(ir >= as->ir + REF_TRUE,
383 	     "spill of K%03d", REF_BIAS - (int)(ir - as->ir));
384   if (!ra_hasspill(slot)) {
385     if (irt_is64(ir->t)) {
386       slot = as->evenspill;
387       as->evenspill += 2;
388     } else if (as->oddspill) {
389       slot = as->oddspill;
390       as->oddspill = 0;
391     } else {
392       slot = as->evenspill;
393       as->oddspill = slot+1;
394       as->evenspill += 2;
395     }
396     if (as->evenspill > 256)
397       lj_trace_err(as->J, LJ_TRERR_SPILLOV);
398     ir->s = (uint8_t)slot;
399   }
400   return sps_scale(slot);
401 }
402 
403 /* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
405 {
406   IRIns *ir = IR(ref);
407   Reg r = ir->r;
408   lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1);
409   lj_assertA(!ra_hasspill(ir->s),
410 	     "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s);
411   ra_free(as, r);
412   ra_modified(as, r);
413   ir->r = RID_INIT;
414   return r;
415 }
416 
417 /* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
419 {
420   if (emit_canremat(ref)) {
421     return ra_rematk(as, ref);
422   } else {
423     IRIns *ir = IR(ref);
424     int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
425     Reg r = ir->r;
426     lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS);
427     ra_sethint(ir->r, r);  /* Keep hint. */
428     ra_free(as, r);
429     if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
430       ra_modified(as, r);
431       RA_DBGX((as, "restore   $i $r", ir, r));
432       emit_spload(as, ir, r, ofs);
433     }
434     return r;
435   }
436 }
437 
438 /* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
440 {
441   RA_DBGX((as, "save      $i $r", ir, r));
442   emit_spstore(as, ir, r, sps_scale(ir->s));
443 }
444 
445 #define MINCOST(name) \
446   if (rset_test(RSET_ALL, RID_##name) && \
447       LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
448     cost = as->cost[RID_##name];
449 
450 /* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
452 {
453   IRRef ref;
454   RegCost cost = ~(RegCost)0;
455   lj_assertA(allow != RSET_EMPTY, "evict from empty set");
456   if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
457     GPRDEF(MINCOST)
458   } else {
459     FPRDEF(MINCOST)
460   }
461   ref = regcost_ref(cost);
462   lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins),
463 	     "evict of out-of-range IR %04d", ref - REF_BIAS);
464   /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
465   if (!irref_isk(ref) && (as->weakset & allow)) {
466     IRIns *ir = IR(ref);
467     if (!rset_test(as->weakset, ir->r))
468       ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
469   }
470   return ra_restore(as, ref);
471 }
472 
473 /* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
475 {
476   RegSet pick = as->freeset & allow;
477   if (!pick)
478     return ra_evict(as, allow);
479   else
480     return rset_picktop(pick);
481 }
482 
483 /* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
485 {
486   Reg r = ra_pick(as, allow);
487   ra_modified(as, r);
488   RA_DBGX((as, "scratch        $r", r));
489   return r;
490 }
491 
492 /* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
494 {
495   RegSet work;
496   as->modset |= drop;
497 #if !LJ_SOFTFP
498   work = (drop & ~as->freeset) & RSET_FPR;
499   while (work) {
500     Reg r = rset_pickbot(work);
501     ra_restore(as, regcost_ref(as->cost[r]));
502     rset_clear(work, r);
503     checkmclim(as);
504   }
505 #endif
506   work = (drop & ~as->freeset);
507   while (work) {
508     Reg r = rset_pickbot(work);
509     ra_restore(as, regcost_ref(as->cost[r]));
510     rset_clear(work, r);
511     checkmclim(as);
512   }
513 }
514 
515 /* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
517 {
518   RegSet work;
519 #if !LJ_SOFTFP
520   work = ~as->freeset & RSET_FPR;
521   while (work) {
522     Reg r = rset_pickbot(work);
523     IRRef ref = regcost_ref(as->cost[r]);
524     if (emit_canremat(ref) && irref_isk(ref)) {
525       ra_rematk(as, ref);
526       checkmclim(as);
527     }
528     rset_clear(work, r);
529   }
530 #endif
531   work = ~as->freeset & RSET_GPR;
532   while (work) {
533     Reg r = rset_pickbot(work);
534     IRRef ref = regcost_ref(as->cost[r]);
535     if (emit_canremat(ref) && irref_isk(ref)) {
536       ra_rematk(as, ref);
537       checkmclim(as);
538     }
539     rset_clear(work, r);
540   }
541 }
542 
543 #ifdef RID_NUM_KREF
544 /* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
546 {
547   /* First try to find a register which already holds the same constant. */
548   RegSet pick, work = ~as->freeset & RSET_GPR;
549   Reg r;
550   while (work) {
551     IRRef ref;
552     r = rset_pickbot(work);
553     ref = regcost_ref(as->cost[r]);
554 #if LJ_64
555     if (ref < ASMREF_L) {
556       if (ra_iskref(ref)) {
557 	if (k == ra_krefk(as, ref))
558 	  return r;
559       } else {
560 	IRIns *ir = IR(ref);
561 	if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
562 #if LJ_GC64
563 	    (ir->o == IR_KINT && k == ir->i) ||
564 	    (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
565 	    ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
566 	     k == (intptr_t)ir_kptr(ir))
567 #else
568 	    (ir->o != IR_KINT64 && k == ir->i)
569 #endif
570 	   )
571 	  return r;
572       }
573     }
574 #else
575     if (ref < ASMREF_L &&
576 	k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
577       return r;
578 #endif
579     rset_clear(work, r);
580   }
581   pick = as->freeset & allow;
582   if (pick) {
583     /* Constants should preferably get unmodified registers. */
584     if ((pick & ~as->modset))
585       pick &= ~as->modset;
586     r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
587   } else {
588     r = ra_evict(as, allow);
589   }
590   RA_DBGX((as, "allock    $x $r", k, r));
591   ra_setkref(as, r, k);
592   rset_clear(as->freeset, r);
593   ra_noweak(as, r);
594   return r;
595 }
596 
597 /* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
599 {
600   Reg kr = ra_allock(as, k, RID2RSET(r));
601   if (kr != r) {
602     IRIns irdummy;
603     irdummy.t.irt = IRT_INT;
604     ra_scratch(as, RID2RSET(r));
605     emit_movrr(as, &irdummy, r, kr);
606   }
607 }
608 #else
609 #define ra_allockreg(as, k, r)		emit_loadi(as, (r), (k))
610 #endif
611 
612 /* Allocate a register for ref from the allowed set of registers.
613 ** Note: this function assumes the ref does NOT have a register yet!
614 ** Picks an optimal register, sets the cost and marks the register as non-free.
615 */
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
617 {
618   IRIns *ir = IR(ref);
619   RegSet pick = as->freeset & allow;
620   Reg r;
621   lj_assertA(ra_noreg(ir->r),
622 	     "IR %04d already has reg %d", ref - REF_BIAS, ir->r);
623   if (pick) {
624     /* First check register hint from propagation or PHI. */
625     if (ra_hashint(ir->r)) {
626       r = ra_gethint(ir->r);
627       if (rset_test(pick, r))  /* Use hint register if possible. */
628 	goto found;
629       /* Rematerialization is cheaper than missing a hint. */
630       if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
631 	ra_rematk(as, regcost_ref(as->cost[r]));
632 	goto found;
633       }
634       RA_DBGX((as, "hintmiss  $f $r", ref, r));
635     }
636     /* Invariants should preferably get unmodified registers. */
637     if (ref < as->loopref && !irt_isphi(ir->t)) {
638       if ((pick & ~as->modset))
639 	pick &= ~as->modset;
640       r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
641     } else {
642       /* We've got plenty of regs, so get callee-save regs if possible. */
643       if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
644 	pick &= ~RSET_SCRATCH;
645       r = rset_picktop(pick);
646     }
647   } else {
648     r = ra_evict(as, allow);
649   }
650 found:
651   RA_DBGX((as, "alloc     $f $r", ref, r));
652   ir->r = (uint8_t)r;
653   rset_clear(as->freeset, r);
654   ra_noweak(as, r);
655   as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
656   return r;
657 }
658 
659 /* Allocate a register on-demand. */
static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
661 {
662   Reg r = IR(ref)->r;
663   /* Note: allow is ignored if the register is already allocated. */
664   if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
665   ra_noweak(as, r);
666   return r;
667 }
668 
669 /* Add a register rename to the IR. */
static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
671 {
672   IRRef ren;
673   lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
674   ren = tref_ref(lj_ir_emit(as->J));
675   as->J->cur.ir[ren].r = (uint8_t)down;
676   as->J->cur.ir[ren].s = SPS_NONE;
677 }
678 
679 /* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
681 {
682   IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
683   IRIns *ir = IR(ref);
684   ir->r = (uint8_t)up;
685   as->cost[down] = 0;
686   lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR),
687 	     "rename between GPR/FPR %d and %d", down, up);
688   lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down);
689   lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up);
690   ra_free(as, down);  /* 'down' is free ... */
691   ra_modified(as, down);
692   rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
693   ra_noweak(as, up);
694   RA_DBGX((as, "rename    $f $r $r", regcost_ref(as->cost[up]), down, up));
695   emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
696   if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
697     ra_addrename(as, down, ref, as->snapno);
698   }
699 }
700 
701 /* Pick a destination register (marked as free).
702 ** Caveat: allow is ignored if there's already a destination register.
703 ** Use ra_destreg() to get a specific register.
704 */
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
706 {
707   Reg dest = ir->r;
708   if (ra_hasreg(dest)) {
709     ra_free(as, dest);
710     ra_modified(as, dest);
711   } else {
712     if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
713       dest = ra_gethint(dest);
714       ra_modified(as, dest);
715       RA_DBGX((as, "dest           $r", dest));
716     } else {
717       dest = ra_scratch(as, allow);
718     }
719     ir->r = dest;
720   }
721   if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
722   return dest;
723 }
724 
725 /* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
727 {
728   Reg dest = ra_dest(as, ir, RID2RSET(r));
729   if (dest != r) {
730     lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r);
731     ra_modified(as, r);
732     emit_movrr(as, ir, dest, r);
733   }
734 }
735 
736 #if LJ_TARGET_X86ORX64
737 /* Propagate dest register to left reference. Emit moves as needed.
738 ** This is a required fixup step for all 2-operand machine instructions.
739 */
static void ra_left(ASMState *as, Reg dest, IRRef lref)
741 {
742   IRIns *ir = IR(lref);
743   Reg left = ir->r;
744   if (ra_noreg(left)) {
745     if (irref_isk(lref)) {
746       if (ir->o == IR_KNUM) {
747 	/* FP remat needs a load except for +0. Still better than eviction. */
748 	if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
749 	  emit_loadk64(as, dest, ir);
750 	  return;
751 	}
752 #if LJ_64
753       } else if (ir->o == IR_KINT64) {
754 	emit_loadk64(as, dest, ir);
755 	return;
756 #if LJ_GC64
757       } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
758 	emit_loadk64(as, dest, ir);
759 	return;
760 #endif
761 #endif
762       } else if (ir->o != IR_KPRI) {
763 	lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
764 		   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
765 		   "K%03d has bad IR op %d", REF_BIAS - lref, ir->o);
766 	emit_loadi(as, dest, ir->i);
767 	return;
768       }
769     }
770     if (!ra_hashint(left) && !iscrossref(as, lref))
771       ra_sethint(ir->r, dest);  /* Propagate register hint. */
772     left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
773   }
774   ra_noweak(as, left);
775   /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
776   if (dest != left) {
777     /* Use register renaming if dest is the PHI reg. */
778     if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
779       ra_modified(as, left);
780       ra_rename(as, left, dest);
781     } else {
782       emit_movrr(as, ir, dest, left);
783     }
784   }
785 }
786 #else
787 /* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
789 {
790   IRIns *ir = IR(lref);
791   Reg left = ir->r;
792   if (ra_noreg(left)) {
793     ra_sethint(ir->r, dest);  /* Propagate register hint. */
794     left = ra_allocref(as, lref,
795 		       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
796   }
797   ra_noweak(as, left);
798   if (dest != left) {
799     /* Use register renaming if dest is the PHI reg. */
800     if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
801       ra_modified(as, left);
802       ra_rename(as, left, dest);
803     } else {
804       emit_movrr(as, ir, dest, left);
805     }
806   }
807 }
808 #endif
809 
810 #if !LJ_64
811 /* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
813 {
814   Reg destlo = ir->r, desthi = (ir+1)->r;
815   /* First spill unrelated refs blocking the destination registers. */
816   if (!rset_test(as->freeset, RID_RETLO) &&
817       destlo != RID_RETLO && desthi != RID_RETLO)
818     ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
819   if (!rset_test(as->freeset, RID_RETHI) &&
820       destlo != RID_RETHI && desthi != RID_RETHI)
821     ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
822   /* Next free the destination registers (if any). */
823   if (ra_hasreg(destlo)) {
824     ra_free(as, destlo);
825     ra_modified(as, destlo);
826   } else {
827     destlo = RID_RETLO;
828   }
829   if (ra_hasreg(desthi)) {
830     ra_free(as, desthi);
831     ra_modified(as, desthi);
832   } else {
833     desthi = RID_RETHI;
834   }
835   /* Check for conflicts and shuffle the registers as needed. */
836   if (destlo == RID_RETHI) {
837     if (desthi == RID_RETLO) {
838 #if LJ_TARGET_X86
839       *--as->mcp = XI_XCHGa + RID_RETHI;
840 #else
841       emit_movrr(as, ir, RID_RETHI, RID_TMP);
842       emit_movrr(as, ir, RID_RETLO, RID_RETHI);
843       emit_movrr(as, ir, RID_TMP, RID_RETLO);
844 #endif
845     } else {
846       emit_movrr(as, ir, RID_RETHI, RID_RETLO);
847       if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
848     }
849   } else if (desthi == RID_RETLO) {
850     emit_movrr(as, ir, RID_RETLO, RID_RETHI);
851     if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
852   } else {
853     if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
854     if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
855   }
856   /* Restore spill slots (if any). */
857   if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
858   if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
859 }
860 #endif
861 
/* -- Snapshot handling -------------------------------------------------- */
863 
864 /* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
866 {
867   Reg r;
868   for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
869     if (irref_isk(regcost_ref(as->cost[r])))
870       return 1;
871   return 0;
872 }
873 
874 /* Check whether a sunk store corresponds to an allocation. */
static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
876 {
877   if (irs->s == 255) {
878     if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
879 	irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
880       IRIns *irk = IR(irs->op1);
881       if (irk->o == IR_AREF || irk->o == IR_HREFK)
882 	irk = IR(irk->op1);
883       return (IR(irk->op1) == ira);
884     }
885     return 0;
886   } else {
887     return (ira + irs->s == irs);  /* Quick check. */
888   }
889 }
890 
891 /* Allocate register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
893 {
894   IRIns *ir = IR(ref);
895   if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) {
896     if (ir->r == RID_SINK) {
897       ir->r = RID_SUNK;
898 #if LJ_HASFFI
899       if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
900 	asm_snap_alloc1(as, ir->op2);
901 	if (LJ_32 && (ir+1)->o == IR_HIOP)
902 	  asm_snap_alloc1(as, (ir+1)->op2);
903       } else
904 #endif
905       {  /* Allocate stored values for TNEW, TDUP and CNEW. */
906 	IRIns *irs;
907 	lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW,
908 		   "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o);
909 	for (irs = IR(as->snapref-1); irs > ir; irs--)
910 	  if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
911 	    lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
912 		       irs->o == IR_FSTORE || irs->o == IR_XSTORE,
913 		       "sunk store IR %04d has bad op %d",
914 		       (int)(irs - as->ir) - REF_BIAS, irs->o);
915 	    asm_snap_alloc1(as, irs->op2);
916 	    if (LJ_32 && (irs+1)->o == IR_HIOP)
917 	      asm_snap_alloc1(as, (irs+1)->op2);
918 	  }
919       }
920     } else {
921       RegSet allow;
922       if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
923 	IRIns *irc;
924 	for (irc = IR(as->curins); irc > ir; irc--)
925 	  if ((irc->op1 == ref || irc->op2 == ref) &&
926 	      !(irc->r == RID_SINK || irc->r == RID_SUNK))
927 	    goto nosink;  /* Don't sink conversion if result is used. */
928 	asm_snap_alloc1(as, ir->op1);
929 	return;
930       }
931     nosink:
932       allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
933       if ((as->freeset & allow) ||
934 	       (allow == RSET_FPR && asm_snap_canremat(as))) {
935 	/* Get a weak register if we have a free one or can rematerialize. */
936 	Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
937 	if (!irt_isphi(ir->t))
938 	  ra_weak(as, r);  /* But mark it as weakly referenced. */
939 	checkmclim(as);
940 	RA_DBGX((as, "snapreg   $f $r", ref, ir->r));
941       } else {
942 	ra_spill(as, ir);  /* Otherwise force a spill slot. */
943 	RA_DBGX((as, "snapspill $f $s", ref, ir->s));
944       }
945     }
946   }
947 }
948 
949 /* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as)
951 {
952   SnapShot *snap = &as->T->snap[as->snapno];
953   SnapEntry *map = &as->T->snapmap[snap->mapofs];
954   MSize n, nent = snap->nent;
955   for (n = 0; n < nent; n++) {
956     SnapEntry sn = map[n];
957     IRRef ref = snap_ref(sn);
958     if (!irref_isk(ref)) {
959       asm_snap_alloc1(as, ref);
960       if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
961 	lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP,
962 		   "snap %d[%d] points to bad SOFTFP IR %04d",
963 		   as->snapno, n, ref - REF_BIAS);
964 	asm_snap_alloc1(as, ref+1);
965       }
966     }
967   }
968 }
969 
970 /* All guards for a snapshot use the same exitno. This is currently the
971 ** same as the snapshot number. Since the exact origin of the exit cannot
972 ** be determined, all guards for the same snapshot must exit with the same
973 ** RegSP mapping.
974 ** A renamed ref which has been used in a prior guard for the same snapshot
975 ** would cause an inconsistency. The easy way out is to force a spill slot.
976 */
static int asm_snap_checkrename(ASMState *as, IRRef ren)
978 {
979   SnapShot *snap = &as->T->snap[as->snapno];
980   SnapEntry *map = &as->T->snapmap[snap->mapofs];
981   MSize n, nent = snap->nent;
982   for (n = 0; n < nent; n++) {
983     SnapEntry sn = map[n];
984     IRRef ref = snap_ref(sn);
985     if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
986       IRIns *ir = IR(ref);
987       ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
988       RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
989       return 1;  /* Found. */
990     }
991   }
992   return 0;  /* Not found. */
993 }
994 
995 /* Prepare snapshot for next guard instruction. */
static void asm_snap_prep(ASMState *as)
997 {
998   if (as->curins < as->snapref) {
999     do {
1000       if (as->snapno == 0) return;  /* Called by sunk stores before snap #0. */
1001       as->snapno--;
1002       as->snapref = as->T->snap[as->snapno].ref;
1003     } while (as->curins < as->snapref);
1004     asm_snap_alloc(as);
1005     as->snaprename = as->T->nins;
1006   } else {
1007     /* Process any renames above the highwater mark. */
1008     for (; as->snaprename < as->T->nins; as->snaprename++) {
1009       IRIns *ir = &as->T->ir[as->snaprename];
1010       if (asm_snap_checkrename(as, ir->op1))
1011 	ir->op2 = REF_BIAS-1;  /* Kill rename. */
1012     }
1013   }
1014 }
1015 
1016 /* -- Miscellaneous helpers ----------------------------------------------- */
1017 
1018 /* Calculate stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
1020 {
1021   if (as->evenspill <= SPS_FIXED)
1022     return 0;
1023   return sps_scale(sps_align(as->evenspill));
1024 }
1025 
1026 /* Must match with hash*() in lj_tab.c. */
static uint32_t ir_khash(ASMState *as, IRIns *ir)
1028 {
1029   uint32_t lo, hi;
1030   UNUSED(as);
1031   if (irt_isstr(ir->t)) {
1032     return ir_kstr(ir)->sid;
1033   } else if (irt_isnum(ir->t)) {
1034     lo = ir_knum(ir)->u32.lo;
1035     hi = ir_knum(ir)->u32.hi << 1;
1036   } else if (irt_ispri(ir->t)) {
1037     lj_assertA(!irt_isnil(ir->t), "hash of nil key");
1038     return irt_type(ir->t)-IRT_FALSE;
1039   } else {
1040     lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t));
1041     lo = u32ptr(ir_kgc(ir));
1042 #if LJ_GC64
1043     hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15);
1044 #else
1045     hi = lo + HASH_BIAS;
1046 #endif
1047   }
1048   return hashrot(lo, hi);
1049 }
1050 
1051 /* -- Allocations --------------------------------------------------------- */
1052 
1053 static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
1054 static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
1055 
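/* SNEW: allocate and intern a string via lj_str_new(). */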
static void asm_snew(ASMState *as, IRIns *ir)
1057 {
1058   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
1059   IRRef args[3];
1060   args[0] = ASMREF_L;  /* lua_State *L    */
1061   args[1] = ir->op1;   /* const char *str */
1062   args[2] = ir->op2;   /* size_t len      */
1063   as->gcsteps++;
1064   asm_setupresult(as, ir, ci);  /* GCstr * */
1065   asm_gencall(as, ci, args);
1066 }
1067 
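/* TNEW: allocate a table via lj_tab_new1(); op1/op2 are packed into ahsize. */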
static void asm_tnew(ASMState *as, IRIns *ir)
1069 {
1070   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
1071   IRRef args[2];
1072   args[0] = ASMREF_L;     /* lua_State *L    */
1073   args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
1074   as->gcsteps++;
1075   asm_setupresult(as, ir, ci);  /* GCtab * */
1076   asm_gencall(as, ci, args);
1077   ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
1078 }
1079 
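/* TDUP: duplicate a template table via lj_tab_dup(). */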
static void asm_tdup(ASMState *as, IRIns *ir)
1081 {
1082   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
1083   IRRef args[2];
1084   args[0] = ASMREF_L;  /* lua_State *L    */
1085   args[1] = ir->op1;   /* const GCtab *kt */
1086   as->gcsteps++;
1087   asm_setupresult(as, ir, ci);  /* GCtab * */
1088   asm_gencall(as, ci, args);
1089 }
1090 
1091 static void asm_gc_check(ASMState *as);
1092 
1093 /* Explicit GC step. */
static void asm_gcstep(ASMState *as, IRIns *ir)
1095 {
1096   IRIns *ira;
1097   for (ira = IR(as->stopins+1); ira < ir; ira++)
1098     if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
1099 	 (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
1100 	ra_used(ira))
1101       as->gcsteps++;
1102   if (as->gcsteps)
1103     asm_gc_check(as);
1104   as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
1105 }
1106 
1107 /* -- Buffer operations --------------------------------------------------- */
1108 
1109 static void asm_tvptr(ASMState *as, Reg dest, IRRef ref);
1110 
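/* BUFHDR: set up a string buffer, either appending or resetting it to its base. */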
static void asm_bufhdr(ASMState *as, IRIns *ir)
1112 {
1113   Reg sb = ra_dest(as, ir, RSET_GPR);
1114   if ((ir->op2 & IRBUFHDR_APPEND)) {
1115     /* Rematerialize const buffer pointer instead of likely spill. */
1116     IRIns *irp = IR(ir->op1);
1117     if (!(ra_hasreg(irp->r) || irp == ir-1 ||
1118 	  (irp == ir-2 && !ra_used(ir-1)))) {
1119       while (!(irp->o == IR_BUFHDR && !(irp->op2 & IRBUFHDR_APPEND)))
1120 	irp = IR(irp->op1);
1121       if (irref_isk(irp->op1)) {
1122 	ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
1123 	ir = irp;
1124       }
1125     }
1126   } else {
1127     Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
1128     /* Passing ir isn't strictly correct, but it's an IRT_PGC, too. */
1129     emit_storeofs(as, ir, tmp, sb, offsetof(SBuf, p));
1130     emit_loadofs(as, ir, tmp, sb, offsetof(SBuf, b));
1131   }
1132 #if LJ_TARGET_X86ORX64
1133   ra_left(as, sb, ir->op1);
1134 #else
1135   ra_leftov(as, sb, ir->op1);
1136 #endif
1137 }
1138 
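/* BUFPUT: append to a string buffer, fusing chars, TOSTR and SNEW sources. */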
static void asm_bufput(ASMState *as, IRIns *ir)
1140 {
1141   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
1142   IRRef args[3];
1143   IRIns *irs;
1144   int kchar = -129;
1145   args[0] = ir->op1;  /* SBuf * */
1146   args[1] = ir->op2;  /* GCstr * */
1147   irs = IR(ir->op2);
1148   lj_assertA(irt_isstr(irs->t),
1149 	     "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS);
1150   if (irs->o == IR_KGC) {
1151     GCstr *s = ir_kstr(irs);
1152     if (s->len == 1) {  /* Optimize put of single-char string constant. */
1153       kchar = (int8_t)strdata(s)[0];  /* Signed! */
1154       args[1] = ASMREF_TMP1;  /* int, truncated to char */
1155       ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
1156     }
1157   } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
1158     if (irs->o == IR_TOSTR) {  /* Fuse number to string conversions. */
1159       if (irs->op2 == IRTOSTR_NUM) {
1160 	args[1] = ASMREF_TMP1;  /* TValue * */
1161 	ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
1162       } else {
1163 	lj_assertA(irt_isinteger(IR(irs->op1)->t),
1164 		   "TOSTR of non-numeric IR %04d", irs->op1);
1165 	args[1] = irs->op1;  /* int */
1166 	if (irs->op2 == IRTOSTR_INT)
1167 	  ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
1168 	else
1169 	  ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
1170       }
1171     } else if (irs->o == IR_SNEW) {  /* Fuse string allocation. */
1172       args[1] = irs->op1;  /* const void * */
1173       args[2] = irs->op2;  /* MSize */
1174       ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
1175     }
1176   }
1177   asm_setupresult(as, ir, ci);  /* SBuf * */
1178   asm_gencall(as, ci, args);
1179   if (args[1] == ASMREF_TMP1) {
1180     Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
1181     if (kchar == -129)
1182       asm_tvptr(as, tmp, irs->op1);
1183     else
1184       ra_allockreg(as, kchar, tmp);
1185   }
1186 }
1187 
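/* BUFSTR: convert the buffer contents to a string via lj_buf_tostr(). */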
static void asm_bufstr(ASMState *as, IRIns *ir)
1189 {
1190   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
1191   IRRef args[1];
1192   args[0] = ir->op1;  /* SBuf *sb */
1193   as->gcsteps++;
1194   asm_setupresult(as, ir, ci);  /* GCstr * */
1195   asm_gencall(as, ci, args);
1196 }
1197 
1198 /* -- Type conversions ---------------------------------------------------- */
1199 
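/* TOSTR: convert a number, integer or char to a string via lj_strfmt_*(). */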
static void asm_tostr(ASMState *as, IRIns *ir)
1201 {
1202   const CCallInfo *ci;
1203   IRRef args[2];
1204   args[0] = ASMREF_L;
1205   as->gcsteps++;
1206   if (ir->op2 == IRTOSTR_NUM) {
1207     args[1] = ASMREF_TMP1;  /* cTValue * */
1208     ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
1209   } else {
1210     args[1] = ir->op1;  /* int32_t k */
1211     if (ir->op2 == IRTOSTR_INT)
1212       ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
1213     else
1214       ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
1215   }
1216   asm_setupresult(as, ir, ci);  /* GCstr * */
1217   asm_gencall(as, ci, args);
1218   if (ir->op2 == IRTOSTR_NUM)
1219     asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
1220 }
1221 
1222 #if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
static void asm_conv64(ASMState *as, IRIns *ir)
1224 {
1225   IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
1226   IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
1227   IRCallID id;
1228   IRRef args[2];
1229   lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP,
1230 	     "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS);
1231   args[LJ_BE] = (ir-1)->op1;
1232   args[LJ_LE] = ir->op1;
1233   if (st == IRT_NUM || st == IRT_FLOAT) {
1234     id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
1235     ir--;
1236   } else {
1237     id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
1238   }
1239   {
1240 #if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
1241     CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
1242     cim.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
1243 #else
1244     const CCallInfo *ci = &lj_ir_callinfo[id];
1245 #endif
1246     asm_setupresult(as, ir, ci);
1247     asm_gencall(as, ci, args);
1248   }
1249 }
1250 #endif
1251 
1252 /* -- Memory references --------------------------------------------------- */
1253 
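/* NEWREF: create a new table key via lj_tab_newkey(); key passed as TValue *. */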
static void asm_newref(ASMState *as, IRIns *ir)
1255 {
1256   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
1257   IRRef args[3];
1258   if (ir->r == RID_SINK)
1259     return;
1260   args[0] = ASMREF_L;     /* lua_State *L */
1261   args[1] = ir->op1;      /* GCtab *t     */
1262   args[2] = ASMREF_TMP1;  /* cTValue *key */
1263   asm_setupresult(as, ir, ci);  /* TValue * */
1264   asm_gencall(as, ci, args);
1265   asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
1266 }
1267 
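/* LREF: load the lua_State pointer held for ASMREF_L. */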
static void asm_lref(ASMState *as, IRIns *ir)
1269 {
1270   Reg r = ra_dest(as, ir, RSET_GPR);
1271 #if LJ_TARGET_X86ORX64
1272   ra_left(as, r, ASMREF_L);
1273 #else
1274   ra_leftov(as, r, ASMREF_L);
1275 #endif
1276 }
1277 
1278 /* -- Calls --------------------------------------------------------------- */
1279 
1280 /* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
			    const CCallInfo *ci, IRRef *args)
1283 {
1284   uint32_t n = CCI_XNARGS(ci);
1285   /* Account for split args. */
1286   lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n);
1287   if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
1288   while (n-- > 1) {
1289     ir = IR(ir->op1);
1290     lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree");
1291     args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
1292   }
1293   args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
1294   lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree");
1295 }
1296 
1297 /* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
1299 {
1300   uint32_t nargs = 0;
1301   if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
1302     IRIns *ira = IR(ir->op1);
1303     nargs++;
1304     while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
1305   }
1306 #if LJ_HASFFI
1307   if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
1308     CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
1309     CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
1310     nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
1311 #if LJ_TARGET_X86
1312     nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
1313 #endif
1314   }
1315 #endif
1316   return (nargs | (ir->t.irt << CCI_OTSHIFT));
1317 }
1318 
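/* Assemble a call to a fixed helper (IRCallID) with op1/op2 as arguments. */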
static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
1320 {
1321   const CCallInfo *ci = &lj_ir_callinfo[id];
1322   IRRef args[2];
1323   args[0] = ir->op1;
1324   args[1] = ir->op2;
1325   asm_setupresult(as, ir, ci);
1326   asm_gencall(as, ci, args);
1327 }
1328 
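/* CALL*: generic call; callee from op2, arguments from the CARG chain in op1. */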
static void asm_call(ASMState *as, IRIns *ir)
1330 {
1331   IRRef args[CCI_NARGS_MAX];
1332   const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
1333   asm_collectargs(as, ir, ci, args);
1334   asm_setupresult(as, ir, ci);
1335   asm_gencall(as, ci, args);
1336 }
1337 
1338 /* -- PHI and loop handling ----------------------------------------------- */
1339 
1340 /* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
			  RegSet allow)
1343 {
1344   RegSet candidates = blocked & allow;
1345   if (candidates) {  /* If this register file has candidates. */
1346     /* Note: the set for ra_pick cannot be empty, since each register file
1347     ** has some registers never allocated to PHIs.
1348     */
1349     Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
1350     if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
1351       candidates = candidates & ~blockedby;
1352     down = rset_picktop(candidates);  /* Pick candidate PHI register. */
1353     ra_rename(as, down, up);  /* And rename it to the free register. */
1354   }
1355 }
1356 
1357 /* PHI register shuffling.
1358 **
1359 ** The allocator tries hard to preserve PHI register assignments across
1360 ** the loop body. Most of the time this loop does nothing, since there
1361 ** are no register mismatches.
1362 **
1363 ** If a register mismatch is detected and ...
1364 ** - the register is currently free: rename it.
1365 ** - the register is blocked by an invariant: restore/remat and rename it.
1366 ** - Otherwise the register is used by another PHI, so mark it as blocked.
1367 **
1368 ** The renames are order-sensitive, so just retry the loop if a register
1369 ** is marked as blocked, but has been freed in the meantime. A cycle is
1370 ** detected if all of the blocked registers are allocated. To break the
1371 ** cycle rename one of them to a free register and retry.
1372 **
1373 ** Note that PHI spill slots are kept in sync and don't need to be shuffled.
1374 */
static void asm_phi_shuffle(ASMState *as)
1376 {
1377   RegSet work;
1378 
1379   /* Find and resolve PHI register mismatches. */
1380   for (;;) {
1381     RegSet blocked = RSET_EMPTY;
1382     RegSet blockedby = RSET_EMPTY;
1383     RegSet phiset = as->phiset;
1384     while (phiset) {  /* Check all left PHI operand registers. */
1385       Reg r = rset_pickbot(phiset);
1386       IRIns *irl = IR(as->phireg[r]);
1387       Reg left = irl->r;
1388       if (r != left) {  /* Mismatch? */
1389 	if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
1390 	  IRRef ref = regcost_ref(as->cost[r]);
1391 	  /* Blocked by other PHI (w/reg)? */
1392 	  if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
1393 	    rset_set(blocked, r);
1394 	    if (ra_hasreg(left))
1395 	      rset_set(blockedby, left);
1396 	    left = RID_NONE;
1397 	  } else {  /* Otherwise grab register from invariant. */
1398 	    ra_restore(as, ref);
1399 	    checkmclim(as);
1400 	  }
1401 	}
1402 	if (ra_hasreg(left)) {
1403 	  ra_rename(as, left, r);
1404 	  checkmclim(as);
1405 	}
1406       }
1407       rset_clear(phiset, r);
1408     }
1409     if (!blocked) break;  /* Finished. */
1410     if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
1411       asm_phi_break(as, blocked, blockedby, RSET_GPR);
1412       if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
1413       checkmclim(as);
1414     }  /* Else retry some more renames. */
1415   }
1416 
1417   /* Restore/remat invariants whose registers are modified inside the loop. */
1418 #if !LJ_SOFTFP
1419   work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
1420   while (work) {
1421     Reg r = rset_pickbot(work);
1422     ra_restore(as, regcost_ref(as->cost[r]));
1423     rset_clear(work, r);
1424     checkmclim(as);
1425   }
1426 #endif
1427   work = as->modset & ~(as->freeset | as->phiset);
1428   while (work) {
1429     Reg r = rset_pickbot(work);
1430     ra_restore(as, regcost_ref(as->cost[r]));
1431     rset_clear(work, r);
1432     checkmclim(as);
1433   }
1434 
1435   /* Allocate and save all unsaved PHI regs and clear marks. */
1436   work = as->phiset;
1437   while (work) {
1438     Reg r = rset_picktop(work);
1439     IRRef lref = as->phireg[r];
1440     IRIns *ir = IR(lref);
1441     if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
1442       irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
1443       ra_alloc1(as, lref, RID2RSET(r));
1444       ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
1445       checkmclim(as);
1446     }
1447     rset_clear(work, r);
1448   }
1449 }
1450 
1451 /* Copy unsynced left/right PHI spill slots. Rarely needed. */
static void asm_phi_copyspill(ASMState *as)
1453 {
1454   int need = 0;
1455   IRIns *ir;
1456   for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
1457     if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
1458       need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
1459   if ((need & 1)) {  /* Copy integer spill slots. */
1460 #if !LJ_TARGET_X86ORX64
1461     Reg r = RID_TMP;
1462 #else
1463     Reg r = RID_RET;
1464     if ((as->freeset & RSET_GPR))
1465       r = rset_pickbot((as->freeset & RSET_GPR));
1466     else
1467       emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1468 #endif
1469     for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
1470       if (ra_hasspill(ir->s)) {
1471 	IRIns *irl = IR(ir->op1);
1472 	if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
1473 	  emit_spstore(as, irl, r, sps_scale(irl->s));
1474 	  emit_spload(as, ir, r, sps_scale(ir->s));
1475 	  checkmclim(as);
1476 	}
1477       }
1478     }
1479 #if LJ_TARGET_X86ORX64
1480     if (!rset_test(as->freeset, r))
1481       emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1482 #endif
1483   }
1484 #if !LJ_SOFTFP
1485   if ((need & 2)) {  /* Copy FP spill slots. */
1486 #if LJ_TARGET_X86
1487     Reg r = RID_XMM0;
1488 #else
1489     Reg r = RID_FPRET;
1490 #endif
1491     if ((as->freeset & RSET_FPR))
1492       r = rset_pickbot((as->freeset & RSET_FPR));
1493     if (!rset_test(as->freeset, r))
1494       emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1495     for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
1496       if (ra_hasspill(ir->s)) {
1497 	IRIns *irl = IR(ir->op1);
1498 	if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
1499 	  emit_spstore(as, irl, r, sps_scale(irl->s));
1500 	  emit_spload(as, ir, r, sps_scale(ir->s));
1501 	  checkmclim(as);
1502 	}
1503       }
1504     }
1505     if (!rset_test(as->freeset, r))
1506       emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1507   }
1508 #endif
1509 }
1510 
1511 /* Emit renames for left PHIs which are only spilled outside the loop. */
static void asm_phi_fixup(ASMState *as)
1513 {
1514   RegSet work = as->phiset;
1515   while (work) {
1516     Reg r = rset_picktop(work);
1517     IRRef lref = as->phireg[r];
1518     IRIns *ir = IR(lref);
1519     if (irt_ismarked(ir->t)) {
1520       irt_clearmark(ir->t);
1521       /* Left PHI gained a spill slot before the loop? */
1522       if (ra_hasspill(ir->s)) {
1523 	ra_addrename(as, r, lref, as->loopsnapno);
1524       }
1525     }
1526     rset_clear(work, r);
1527   }
1528 }
1529 
1530 /* Setup right PHI reference. */
static void asm_phi(ASMState *as, IRIns *ir)
1532 {
1533   RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
1534 		 ~as->phiset;
1535   RegSet afree = (as->freeset & allow);
1536   IRIns *irl = IR(ir->op1);
1537   IRIns *irr = IR(ir->op2);
1538   if (ir->r == RID_SINK)  /* Sink PHI. */
1539     return;
1540   /* Spill slot shuffling is not implemented yet (but rarely needed). */
1541   if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
1542     lj_trace_err(as->J, LJ_TRERR_NYIPHI);
1543   /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
1544   if ((afree & (afree-1))) {  /* Two or more free registers? */
1545     Reg r;
1546     if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
1547       r = ra_allocref(as, ir->op2, allow);
1548     } else {  /* Duplicate right PHI, need a copy (rare). */
1549       r = ra_scratch(as, allow);
1550       emit_movrr(as, irr, r, irr->r);
1551     }
1552     ir->r = (uint8_t)r;
1553     rset_set(as->phiset, r);
1554     as->phireg[r] = (IRRef1)ir->op1;
1555     irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
1556     if (ra_noreg(irl->r))
1557       ra_sethint(irl->r, r); /* Set register hint for left PHI. */
1558   } else {  /* Otherwise allocate a spill slot. */
1559     /* This is overly restrictive, but it triggers only on synthetic code. */
1560     if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
1561       lj_trace_err(as->J, LJ_TRERR_NYIPHI);
1562     ra_spill(as, ir);
1563     irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
1564   }
1565 }
1566 
1567 static void asm_loop_fixup(ASMState *as);
1568 
1569 /* Middle part of a loop. */
static void asm_loop(ASMState *as)
1571 {
1572   MCode *mcspill;
1573   /* LOOP is a guard, so the snapno is up to date. */
1574   as->loopsnapno = as->snapno;
1575   if (as->gcsteps)
1576     asm_gc_check(as);
1577   /* LOOP marks the transition from the variant to the invariant part. */
1578   as->flagmcp = as->invmcp = NULL;
1579   as->sectref = 0;
1580   if (!neverfuse(as)) as->fuseref = 0;
1581   asm_phi_shuffle(as);
1582   mcspill = as->mcp;
1583   asm_phi_copyspill(as);
1584   asm_loop_fixup(as);
1585   as->mcloop = as->mcp;
1586   RA_DBGX((as, "===== LOOP ====="));
1587   if (!as->realign) RA_DBG_FLUSH();
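  /* The copies emitted by asm_phi_copyspill must only run when the loop is
  ** re-entered via the backedge (at mcloop); make the fall-through from the
  ** invariant part jump over them. */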
1588   if (as->mcp != mcspill)
1589     emit_jmp(as, mcspill);
1590 }
1591 
1592 /* -- Target-specific assembler ------------------------------------------- */
1593 
1594 #if LJ_TARGET_X86ORX64
1595 #include "lj_asm_x86.h"
1596 #elif LJ_TARGET_ARM
1597 #include "lj_asm_arm.h"
1598 #elif LJ_TARGET_ARM64
1599 #include "lj_asm_arm64.h"
1600 #elif LJ_TARGET_PPC
1601 #include "lj_asm_ppc.h"
1602 #elif LJ_TARGET_MIPS
1603 #include "lj_asm_mips.h"
1604 #else
1605 #error "Missing assembler for target CPU"
1606 #endif
1607 
1608 /* -- Common instruction helpers ------------------------------------------ */
1609 
1610 #if !LJ_SOFTFP32
1611 #if !LJ_TARGET_X86ORX64
1612 #define asm_ldexp(as, ir)	asm_callid(as, ir, IRCALL_ldexp)
1613 #define asm_fppowi(as, ir)	asm_callid(as, ir, IRCALL_lj_vm_powi)
1614 #endif
1615 
1616 static void asm_pow(ASMState *as, IRIns *ir)
1617 {
1618 #if LJ_64 && LJ_HASFFI
1619   if (!irt_isnum(ir->t))
1620     asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
1621 					  IRCALL_lj_carith_powu64);
1622   else
1623 #endif
1624   if (irt_isnum(IR(ir->op2)->t))
1625     asm_callid(as, ir, IRCALL_pow);
1626   else
1627     asm_fppowi(as, ir);
1628 }
1629 
1630 static void asm_div(ASMState *as, IRIns *ir)
1631 {
1632 #if LJ_64 && LJ_HASFFI
1633   if (!irt_isnum(ir->t))
1634     asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
1635 					  IRCALL_lj_carith_divu64);
1636   else
1637 #endif
1638     asm_fpdiv(as, ir);
1639 }
1640 #endif
1641 
1642 static void asm_mod(ASMState *as, IRIns *ir)
1643 {
1644 #if LJ_64 && LJ_HASFFI
1645   if (!irt_isint(ir->t))
1646     asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
1647 					  IRCALL_lj_carith_modu64);
1648   else
1649 #endif
1650     asm_callid(as, ir, IRCALL_lj_vm_modi);
1651 }
1652 
1653 static void asm_fuseequal(ASMState *as, IRIns *ir)
1654 {
1655   /* Fuse HREF + EQ/NE. */
1656   if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
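    /* The HREF is assembled as part of the fused compare; decrementing
    ** curins makes the main backwards loop skip it. */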
1657     as->curins--;
1658     asm_href(as, ir-1, (IROp)ir->o);
1659   } else {
1660     asm_equal(as, ir);
1661   }
1662 }
1663 
1664 static void asm_alen(ASMState *as, IRIns *ir)
1665 {
1666   asm_callid(as, ir, ir->op2 == REF_NIL ? IRCALL_lj_tab_len :
1667 					  IRCALL_lj_tab_len_hint);
1668 }
1669 
1670 /* -- Instruction dispatch ------------------------------------------------ */
1671 
1672 /* Assemble a single instruction. */
1673 static void asm_ir(ASMState *as, IRIns *ir)
1674 {
1675   switch ((IROp)ir->o) {
1676   /* Miscellaneous ops. */
1677   case IR_LOOP: asm_loop(as); break;
1678   case IR_NOP: case IR_XBAR:
1679     lj_assertA(!ra_used(ir),
1680 	       "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS);
1681     break;
1682   case IR_USE:
1683     ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
1684   case IR_PHI: asm_phi(as, ir); break;
1685   case IR_HIOP: asm_hiop(as, ir); break;
1686   case IR_GCSTEP: asm_gcstep(as, ir); break;
1687   case IR_PROF: asm_prof(as, ir); break;
1688 
1689   /* Guarded assertions. */
1690   case IR_LT: case IR_GE: case IR_LE: case IR_GT:
1691   case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
1692   case IR_ABC:
1693     asm_comp(as, ir);
1694     break;
1695   case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;
1696 
1697   case IR_RETF: asm_retf(as, ir); break;
1698 
1699   /* Bit ops. */
1700   case IR_BNOT: asm_bnot(as, ir); break;
1701   case IR_BSWAP: asm_bswap(as, ir); break;
1702   case IR_BAND: asm_band(as, ir); break;
1703   case IR_BOR: asm_bor(as, ir); break;
1704   case IR_BXOR: asm_bxor(as, ir); break;
1705   case IR_BSHL: asm_bshl(as, ir); break;
1706   case IR_BSHR: asm_bshr(as, ir); break;
1707   case IR_BSAR: asm_bsar(as, ir); break;
1708   case IR_BROL: asm_brol(as, ir); break;
1709   case IR_BROR: asm_bror(as, ir); break;
1710 
1711   /* Arithmetic ops. */
1712   case IR_ADD: asm_add(as, ir); break;
1713   case IR_SUB: asm_sub(as, ir); break;
1714   case IR_MUL: asm_mul(as, ir); break;
1715   case IR_MOD: asm_mod(as, ir); break;
1716   case IR_NEG: asm_neg(as, ir); break;
1717 #if LJ_SOFTFP32
1718   case IR_DIV: case IR_POW: case IR_ABS:
1719   case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
1720     /* Unused for LJ_SOFTFP32. */
1721     lj_assertA(0, "IR %04d with unused op %d",
1722 		  (int)(ir - as->ir) - REF_BIAS, ir->o);
1723     break;
1724 #else
1725   case IR_DIV: asm_div(as, ir); break;
1726   case IR_POW: asm_pow(as, ir); break;
1727   case IR_ABS: asm_abs(as, ir); break;
1728   case IR_LDEXP: asm_ldexp(as, ir); break;
1729   case IR_FPMATH: asm_fpmath(as, ir); break;
1730   case IR_TOBIT: asm_tobit(as, ir); break;
1731 #endif
1732   case IR_MIN: asm_min(as, ir); break;
1733   case IR_MAX: asm_max(as, ir); break;
1734 
1735   /* Overflow-checking arithmetic ops. */
1736   case IR_ADDOV: asm_addov(as, ir); break;
1737   case IR_SUBOV: asm_subov(as, ir); break;
1738   case IR_MULOV: asm_mulov(as, ir); break;
1739 
1740   /* Memory references. */
1741   case IR_AREF: asm_aref(as, ir); break;
1742   case IR_HREF: asm_href(as, ir, 0); break;
1743   case IR_HREFK: asm_hrefk(as, ir); break;
1744   case IR_NEWREF: asm_newref(as, ir); break;
1745   case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
1746   case IR_FREF: asm_fref(as, ir); break;
1747   case IR_STRREF: asm_strref(as, ir); break;
1748   case IR_LREF: asm_lref(as, ir); break;
1749 
1750   /* Loads and stores. */
1751   case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
1752     asm_ahuvload(as, ir);
1753     break;
1754   case IR_FLOAD: asm_fload(as, ir); break;
1755   case IR_XLOAD: asm_xload(as, ir); break;
1756   case IR_SLOAD: asm_sload(as, ir); break;
1757   case IR_ALEN: asm_alen(as, ir); break;
1758 
1759   case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
1760   case IR_FSTORE: asm_fstore(as, ir); break;
1761   case IR_XSTORE: asm_xstore(as, ir); break;
1762 
1763   /* Allocations. */
1764   case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
1765   case IR_TNEW: asm_tnew(as, ir); break;
1766   case IR_TDUP: asm_tdup(as, ir); break;
1767   case IR_CNEW: case IR_CNEWI:
1768 #if LJ_HASFFI
1769     asm_cnew(as, ir);
1770 #else
1771     lj_assertA(0, "IR %04d with unused op %d",
1772 		  (int)(ir - as->ir) - REF_BIAS, ir->o);
1773 #endif
1774     break;
1775 
1776   /* Buffer operations. */
1777   case IR_BUFHDR: asm_bufhdr(as, ir); break;
1778   case IR_BUFPUT: asm_bufput(as, ir); break;
1779   case IR_BUFSTR: asm_bufstr(as, ir); break;
1780 
1781   /* Write barriers. */
1782   case IR_TBAR: asm_tbar(as, ir); break;
1783   case IR_OBAR: asm_obar(as, ir); break;
1784 
1785   /* Type conversions. */
1786   case IR_CONV: asm_conv(as, ir); break;
1787   case IR_TOSTR: asm_tostr(as, ir); break;
1788   case IR_STRTO: asm_strto(as, ir); break;
1789 
1790   /* Calls. */
1791   case IR_CALLA:
1792     as->gcsteps++;
1793     /* fallthrough */
1794   case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
1795   case IR_CALLXS: asm_callx(as, ir); break;
1796   case IR_CARG: break;
1797 
1798   default:
1799     setintV(&as->J->errinfo, ir->o);
1800     lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
1801     break;
1802   }
1803 }
1804 
1805 /* -- Head of trace ------------------------------------------------------- */
1806 
1807 /* Head of a root trace. */
1808 static void asm_head_root(ASMState *as)
1809 {
1810   int32_t spadj;
1811   asm_head_root_base(as);
1812   emit_setvmstate(as, (int32_t)as->T->traceno);
1813   spadj = asm_stack_adjust(as);
1814   as->T->spadjust = (uint16_t)spadj;
1815   emit_spsub(as, spadj);
1816   /* Root traces assume a checked stack for the starting proto. */
1817   as->T->topslot = gcref(as->T->startpt)->pt.framesize;
1818 }
1819 
1820 /* Head of a side trace.
1821 **
1822 ** The current simplistic algorithm requires that all slots inherited
1823 ** from the parent are live in a register between pass 2 and pass 3. This
1824 ** avoids the complexity of stack slot shuffling. But of course this may
1825 ** overflow the register set in some cases and cause the dreaded error:
1826 ** "NYI: register coalescing too complex". A refined algorithm is needed.
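**
** Pass 1 below scans the parent SLOADs/PVALs and coalesces registers that
** already match the parent. Pass 2 allocates registers for values that only
** have a spill slot here, pass 3 reloads registers from parent spill slots,
** and a final loop shuffles the remaining live parent registers into place,
** breaking cycles by renaming one target to a temporary register.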
1827 */
1828 static void asm_head_side(ASMState *as)
1829 {
1830   IRRef1 sloadins[RID_MAX];
1831   RegSet allow = RSET_ALL;  /* Inverse of all coalesced registers. */
1832   RegSet live = RSET_EMPTY;  /* Live parent registers. */
1833   IRIns *irp = &as->parent->ir[REF_BASE];  /* Parent base. */
1834   int32_t spadj, spdelta;
1835   int pass2 = 0;
1836   int pass3 = 0;
1837   IRRef i;
1838 
1839   if (as->snapno && as->topslot > as->parent->topslot) {
1840     /* Force snap #0 alloc to prevent register overwrite in stack check. */
1841     as->snapno = 0;
1842     asm_snap_alloc(as);
1843   }
1844   allow = asm_head_side_base(as, irp, allow);
1845 
1846   /* Scan all parent SLOADs and collect register dependencies. */
1847   for (i = as->stopins; i > REF_BASE; i--) {
1848     IRIns *ir = IR(i);
1849     RegSP rs;
1850     lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
1851 	       (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL,
1852 	       "IR %04d has bad parent op %d",
1853 	       (int)(ir - as->ir) - REF_BIAS, ir->o);
1854     rs = as->parentmap[i - REF_FIRST];
1855     if (ra_hasreg(ir->r)) {
1856       rset_clear(allow, ir->r);
1857       if (ra_hasspill(ir->s)) {
1858 	ra_save(as, ir, ir->r);
1859 	checkmclim(as);
1860       }
1861     } else if (ra_hasspill(ir->s)) {
1862       irt_setmark(ir->t);
1863       pass2 = 1;
1864     }
1865     if (ir->r == rs) {  /* Coalesce matching registers right now. */
1866       ra_free(as, ir->r);
1867     } else if (ra_hasspill(regsp_spill(rs))) {
1868       if (ra_hasreg(ir->r))
1869 	pass3 = 1;
1870     } else if (ra_used(ir)) {
1871       sloadins[rs] = (IRRef1)i;
1872       rset_set(live, rs);  /* Block live parent register. */
1873     }
1874   }
1875 
1876   /* Calculate stack frame adjustment. */
1877   spadj = asm_stack_adjust(as);
1878   spdelta = spadj - (int32_t)as->parent->spadjust;
1879   if (spdelta < 0) {  /* Don't shrink the stack frame. */
1880     spadj = (int32_t)as->parent->spadjust;
1881     spdelta = 0;
1882   }
1883   as->T->spadjust = (uint16_t)spadj;
1884 
1885   /* Reload spilled target registers. */
1886   if (pass2) {
1887     for (i = as->stopins; i > REF_BASE; i--) {
1888       IRIns *ir = IR(i);
1889       if (irt_ismarked(ir->t)) {
1890 	RegSet mask;
1891 	Reg r;
1892 	RegSP rs;
1893 	irt_clearmark(ir->t);
1894 	rs = as->parentmap[i - REF_FIRST];
1895 	if (!ra_hasspill(regsp_spill(rs)))
1896 	  ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
1897 	else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
1898 	  continue;  /* Same spill slot, do nothing. */
1899 	mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
1900 	if (mask == RSET_EMPTY)
1901 	  lj_trace_err(as->J, LJ_TRERR_NYICOAL);
1902 	r = ra_allocref(as, i, mask);
1903 	ra_save(as, ir, r);
1904 	rset_clear(allow, r);
1905 	if (r == rs) {  /* Coalesce matching registers right now. */
1906 	  ra_free(as, r);
1907 	  rset_clear(live, r);
1908 	} else if (ra_hasspill(regsp_spill(rs))) {
1909 	  pass3 = 1;
1910 	}
1911 	checkmclim(as);
1912       }
1913     }
1914   }
1915 
1916   /* Store trace number and adjust stack frame relative to the parent. */
1917   emit_setvmstate(as, (int32_t)as->T->traceno);
1918   emit_spsub(as, spdelta);
1919 
1920 #if !LJ_TARGET_X86ORX64
1921   /* Restore BASE register from parent spill slot. */
1922   if (ra_hasspill(irp->s))
1923     emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
1924 #endif
1925 
1926   /* Restore target registers from parent spill slots. */
1927   if (pass3) {
1928     RegSet work = ~as->freeset & RSET_ALL;
1929     while (work) {
1930       Reg r = rset_pickbot(work);
1931       IRRef ref = regcost_ref(as->cost[r]);
1932       RegSP rs = as->parentmap[ref - REF_FIRST];
1933       rset_clear(work, r);
1934       if (ra_hasspill(regsp_spill(rs))) {
1935 	int32_t ofs = sps_scale(regsp_spill(rs));
1936 	ra_free(as, r);
1937 	emit_spload(as, IR(ref), r, ofs);
1938 	checkmclim(as);
1939       }
1940     }
1941   }
1942 
1943   /* Shuffle registers to match up target regs with parent regs. */
1944   for (;;) {
1945     RegSet work;
1946 
1947     /* Repeatedly coalesce free live registers by moving to their target. */
1948     while ((work = as->freeset & live) != RSET_EMPTY) {
1949       Reg rp = rset_pickbot(work);
1950       IRIns *ir = IR(sloadins[rp]);
1951       rset_clear(live, rp);
1952       rset_clear(allow, rp);
1953       ra_free(as, ir->r);
1954       emit_movrr(as, ir, ir->r, rp);
1955       checkmclim(as);
1956     }
1957 
1958     /* We're done if no live registers remain. */
1959     if (live == RSET_EMPTY)
1960       break;
1961 
1962     /* Break cycles by renaming one target to a temp. register. */
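    /* Renaming evicts the value currently occupying one live parent register
    ** into a free temp, which frees that register so the coalescing loop
    ** above can make progress. */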
1963     if (live & RSET_GPR) {
1964       RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
1965       if (tmpset == RSET_EMPTY)
1966 	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
1967       ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
1968     }
1969     if (!LJ_SOFTFP && (live & RSET_FPR)) {
1970       RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
1971       if (tmpset == RSET_EMPTY)
1972 	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
1973       ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
1974     }
1975     checkmclim(as);
1976     /* Continue with coalescing to fix up the broken cycle(s). */
1977   }
1978 
1979   /* Inherit top stack slot already checked by parent trace. */
1980   as->T->topslot = as->parent->topslot;
1981   if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
1982 #ifdef EXITSTATE_CHECKEXIT
1983     /* Highest exit + 1 indicates stack check. */
1984     ExitNo exitno = as->T->nsnap;
1985 #else
1986     /* Reuse the parent exit in the context of the parent trace. */
1987     ExitNo exitno = as->J->exitno;
1988 #endif
1989     as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
1990     asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
1991   }
1992 }
1993 
1994 /* -- Tail of trace ------------------------------------------------------- */
1995 
1996 /* Get base slot for a snapshot. */
1997 static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
1998 {
1999   SnapEntry *map = &as->T->snapmap[snap->mapofs];
2000   MSize n;
2001   for (n = snap->nent; n > 0; n--) {
2002     SnapEntry sn = map[n-1];
2003     if ((sn & SNAP_FRAME)) {
2004       *gotframe = 1;
2005       return snap_slot(sn) - LJ_FR2;
2006     }
2007   }
2008   return 0;
2009 }
2010 
2011 /* Link to another trace. */
2012 static void asm_tail_link(ASMState *as)
2013 {
2014   SnapNo snapno = as->T->nsnap-1;  /* Last snapshot. */
2015   SnapShot *snap = &as->T->snap[snapno];
2016   int gotframe = 0;
2017   BCReg baseslot = asm_baseslot(as, snap, &gotframe);
2018 
2019   as->topslot = snap->topslot;
2020   checkmclim(as);
2021   ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));
2022 
2023   if (as->T->link == 0) {
2024     /* Setup fixed registers for exit to interpreter. */
2025     const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
2026     int32_t mres;
2027     if (bc_op(*pc) == BC_JLOOP) {  /* NYI: find a better way to do this. */
2028       BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
2029       if (bc_isret(bc_op(*retpc)))
2030 	pc = retpc;
2031     }
2032 #if LJ_GC64
2033     emit_loadu64(as, RID_LPC, u64ptr(pc));
2034 #else
2035     ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
2036     ra_allockreg(as, i32ptr(pc), RID_LPC);
2037 #endif
2038     mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
2039     switch (bc_op(*pc)) {
2040     case BC_CALLM: case BC_CALLMT:
2041       mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
2042     case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
2043     case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
2044     default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
2045     }
2046     ra_allockreg(as, mres, RID_RET);  /* Return MULTRES or 0. */
2047   } else if (baseslot) {
2048     /* Save modified BASE for linking to trace with higher start frame. */
2049     emit_setgl(as, RID_BASE, jit_base);
2050   }
2051   emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
2052 
2053   if (as->J->ktrace) {  /* Patch ktrace slot with the final GCtrace pointer. */
2054     setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
2055     IR(as->J->ktrace)->o = IR_KGC;
2056   }
2057 
2058   /* Sync the interpreter state with the on-trace state. */
2059   asm_stack_restore(as, snap);
2060 
2061   /* Root traces that add frames need to check the stack at the end. */
2062   if (!as->parent && gotframe)
2063     asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
2064 }
2065 
2066 /* -- Trace setup --------------------------------------------------------- */
2067 
2068 /* Clear reg/sp for all instructions and add register hints. */
2069 static void asm_setup_regsp(ASMState *as)
2070 {
2071   GCtrace *T = as->T;
2072   int sink = T->sinktags;
2073   IRRef nins = T->nins;
2074   IRIns *ir, *lastir;
2075   int inloop;
2076 #if LJ_TARGET_ARM
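  /* Packed nibbles of GPR hints for loads; the low nibble is consumed and
  ** the word rotated in the IR_SLOAD/IR_*LOAD cases below. */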
2077   uint32_t rload = 0xa6402a64;
2078 #endif
2079 
2080   ra_setup(as);
2081 
2082   /* Clear reg/sp for constants. */
2083   for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
2084     ir->prev = REGSP_INIT;
2085     if (irt_is64(ir->t) && ir->o != IR_KNULL) {
2086 #if LJ_GC64
2087       /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
2088       ir->i = 0;  /* Will become non-zero only for RIP-relative addresses. */
2089 #else
2090       /* Make life easier for backends by putting address of constant in i. */
2091       ir->i = (int32_t)(intptr_t)(ir+1);
2092 #endif
2093       ir++;
2094     }
2095   }
2096 
2097   /* REF_BASE is used for implicit references to the BASE register. */
2098   lastir->prev = REGSP_HINT(RID_BASE);
2099 
2100   as->snaprename = nins;
2101   as->snapref = nins;
2102   as->snapno = T->nsnap;
2103 
2104   as->stopins = REF_BASE;
2105   as->orignins = nins;
2106   as->curins = nins;
2107 
2108   /* Setup register hints for parent link instructions. */
2109   ir = IR(REF_FIRST);
2110   if (as->parent) {
2111     uint16_t *p;
2112     lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir);
2113     if (lastir - ir > LJ_MAX_JSLOTS)
2114       lj_trace_err(as->J, LJ_TRERR_NYICOAL);
2115     as->stopins = (IRRef)((lastir-1) - as->ir);
2116     for (p = as->parentmap; ir < lastir; ir++) {
2117       RegSP rs = ir->prev;
2118       *p++ = (uint16_t)rs;  /* Copy original parent RegSP to parentmap. */
2119       if (!ra_hasspill(regsp_spill(rs)))
2120 	ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
2121       else
2122 	ir->prev = REGSP_INIT;
2123     }
2124   }
2125 
2126   inloop = 0;
2127   as->evenspill = SPS_FIRST;
2128   for (lastir = IR(nins); ir < lastir; ir++) {
2129     if (sink) {
2130       if (ir->r == RID_SINK)
2131 	continue;
2132       if (ir->r == RID_SUNK) {  /* Revert after ASM restart. */
2133 	ir->r = RID_SINK;
2134 	continue;
2135       }
2136     }
2137     switch (ir->o) {
2138     case IR_LOOP:
2139       inloop = 1;
2140       break;
2141 #if LJ_TARGET_ARM
2142     case IR_SLOAD:
2143       if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
2144 	break;
2145       /* fallthrough */
2146     case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2147       if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
2148       ir->prev = (uint16_t)REGSP_HINT((rload & 15));
2149       rload = lj_ror(rload, 4);
2150       continue;
2151 #endif
2152     case IR_CALLXS: {
2153       CCallInfo ci;
2154       ci.flags = asm_callx_flags(as, ir);
2155       ir->prev = asm_setup_call_slots(as, ir, &ci);
2156       if (inloop)
2157 	as->modset |= RSET_SCRATCH;
2158       continue;
2159       }
2160     case IR_CALLN: case IR_CALLA: case IR_CALLL: case IR_CALLS: {
2161       const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
2162       ir->prev = asm_setup_call_slots(as, ir, ci);
2163       if (inloop)
2164 	as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
2165 		      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
2166       continue;
2167       }
2168 #if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
2169     case IR_HIOP:
2170       switch ((ir-1)->o) {
2171 #if LJ_SOFTFP && LJ_TARGET_ARM
2172       case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2173 	if (ra_hashint((ir-1)->r)) {
2174 	  ir->prev = (ir-1)->prev + 1;
2175 	  continue;
2176 	}
2177 	break;
2178 #endif
2179 #if !LJ_SOFTFP && LJ_NEED_FP64
2180       case IR_CONV:
2181 	if (irt_isfp((ir-1)->t)) {
2182 	  ir->prev = REGSP_HINT(RID_FPRET);
2183 	  continue;
2184 	}
2185 #endif
2186       /* fallthrough */
2187       case IR_CALLN: case IR_CALLXS:
2188 #if LJ_SOFTFP
2189       case IR_MIN: case IR_MAX:
2190 #endif
2191 	(ir-1)->prev = REGSP_HINT(RID_RETLO);
2192 	ir->prev = REGSP_HINT(RID_RETHI);
2193 	continue;
2194       default:
2195 	break;
2196       }
2197       break;
2198 #endif
2199 #if LJ_SOFTFP
2200     case IR_MIN: case IR_MAX:
2201       if ((ir+1)->o != IR_HIOP) break;
2202 #endif
2203     /* fallthrough */
2204     /* C calls evict all scratch regs and return results in RID_RET. */
2205     case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
2206       if (REGARG_NUMGPR < 3 && as->evenspill < 3)
2207 	as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
2208 #if LJ_TARGET_X86 && LJ_HASFFI
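      /* The 'if (0)' body is only entered via the IR_CNEW case label below,
      ** so the 4-arg adjustment applies to IR_CNEW alone while sharing the
      ** common tail after the block. */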
2209       if (0) {
2210     case IR_CNEW:
2211 	if (ir->op2 != REF_NIL && as->evenspill < 4)
2212 	  as->evenspill = 4;  /* lj_cdata_newv needs 4 args. */
2213       }
2214       /* fallthrough */
2215 #else
2216       /* fallthrough */
2217     case IR_CNEW:
2218 #endif
2219       /* fallthrough */
2220     case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
2221     case IR_BUFSTR:
2222       ir->prev = REGSP_HINT(RID_RET);
2223       if (inloop)
2224 	as->modset = RSET_SCRATCH;
2225       continue;
2226     case IR_STRTO: case IR_OBAR:
2227       if (inloop)
2228 	as->modset = RSET_SCRATCH;
2229       break;
2230 #if !LJ_SOFTFP
2231 #if !LJ_TARGET_X86ORX64
2232     case IR_LDEXP:
2233 #endif
2234 #endif
2235       /* fallthrough */
2236     case IR_POW:
2237       if (!LJ_SOFTFP && irt_isnum(ir->t)) {
2238 	if (inloop)
2239 	  as->modset |= RSET_SCRATCH;
2240 #if LJ_TARGET_X86
2241 	if (irt_isnum(IR(ir->op2)->t)) {
2242 	  if (as->evenspill < 4)  /* Leave room to call pow(). */
2243 	    as->evenspill = 4;
2244 	}
2245 	break;
2246 #else
2247 	ir->prev = REGSP_HINT(RID_FPRET);
2248 	continue;
2249 #endif
2250       }
2251       /* fallthrough */ /* for integer POW */
2252     case IR_DIV: case IR_MOD:
2253       if (!irt_isnum(ir->t)) {
2254 	ir->prev = REGSP_HINT(RID_RET);
2255 	if (inloop)
2256 	  as->modset |= (RSET_SCRATCH & RSET_GPR);
2257 	continue;
2258       }
2259       break;
2260     case IR_FPMATH:
2261 #if LJ_TARGET_X86ORX64
2262       if (ir->op2 <= IRFPM_TRUNC) {
2263 	if (!(as->flags & JIT_F_SSE4_1)) {
2264 	  ir->prev = REGSP_HINT(RID_XMM0);
2265 	  if (inloop)
2266 	    as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
2267 	  continue;
2268 	}
2269 	break;
2270       }
2271 #endif
2272       if (inloop)
2273 	as->modset |= RSET_SCRATCH;
2274 #if LJ_TARGET_X86
2275       break;
2276 #else
2277       ir->prev = REGSP_HINT(RID_FPRET);
2278       continue;
2279 #endif
2280 #if LJ_TARGET_X86ORX64
2281     /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
2282     case IR_BSHL: case IR_BSHR: case IR_BSAR:
2283       if ((as->flags & JIT_F_BMI2))  /* Except if BMI2 is available. */
2284 	break;
2285       /* fallthrough */
2286     case IR_BROL: case IR_BROR:
2287       if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
2288 	IR(ir->op2)->r = REGSP_HINT(RID_ECX);
2289 	if (inloop)
2290 	  rset_set(as->modset, RID_ECX);
2291       }
2292       break;
2293 #endif
2294     /* Do not propagate hints across type conversions or loads. */
2295     case IR_TOBIT:
2296     case IR_XLOAD:
2297 #if !LJ_TARGET_ARM
2298     case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2299 #endif
2300       break;
2301     case IR_CONV:
2302       if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
2303 	  (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
2304 	break;
2305       /* fallthrough */
2306     default:
2307       /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
2308       if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
2309 	  ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
2310 	ir->prev = IR(ir->op1)->prev;
2311 	continue;
2312       }
2313       break;
2314     }
2315     ir->prev = REGSP_INIT;
2316   }
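  /* Keep evenspill even; a skipped odd slot is remembered in oddspill,
  ** presumably so it can be handed out for a later single-slot spill. */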
2317   if ((as->evenspill & 1))
2318     as->oddspill = as->evenspill++;
2319   else
2320     as->oddspill = 0;
2321 }
2322 
2323 /* -- Assembler core ------------------------------------------------------ */
2324 
2325 /* Assemble a trace. */
2326 void lj_asm_trace(jit_State *J, GCtrace *T)
2327 {
2328   ASMState as_;
2329   ASMState *as = &as_;
2330   MCode *origtop;
2331 
2332   /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
2333   {
2334     IRRef nins = T->nins;
2335     IRIns *ir = &T->ir[nins-1];
2336     if (ir->o == IR_NOP || ir->o == IR_RENAME) {
2337       do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
2338       T->nins = nins;
2339     }
2340   }
2341 
2342   /* Ensure an initialized instruction beyond the last one for HIOP checks. */
2343   /* This also allows one RENAME to be added without reallocating curfinal. */
2344   as->orignins = lj_ir_nextins(J);
2345   lj_ir_nop(&J->cur.ir[as->orignins]);
2346 
2347   /* Setup initial state. Copy some fields to reduce indirections. */
2348   as->J = J;
2349   as->T = T;
2350   J->curfinal = lj_trace_alloc(J->L, T);  /* This copies the IR, too. */
2351   as->flags = J->flags;
2352   as->loopref = J->loopref;
2353   as->realign = NULL;
2354   as->loopinv = 0;
2355   as->parent = J->parent ? traceref(J, J->parent) : NULL;
2356 
2357   /* Reserve MCode memory. */
2358   as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
2359   as->mcp = as->mctop;
2360   as->mclim = as->mcbot + MCLIM_REDZONE;
2361   asm_setup_target(as);
2362 
2363   /*
2364   ** This is a loop, because the MCode may have to be (re-)assembled
2365   ** multiple times:
2366   **
2367   ** 1. as->realign is set (and the assembly aborted), if the arch-specific
2368   **    backend wants the MCode to be aligned differently.
2369   **
2370   **    This is currently only the case on x86/x64, where small loops get
2371   **    an aligned loop body plus a short branch. Not much effort is wasted,
2372   **    because the abort happens very quickly and only once.
2373   **
2374   ** 2. The IR is immovable, since the MCode embeds pointers to various
2375   **    constants inside the IR. But RENAMEs may need to be added to the IR
2376   **    during assembly, which might grow and reallocate the IR. We check
2377   **    at the end if the IR (in J->cur.ir) has actually grown, resize the
2378   **    copy (in J->curfinal.ir) and try again.
2379   **
2380   **    95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
2381   **    2 RENAMEs and only 0.5% have more than that. That's why we opt to
2382   **    always have one spare slot in the IR (see above), which means we
2383   **    have to redo the assembly for only ~2% of all traces.
2384   **
2385   **    Very, very rarely, this needs to be done repeatedly, since the
2386   **    location of constants inside the IR (actually, reachability from
2387   **    a global pointer) may affect register allocation and thus the
2388   **    number of RENAMEs.
2389   */
2390   for (;;) {
2391     as->mcp = as->mctop;
2392 #ifdef LUA_USE_ASSERT
2393     as->mcp_prev = as->mcp;
2394 #endif
2395     as->ir = J->curfinal->ir;  /* Use the copied IR. */
2396     as->curins = J->cur.nins = as->orignins;
2397 
2398     RA_DBG_START();
2399     RA_DBGX((as, "===== STOP ====="));
2400 
2401     /* General trace setup. Emit tail of trace. */
2402     asm_tail_prep(as);
2403     as->mcloop = NULL;
2404     as->flagmcp = NULL;
2405     as->topslot = 0;
2406     as->gcsteps = 0;
2407     as->sectref = as->loopref;
2408     as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
2409     asm_setup_regsp(as);
2410     if (!as->loopref)
2411       asm_tail_link(as);
2412 
2413     /* Assemble a trace in linear backwards order. */
2414     for (as->curins--; as->curins > as->stopins; as->curins--) {
2415       IRIns *ir = IR(as->curins);
2416       /* 64 bit types handled by SPLIT for 32 bit archs. */
2417       lj_assertA(!(LJ_32 && irt_isint64(ir->t)),
2418 		 "IR %04d has unsplit 64 bit type",
2419 		 (int)(ir - as->ir) - REF_BIAS);
2420       if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
2421 	continue;  /* Dead-code elimination can be soooo easy. */
2422       if (irt_isguard(ir->t))
2423 	asm_snap_prep(as);
2424       RA_DBG_REF();
2425       checkmclim(as);
2426       asm_ir(as, ir);
2427     }
2428 
2429     if (as->realign && J->curfinal->nins >= T->nins)
2430       continue;  /* Retry in case only the MCode needs to be realigned. */
2431 
2432     /* Emit head of trace. */
2433     RA_DBG_REF();
2434     checkmclim(as);
2435     if (as->gcsteps > 0) {
2436       as->curins = as->T->snap[0].ref;
2437       asm_snap_prep(as);  /* The GC check is a guard. */
2438       asm_gc_check(as);
2439       as->curins = as->stopins;
2440     }
2441     ra_evictk(as);
2442     if (as->parent)
2443       asm_head_side(as);
2444     else
2445       asm_head_root(as);
2446     asm_phi_fixup(as);
2447 
2448     if (J->curfinal->nins >= T->nins) {  /* IR didn't grow? */
2449       lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth");
2450       memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
2451 	     (T->nins - as->orignins) * sizeof(IRIns));  /* Copy RENAMEs. */
2452       T->nins = J->curfinal->nins;
2453       break;  /* Done. */
2454     }
2455 
2456     /* Otherwise try again with a bigger IR. */
2457     lj_trace_free(J2G(J), J->curfinal);
2458     J->curfinal = NULL;  /* In case lj_trace_alloc() OOMs. */
2459     J->curfinal = lj_trace_alloc(J->L, T);
2460     as->realign = NULL;
2461   }
2462 
2463   RA_DBGX((as, "===== START ===="));
2464   RA_DBG_FLUSH();
2465   if (as->freeset != RSET_ALL)
2466     lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */
2467 
2468   /* Set trace entry point before fixing up tail to allow link to self. */
2469   T->mcode = as->mcp;
2470   T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
2471   if (!as->loopref)
2472     asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
2473   T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
2474 #if LJ_TARGET_MCODE_FIXUP
2475   asm_mcode_fixup(T->mcode, T->szmcode);
2476 #endif
2477   lj_mcode_sync(T->mcode, origtop);
2478 }
2479 
2480 #undef IR
2481 
2482 #endif
2483