1 /*
2 ** IR assembler (SSA IR -> machine code).
3 ** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
4 */
5 
6 #define lj_asm_c
7 #define LUA_CORE
8 
9 #include "lj_obj.h"
10 
11 #if LJ_HASJIT
12 
13 #include "lj_gc.h"
14 #include "lj_buf.h"
15 #include "lj_str.h"
16 #include "lj_tab.h"
17 #include "lj_frame.h"
18 #if LJ_HASFFI
19 #include "lj_ctype.h"
20 #endif
21 #include "lj_ir.h"
22 #include "lj_jit.h"
23 #include "lj_ircall.h"
24 #include "lj_iropt.h"
25 #include "lj_mcode.h"
26 #include "lj_trace.h"
27 #include "lj_snap.h"
28 #include "lj_asm.h"
29 #include "lj_dispatch.h"
30 #include "lj_vm.h"
31 #include "lj_target.h"
32 
33 #ifdef LUA_USE_ASSERT
34 #include <stdio.h>
35 #endif
36 
37 /* -- Assembler state and common macros ----------------------------------- */
38 
39 /* Assembler state. */
40 typedef struct ASMState {
41   RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */
42 
43   MCode *mcp;		/* Current MCode pointer (grows down). */
44   MCode *mclim;		/* Lower limit for MCode memory + red zone. */
45 #ifdef LUA_USE_ASSERT
46   MCode *mcp_prev;	/* Red zone overflow check. */
47 #endif
48 
49   IRIns *ir;		/* Copy of pointer to IR instructions/constants. */
50   jit_State *J;		/* JIT compiler state. */
51 
52 #if LJ_TARGET_X86ORX64
53   x86ModRM mrm;		/* Fused x86 address operand. */
54 #endif
55 
56   RegSet freeset;	/* Set of free registers. */
57   RegSet modset;	/* Set of registers modified inside the loop. */
58   RegSet weakset;	/* Set of weakly referenced registers. */
59   RegSet phiset;	/* Set of PHI registers. */
60 
61   uint32_t flags;	/* Copy of JIT compiler flags. */
62   int loopinv;		/* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */
63 
64   int32_t evenspill;	/* Next even spill slot. */
65   int32_t oddspill;	/* Next odd spill slot (or 0). */
66 
67   IRRef curins;		/* Reference of current instruction. */
68   IRRef stopins;	/* Stop assembly before hitting this instruction. */
69   IRRef orignins;	/* Original T->nins. */
70 
71   IRRef snapref;	/* Current snapshot is active after this reference. */
72   IRRef snaprename;	/* Rename highwater mark for snapshot check. */
73   SnapNo snapno;	/* Current snapshot number. */
74   SnapNo loopsnapno;	/* Loop snapshot number. */
75   int snapalloc;	/* Current snapshot needs allocation. */
76   BloomFilter snapfilt1, snapfilt2;	/* Filled with snapshot refs. */
77 
78   IRRef fuseref;	/* Fusion limit (loopref, 0 or FUSE_DISABLED). */
79   IRRef sectref;	/* Section base reference (loopref or 0). */
80   IRRef loopref;	/* Reference of LOOP instruction (or 0). */
81 
82   BCReg topslot;	/* Number of slots for stack check (unless 0). */
83   int32_t gcsteps;	/* Accumulated number of GC steps (per section). */
84 
85   GCtrace *T;		/* Trace to assemble. */
86   GCtrace *parent;	/* Parent trace (or NULL). */
87 
88   MCode *mcbot;		/* Bottom of reserved MCode. */
89   MCode *mctop;		/* Top of generated MCode. */
90   MCode *mctoporig;	/* Original top of generated MCode. */
91   MCode *mcloop;	/* Pointer to loop MCode (or NULL). */
92   MCode *invmcp;	/* Points to invertible loop branch (or NULL). */
93   MCode *flagmcp;	/* Pending opportunity to merge flag setting ins. */
94   MCode *realign;	/* Realign loop if not NULL. */
95 
96 #ifdef RID_NUM_KREF
97   intptr_t krefk[RID_NUM_KREF];
98 #endif
99   IRRef1 phireg[RID_MAX];  /* PHI register references. */
100   uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent instruction to RegSP map. */
101 } ASMState;
102 
103 #ifdef LUA_USE_ASSERT
104 #define lj_assertA(c, ...)	lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
105 #else
106 #define lj_assertA(c, ...)	((void)as)
107 #endif
108 
109 #define IR(ref)			(&as->ir[(ref)])
110 
111 #define ASMREF_TMP1		REF_TRUE	/* Temp. register. */
112 #define ASMREF_TMP2		REF_FALSE	/* Temp. register. */
113 #define ASMREF_L		REF_NIL		/* Stores register for L. */
114 
115 /* Check for variant to invariant references. */
116 #define iscrossref(as, ref)	((ref) < as->sectref)
117 
118 /* Inhibit memory op fusion from variant to invariant references. */
119 #define FUSE_DISABLED		(~(IRRef)0)
120 #define mayfuse(as, ref)	((ref) > as->fuseref)
121 #define neverfuse(as)		(as->fuseref == FUSE_DISABLED)
122 #define canfuse(as, ir)		(!neverfuse(as) && !irt_isphi((ir)->t))
123 #define opisfusableload(o) \
124   ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
125    (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
126 
127 /* Sparse limit checks using a red zone before the actual limit. */
128 #define MCLIM_REDZONE	64
129 
130 static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
131 {
132   lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
133 }
134 
135 static LJ_AINLINE void checkmclim(ASMState *as)
136 {
137 #ifdef LUA_USE_ASSERT
138   if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
139     IRIns *ir = IR(as->curins+1);
140     lj_assertA(0, "red zone overflow: %p IR %04d  %02d %04d %04d\n", as->mcp,
141       as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
142   }
143 #endif
144   if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
145 #ifdef LUA_USE_ASSERT
146   as->mcp_prev = as->mcp;
147 #endif
148 }
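/* Annotation (not in the original source): a hedged sketch of how the red
** zone is used. An emitter may overshoot as->mclim by up to MCLIM_REDZONE
** bytes per instruction, because the limit is only checked between
** instructions (asm_ir below is a hypothetical placeholder):
**
**   for each IR instruction (backwards) {
**     asm_ir(as, ir);   // may write a few bytes into the red zone
**     checkmclim(as);   // sparse limit check catches the overshoot here
**   }
*/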
149 
150 #ifdef RID_NUM_KREF
151 #define ra_iskref(ref)		((ref) < RID_NUM_KREF)
152 #define ra_krefreg(ref)		((Reg)(RID_MIN_KREF + (Reg)(ref)))
153 #define ra_krefk(as, ref)	(as->krefk[(ref)])
154 
155 static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
156 {
157   IRRef ref = (IRRef)(r - RID_MIN_KREF);
158   as->krefk[ref] = k;
159   as->cost[r] = REGCOST(ref, ref);
160 }
161 
162 #else
163 #define ra_iskref(ref)		0
164 #define ra_krefreg(ref)		RID_MIN_GPR
165 #define ra_krefk(as, ref)	0
166 #endif
167 
168 /* Arch-specific field offsets. */
169 static const uint8_t field_ofs[IRFL__MAX+1] = {
170 #define FLOFS(name, ofs)	(uint8_t)(ofs),
171 IRFLDEF(FLOFS)
172 #undef FLOFS
173   0
174 };
175 
176 /* -- Target-specific instruction emitter --------------------------------- */
177 
178 #if LJ_TARGET_X86ORX64
179 #include "lj_emit_x86.h"
180 #elif LJ_TARGET_ARM
181 #include "lj_emit_arm.h"
182 #elif LJ_TARGET_ARM64
183 #include "lj_emit_arm64.h"
184 #elif LJ_TARGET_PPC
185 #include "lj_emit_ppc.h"
186 #elif LJ_TARGET_MIPS
187 #include "lj_emit_mips.h"
188 #else
189 #error "Missing instruction emitter for target CPU"
190 #endif
191 
192 /* Generic load/store of register from/to stack slot. */
193 #define emit_spload(as, ir, r, ofs) \
194   emit_loadofs(as, ir, (r), RID_SP, (ofs))
195 #define emit_spstore(as, ir, r, ofs) \
196   emit_storeofs(as, ir, (r), RID_SP, (ofs))
197 
198 /* -- Register allocator debugging ---------------------------------------- */
199 
200 /* #define LUAJIT_DEBUG_RA */
201 
202 #ifdef LUAJIT_DEBUG_RA
203 
204 #include <stdio.h>
205 #include <stdarg.h>
206 
207 #define RIDNAME(name)	#name,
208 static const char *const ra_regname[] = {
209   GPRDEF(RIDNAME)
210   FPRDEF(RIDNAME)
211   VRIDDEF(RIDNAME)
212   NULL
213 };
214 #undef RIDNAME
215 
216 static char ra_dbg_buf[65536];
217 static char *ra_dbg_p;
218 static char *ra_dbg_merge;
219 static MCode *ra_dbg_mcp;
220 
221 static void ra_dstart(void)
222 {
223   ra_dbg_p = ra_dbg_buf;
224   ra_dbg_merge = NULL;
225   ra_dbg_mcp = NULL;
226 }
227 
228 static void ra_dflush(void)
229 {
230   fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
231   ra_dstart();
232 }
233 
234 static void ra_dprintf(ASMState *as, const char *fmt, ...)
235 {
236   char *p;
237   va_list argp;
238   va_start(argp, fmt);
239   p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
240   ra_dbg_mcp = NULL;
241   p += sprintf(p, "%08x  \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
242   for (;;) {
243     const char *e = strchr(fmt, '$');
244     if (e == NULL) break;
245     memcpy(p, fmt, (size_t)(e-fmt));
246     p += e-fmt;
247     if (e[1] == 'r') {
248       Reg r = va_arg(argp, Reg) & RID_MASK;
249       if (r <= RID_MAX) {
250 	const char *q;
251 	for (q = ra_regname[r]; *q; q++)
252 	  *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
253       } else {
254 	*p++ = '?';
255 	lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt);
256       }
257     } else if (e[1] == 'f' || e[1] == 'i') {
258       IRRef ref;
259       if (e[1] == 'f')
260 	ref = va_arg(argp, IRRef);
261       else
262 	ref = va_arg(argp, IRIns *) - as->ir;
263       if (ref >= REF_BIAS)
264 	p += sprintf(p, "%04d", ref - REF_BIAS);
265       else
266 	p += sprintf(p, "K%03d", REF_BIAS - ref);
267     } else if (e[1] == 's') {
268       uint32_t slot = va_arg(argp, uint32_t);
269       p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
270     } else if (e[1] == 'x') {
271       p += sprintf(p, "%08x", va_arg(argp, int32_t));
272     } else {
273       lj_assertA(0, "bad debug format code");
274     }
275     fmt = e+2;
276   }
277   va_end(argp);
278   while (*fmt)
279     *p++ = *fmt++;
280   *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
281   if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
282     fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
283     p = ra_dbg_buf;
284   }
285   ra_dbg_p = p;
286 }
287 
288 #define RA_DBG_START()	ra_dstart()
289 #define RA_DBG_FLUSH()	ra_dflush()
290 #define RA_DBG_REF() \
291   do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
292        ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
293 #define RA_DBGX(x)	ra_dprintf x
294 
295 #else
296 #define RA_DBG_START()	((void)0)
297 #define RA_DBG_FLUSH()	((void)0)
298 #define RA_DBG_REF()	((void)0)
299 #define RA_DBGX(x)	((void)0)
300 #endif
301 
302 /* -- Register allocator -------------------------------------------------- */
303 
304 #define ra_free(as, r)		rset_set(as->freeset, (r))
305 #define ra_modified(as, r)	rset_set(as->modset, (r))
306 #define ra_weak(as, r)		rset_set(as->weakset, (r))
307 #define ra_noweak(as, r)	rset_clear(as->weakset, (r))
308 
309 #define ra_used(ir)		(ra_hasreg((ir)->r) || ra_hasspill((ir)->s))
310 
311 /* Setup register allocator. */
312 static void ra_setup(ASMState *as)
313 {
314   Reg r;
315   /* Initially all regs (except the stack pointer) are free for use. */
316   as->freeset = RSET_INIT;
317   as->modset = RSET_EMPTY;
318   as->weakset = RSET_EMPTY;
319   as->phiset = RSET_EMPTY;
320   memset(as->phireg, 0, sizeof(as->phireg));
321   for (r = RID_MIN_GPR; r < RID_MAX; r++)
322     as->cost[r] = REGCOST(~0u, 0u);
323 }
324 
325 /* Rematerialize constants. */
326 static Reg ra_rematk(ASMState *as, IRRef ref)
327 {
328   IRIns *ir;
329   Reg r;
330   if (ra_iskref(ref)) {
331     r = ra_krefreg(ref);
332     lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r);
333     ra_free(as, r);
334     ra_modified(as, r);
335 #if LJ_64
336     emit_loadu64(as, r, ra_krefk(as, ref));
337 #else
338     emit_loadi(as, r, ra_krefk(as, ref));
339 #endif
340     return r;
341   }
342   ir = IR(ref);
343   r = ir->r;
344   lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref);
345   lj_assertA(!ra_hasspill(ir->s),
346 	     "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s);
347   ra_free(as, r);
348   ra_modified(as, r);
349   ir->r = RID_INIT;  /* Do not keep any hint. */
350   RA_DBGX((as, "remat     $i $r", ir, r));
351 #if !LJ_SOFTFP32
352   if (ir->o == IR_KNUM) {
353     emit_loadk64(as, r, ir);
354   } else
355 #endif
356   if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
357     ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
358     emit_getgl(as, r, jit_base);
359   } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
360     /* REF_NIL stores ASMREF_L register. */
361     lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L");
362     emit_getgl(as, r, cur_L);
363 #if LJ_64
364   } else if (ir->o == IR_KINT64) {
365     emit_loadu64(as, r, ir_kint64(ir)->u64);
366 #if LJ_GC64
367   } else if (ir->o == IR_KGC) {
368     emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
369   } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
370     emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
371 #endif
372 #endif
373   } else {
374     lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
375 	       ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
376 	       "rematk of bad IR op %d", ir->o);
377     emit_loadi(as, r, ir->i);
378   }
379   return r;
380 }
381 
382 /* Force a spill. Allocate a new spill slot if needed. */
383 static int32_t ra_spill(ASMState *as, IRIns *ir)
384 {
385   int32_t slot = ir->s;
386   lj_assertA(ir >= as->ir + REF_TRUE,
387 	     "spill of K%03d", REF_BIAS - (int)(ir - as->ir));
388   if (!ra_hasspill(slot)) {
389     if (irt_is64(ir->t)) {
390       slot = as->evenspill;
391       as->evenspill += 2;
392     } else if (as->oddspill) {
393       slot = as->oddspill;
394       as->oddspill = 0;
395     } else {
396       slot = as->evenspill;
397       as->oddspill = slot+1;
398       as->evenspill += 2;
399     }
400     if (as->evenspill > 256)
401       lj_trace_err(as->J, LJ_TRERR_SPILLOV);
402     ir->s = (uint8_t)slot;
403   }
404   return sps_scale(slot);
405 }
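/* Worked example (annotation, hypothetical numbers): starting from
** evenspill=2 and oddspill=0, ra_spill() hands out slots as follows:
**   64-bit ref -> slot 2 (even-aligned pair 2/3), evenspill=4
**   32-bit ref -> slot 4, oddspill=5, evenspill=6
**   32-bit ref -> slot 5 (reuses the pending odd slot), oddspill=0
** 64-bit values always take an even-aligned pair, while 32-bit values first
** fill a leftover odd slot before opening a new pair.
*/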
406 
407 /* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
408 static Reg ra_releasetmp(ASMState *as, IRRef ref)
409 {
410   IRIns *ir = IR(ref);
411   Reg r = ir->r;
412   lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1);
413   lj_assertA(!ra_hasspill(ir->s),
414 	     "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s);
415   ra_free(as, r);
416   ra_modified(as, r);
417   ir->r = RID_INIT;
418   return r;
419 }
420 
421 /* Restore a register (marked as free). Rematerialize or force a spill. */
422 static Reg ra_restore(ASMState *as, IRRef ref)
423 {
424   if (emit_canremat(ref)) {
425     return ra_rematk(as, ref);
426   } else {
427     IRIns *ir = IR(ref);
428     int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
429     Reg r = ir->r;
430     lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS);
431     ra_sethint(ir->r, r);  /* Keep hint. */
432     ra_free(as, r);
433     if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
434       ra_modified(as, r);
435       RA_DBGX((as, "restore   $i $r", ir, r));
436       emit_spload(as, ir, r, ofs);
437     }
438     return r;
439   }
440 }
441 
442 /* Save a register to a spill slot. */
443 static void ra_save(ASMState *as, IRIns *ir, Reg r)
444 {
445   RA_DBGX((as, "save      $i $r", ir, r));
446   emit_spstore(as, ir, r, sps_scale(ir->s));
447 }
448 
449 #define MINCOST(name) \
450   if (rset_test(RSET_ALL, RID_##name) && \
451       LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
452     cost = as->cost[RID_##name];
453 
454 /* Evict the register with the lowest cost, forcing a restore. */
455 static Reg ra_evict(ASMState *as, RegSet allow)
456 {
457   IRRef ref;
458   RegCost cost = ~(RegCost)0;
459   lj_assertA(allow != RSET_EMPTY, "evict from empty set");
460   if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
461     GPRDEF(MINCOST)
462   } else {
463     FPRDEF(MINCOST)
464   }
465   ref = regcost_ref(cost);
466   lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins),
467 	     "evict of out-of-range IR %04d", ref - REF_BIAS);
468   /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
469   if (!irref_isk(ref) && (as->weakset & allow)) {
470     IRIns *ir = IR(ref);
471     if (!rset_test(as->weakset, ir->r))
472       ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
473   }
474   return ra_restore(as, ref);
475 }
476 
477 /* Pick any register (marked as free). Evict on-demand. */
478 static Reg ra_pick(ASMState *as, RegSet allow)
479 {
480   RegSet pick = as->freeset & allow;
481   if (!pick)
482     return ra_evict(as, allow);
483   else
484     return rset_picktop(pick);
485 }
486 
487 /* Get a scratch register (marked as free). */
488 static Reg ra_scratch(ASMState *as, RegSet allow)
489 {
490   Reg r = ra_pick(as, allow);
491   ra_modified(as, r);
492   RA_DBGX((as, "scratch        $r", r));
493   return r;
494 }
495 
496 /* Evict all registers from a set (if not free). */
497 static void ra_evictset(ASMState *as, RegSet drop)
498 {
499   RegSet work;
500   as->modset |= drop;
501 #if !LJ_SOFTFP
502   work = (drop & ~as->freeset) & RSET_FPR;
503   while (work) {
504     Reg r = rset_pickbot(work);
505     ra_restore(as, regcost_ref(as->cost[r]));
506     rset_clear(work, r);
507     checkmclim(as);
508   }
509 #endif
510   work = (drop & ~as->freeset);
511   while (work) {
512     Reg r = rset_pickbot(work);
513     ra_restore(as, regcost_ref(as->cost[r]));
514     rset_clear(work, r);
515     checkmclim(as);
516   }
517 }
518 
519 /* Evict (rematerialize) all registers allocated to constants. */
520 static void ra_evictk(ASMState *as)
521 {
522   RegSet work;
523 #if !LJ_SOFTFP
524   work = ~as->freeset & RSET_FPR;
525   while (work) {
526     Reg r = rset_pickbot(work);
527     IRRef ref = regcost_ref(as->cost[r]);
528     if (emit_canremat(ref) && irref_isk(ref)) {
529       ra_rematk(as, ref);
530       checkmclim(as);
531     }
532     rset_clear(work, r);
533   }
534 #endif
535   work = ~as->freeset & RSET_GPR;
536   while (work) {
537     Reg r = rset_pickbot(work);
538     IRRef ref = regcost_ref(as->cost[r]);
539     if (emit_canremat(ref) && irref_isk(ref)) {
540       ra_rematk(as, ref);
541       checkmclim(as);
542     }
543     rset_clear(work, r);
544   }
545 }
546 
547 #ifdef RID_NUM_KREF
548 /* Allocate a register for a constant. */
549 static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
550 {
551   /* First try to find a register which already holds the same constant. */
552   RegSet pick, work = ~as->freeset & RSET_GPR;
553   Reg r;
554   while (work) {
555     IRRef ref;
556     r = rset_pickbot(work);
557     ref = regcost_ref(as->cost[r]);
558 #if LJ_64
559     if (ref < ASMREF_L) {
560       if (ra_iskref(ref)) {
561 	if (k == ra_krefk(as, ref))
562 	  return r;
563       } else {
564 	IRIns *ir = IR(ref);
565 	if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
566 #if LJ_GC64
567 	    (ir->o == IR_KINT && k == ir->i) ||
568 	    (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
569 	    ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
570 	     k == (intptr_t)ir_kptr(ir))
571 #else
572 	    (ir->o != IR_KINT64 && k == ir->i)
573 #endif
574 	   )
575 	  return r;
576       }
577     }
578 #else
579     if (ref < ASMREF_L &&
580 	k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
581       return r;
582 #endif
583     rset_clear(work, r);
584   }
585   pick = as->freeset & allow;
586   if (pick) {
587     /* Constants should preferably get unmodified registers. */
588     if ((pick & ~as->modset))
589       pick &= ~as->modset;
590     r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
591   } else {
592     r = ra_evict(as, allow);
593   }
594   RA_DBGX((as, "allock    $x $r", k, r));
595   ra_setkref(as, r, k);
596   rset_clear(as->freeset, r);
597   ra_noweak(as, r);
598   return r;
599 }
600 
601 /* Allocate a specific register for a constant. */
602 static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
603 {
604   Reg kr = ra_allock(as, k, RID2RSET(r));
605   if (kr != r) {
606     IRIns irdummy;
607     irdummy.t.irt = IRT_INT;
608     ra_scratch(as, RID2RSET(r));
609     emit_movrr(as, &irdummy, r, kr);
610   }
611 }
612 #else
613 #define ra_allockreg(as, k, r)		emit_loadi(as, (r), (k))
614 #endif
615 
616 /* Allocate a register for ref from the allowed set of registers.
617 ** Note: this function assumes the ref does NOT have a register yet!
618 ** Picks an optimal register, sets the cost and marks the register as non-free.
619 */
620 static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
621 {
622   IRIns *ir = IR(ref);
623   RegSet pick = as->freeset & allow;
624   Reg r;
625   lj_assertA(ra_noreg(ir->r),
626 	     "IR %04d already has reg %d", ref - REF_BIAS, ir->r);
627   if (pick) {
628     /* First check register hint from propagation or PHI. */
629     if (ra_hashint(ir->r)) {
630       r = ra_gethint(ir->r);
631       if (rset_test(pick, r))  /* Use hint register if possible. */
632 	goto found;
633       /* Rematerialization is cheaper than missing a hint. */
634       if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
635 	ra_rematk(as, regcost_ref(as->cost[r]));
636 	goto found;
637       }
638       RA_DBGX((as, "hintmiss  $f $r", ref, r));
639     }
640     /* Invariants should preferably get unmodified registers. */
641     if (ref < as->loopref && !irt_isphi(ir->t)) {
642       if ((pick & ~as->modset))
643 	pick &= ~as->modset;
644       r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
645     } else {
646       /* We've got plenty of regs, so get callee-save regs if possible. */
647       if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
648 	pick &= ~RSET_SCRATCH;
649       r = rset_picktop(pick);
650     }
651   } else {
652     r = ra_evict(as, allow);
653   }
654 found:
655   RA_DBGX((as, "alloc     $f $r", ref, r));
656   ir->r = (uint8_t)r;
657   rset_clear(as->freeset, r);
658   ra_noweak(as, r);
659   as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
660   return r;
661 }
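/* Annotation: register hints are soft requests stored in ir->r while a ref
** has no register yet (e.g. set by ra_left() or for left PHIs). On
** allocation the hint is honored if that register is free, or if it merely
** holds a rematerializable constant; otherwise a "hintmiss" is logged and
** any allowed register is picked instead.
*/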
662 
663 /* Allocate a register on-demand. */
664 static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
665 {
666   Reg r = IR(ref)->r;
667   /* Note: allow is ignored if the register is already allocated. */
668   if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
669   ra_noweak(as, r);
670   return r;
671 }
672 
673 /* Add a register rename to the IR. */
674 static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
675 {
676   IRRef ren;
677   lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
678   ren = tref_ref(lj_ir_emit(as->J));
679   as->J->cur.ir[ren].r = (uint8_t)down;
680   as->J->cur.ir[ren].s = SPS_NONE;
681 }
682 
683 /* Rename register allocation and emit move. */
684 static void ra_rename(ASMState *as, Reg down, Reg up)
685 {
686   IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
687   IRIns *ir = IR(ref);
688   ir->r = (uint8_t)up;
689   as->cost[down] = 0;
690   lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR),
691 	     "rename between GPR/FPR %d and %d", down, up);
692   lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down);
693   lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up);
694   ra_free(as, down);  /* 'down' is free ... */
695   ra_modified(as, down);
696   rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
697   ra_noweak(as, up);
698   RA_DBGX((as, "rename    $f $r $r", regcost_ref(as->cost[up]), down, up));
699   emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
700   if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
701     /*
702     ** The rename is effective at the subsequent (already emitted) exit
703     ** branch. This is for the current snapshot (as->snapno). Except if we
704     ** haven't yet allocated any refs for the snapshot (as->snapalloc == 1),
705     ** then it belongs to the next snapshot.
706     ** See also the discussion at asm_snap_checkrename().
707     */
708     ra_addrename(as, down, ref, as->snapno + as->snapalloc);
709   }
710 }
711 
712 /* Pick a destination register (marked as free).
713 ** Caveat: allow is ignored if there's already a destination register.
714 ** Use ra_destreg() to get a specific register.
715 */
716 static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
717 {
718   Reg dest = ir->r;
719   if (ra_hasreg(dest)) {
720     ra_free(as, dest);
721     ra_modified(as, dest);
722   } else {
723     if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
724       dest = ra_gethint(dest);
725       ra_modified(as, dest);
726       RA_DBGX((as, "dest           $r", dest));
727     } else {
728       dest = ra_scratch(as, allow);
729     }
730     ir->r = dest;
731   }
732   if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
733   return dest;
734 }
735 
736 /* Force a specific destination register (marked as free). */
737 static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
738 {
739   Reg dest = ra_dest(as, ir, RID2RSET(r));
740   if (dest != r) {
741     lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r);
742     ra_modified(as, r);
743     emit_movrr(as, ir, dest, r);
744   }
745 }
746 
747 #if LJ_TARGET_X86ORX64
748 /* Propagate dest register to left reference. Emit moves as needed.
749 ** This is a required fixup step for all 2-operand machine instructions.
750 */
751 static void ra_left(ASMState *as, Reg dest, IRRef lref)
752 {
753   IRIns *ir = IR(lref);
754   Reg left = ir->r;
755   if (ra_noreg(left)) {
756     if (irref_isk(lref)) {
757       if (ir->o == IR_KNUM) {
758 	/* FP remat needs a load except for +0. Still better than eviction. */
759 	if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
760 	  emit_loadk64(as, dest, ir);
761 	  return;
762 	}
763 #if LJ_64
764       } else if (ir->o == IR_KINT64) {
765 	emit_loadk64(as, dest, ir);
766 	return;
767 #if LJ_GC64
768       } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
769 	emit_loadk64(as, dest, ir);
770 	return;
771 #endif
772 #endif
773       } else if (ir->o != IR_KPRI) {
774 	lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
775 		   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
776 		   "K%03d has bad IR op %d", REF_BIAS - lref, ir->o);
777 	emit_loadi(as, dest, ir->i);
778 	return;
779       }
780     }
781     if (!ra_hashint(left) && !iscrossref(as, lref))
782       ra_sethint(ir->r, dest);  /* Propagate register hint. */
783     left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
784   }
785   ra_noweak(as, left);
786   /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
787   if (dest != left) {
788     /* Use register renaming if dest is the PHI reg. */
789     if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
790       ra_modified(as, left);
791       ra_rename(as, left, dest);
792     } else {
793       emit_movrr(as, ir, dest, left);
794     }
795   }
796 }
797 #else
798 /* Similar to ra_left, except we override any hints. */
799 static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
800 {
801   IRIns *ir = IR(lref);
802   Reg left = ir->r;
803   if (ra_noreg(left)) {
804     ra_sethint(ir->r, dest);  /* Propagate register hint. */
805     left = ra_allocref(as, lref,
806 		       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
807   }
808   ra_noweak(as, left);
809   if (dest != left) {
810     /* Use register renaming if dest is the PHI reg. */
811     if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
812       ra_modified(as, left);
813       ra_rename(as, left, dest);
814     } else {
815       emit_movrr(as, ir, dest, left);
816     }
817   }
818 }
819 #endif
820 
821 /* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
822 static void ra_destpair(ASMState *as, IRIns *ir)
823 {
824   Reg destlo = ir->r, desthi = (ir+1)->r;
825   IRIns *irx = (LJ_64 && !irt_is64(ir->t)) ? ir+1 : ir;
826   /* First spill unrelated refs blocking the destination registers. */
827   if (!rset_test(as->freeset, RID_RETLO) &&
828       destlo != RID_RETLO && desthi != RID_RETLO)
829     ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
830   if (!rset_test(as->freeset, RID_RETHI) &&
831       destlo != RID_RETHI && desthi != RID_RETHI)
832     ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
833   /* Next free the destination registers (if any). */
834   if (ra_hasreg(destlo)) {
835     ra_free(as, destlo);
836     ra_modified(as, destlo);
837   } else {
838     destlo = RID_RETLO;
839   }
840   if (ra_hasreg(desthi)) {
841     ra_free(as, desthi);
842     ra_modified(as, desthi);
843   } else {
844     desthi = RID_RETHI;
845   }
846   /* Check for conflicts and shuffle the registers as needed. */
847   if (destlo == RID_RETHI) {
848     if (desthi == RID_RETLO) {
849 #if LJ_TARGET_X86ORX64
850       *--as->mcp = REX_64IR(irx, XI_XCHGa + RID_RETHI);
851 #else
852       emit_movrr(as, irx, RID_RETHI, RID_TMP);
853       emit_movrr(as, irx, RID_RETLO, RID_RETHI);
854       emit_movrr(as, irx, RID_TMP, RID_RETLO);
855 #endif
856     } else {
857       emit_movrr(as, irx, RID_RETHI, RID_RETLO);
858       if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
859     }
860   } else if (desthi == RID_RETLO) {
861     emit_movrr(as, irx, RID_RETLO, RID_RETHI);
862     if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
863   } else {
864     if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
865     if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
866   }
867   /* Restore spill slots (if any). */
868   if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
869   if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
870 }
871 
872 /* -- Snapshot handling ---------------------------------------------------- */
873 
874 /* Can we rematerialize a KNUM instead of forcing a spill? */
875 static int asm_snap_canremat(ASMState *as)
876 {
877   Reg r;
878   for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
879     if (irref_isk(regcost_ref(as->cost[r])))
880       return 1;
881   return 0;
882 }
883 
884 /* Check whether a sunk store corresponds to an allocation. */
885 static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
886 {
887   if (irs->s == 255) {
888     if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
889 	irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
890       IRIns *irk = IR(irs->op1);
891       if (irk->o == IR_AREF || irk->o == IR_HREFK)
892 	irk = IR(irk->op1);
893       return (IR(irk->op1) == ira);
894     }
895     return 0;
896   } else {
897     return (ira + irs->s == irs);  /* Quick check. */
898   }
899 }
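/* Annotation (assumption based on the sink optimization): for sunk stores
** irs->s caches the small distance from the allocation ira to the store, so
** the common case is the quick pointer check above; the value 255 means the
** distance did not fit and the allocation is found by walking the AREF/HREFK
** key chain instead.
*/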
900 
901 /* Allocate register or spill slot for a ref that escapes to a snapshot. */
902 static void asm_snap_alloc1(ASMState *as, IRRef ref)
903 {
904   IRIns *ir = IR(ref);
905   if (!irref_isk(ref) && ir->r != RID_SUNK) {
906     bloomset(as->snapfilt1, ref);
907     bloomset(as->snapfilt2, hashrot(ref, ref + HASH_BIAS));
908     if (ra_used(ir)) return;
909     if (ir->r == RID_SINK) {
910       ir->r = RID_SUNK;
911 #if LJ_HASFFI
912       if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
913 	asm_snap_alloc1(as, ir->op2);
914 	if (LJ_32 && (ir+1)->o == IR_HIOP)
915 	  asm_snap_alloc1(as, (ir+1)->op2);
916       } else
917 #endif
918       {  /* Allocate stored values for TNEW, TDUP and CNEW. */
919 	IRIns *irs;
920 	lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW,
921 		   "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o);
922 	for (irs = IR(as->snapref-1); irs > ir; irs--)
923 	  if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
924 	    lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
925 		       irs->o == IR_FSTORE || irs->o == IR_XSTORE,
926 		       "sunk store IR %04d has bad op %d",
927 		       (int)(irs - as->ir) - REF_BIAS, irs->o);
928 	    asm_snap_alloc1(as, irs->op2);
929 	    if (LJ_32 && (irs+1)->o == IR_HIOP)
930 	      asm_snap_alloc1(as, (irs+1)->op2);
931 	  }
932       }
933     } else {
934       RegSet allow;
935       if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
936 	IRIns *irc;
937 	for (irc = IR(as->curins); irc > ir; irc--)
938 	  if ((irc->op1 == ref || irc->op2 == ref) &&
939 	      !(irc->r == RID_SINK || irc->r == RID_SUNK))
940 	    goto nosink;  /* Don't sink conversion if result is used. */
941 	asm_snap_alloc1(as, ir->op1);
942 	return;
943       }
944     nosink:
945       allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
946       if ((as->freeset & allow) ||
947 	       (allow == RSET_FPR && asm_snap_canremat(as))) {
948 	/* Get a weak register if we have a free one or can rematerialize. */
949 	Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
950 	if (!irt_isphi(ir->t))
951 	  ra_weak(as, r);  /* But mark it as weakly referenced. */
952 	checkmclim(as);
953 	RA_DBGX((as, "snapreg   $f $r", ref, ir->r));
954       } else {
955 	ra_spill(as, ir);  /* Otherwise force a spill slot. */
956 	RA_DBGX((as, "snapspill $f $s", ref, ir->s));
957       }
958     }
959   }
960 }
961 
962 /* Allocate refs escaping to a snapshot. */
963 static void asm_snap_alloc(ASMState *as, int snapno)
964 {
965   SnapShot *snap = &as->T->snap[snapno];
966   SnapEntry *map = &as->T->snapmap[snap->mapofs];
967   MSize n, nent = snap->nent;
968   as->snapfilt1 = as->snapfilt2 = 0;
969   for (n = 0; n < nent; n++) {
970     SnapEntry sn = map[n];
971     IRRef ref = snap_ref(sn);
972     if (!irref_isk(ref)) {
973       asm_snap_alloc1(as, ref);
974       if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
975 	lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP,
976 		   "snap %d[%d] points to bad SOFTFP IR %04d",
977 		   snapno, n, ref - REF_BIAS);
978 	asm_snap_alloc1(as, ref+1);
979       }
980     }
981   }
982 }
983 
984 /* All guards for a snapshot use the same exitno. This is currently the
985 ** same as the snapshot number. Since the exact origin of the exit cannot
986 ** be determined, all guards for the same snapshot must exit with the same
987 ** RegSP mapping.
988 ** A renamed ref which has been used in a prior guard for the same snapshot
989 ** would cause an inconsistency. The easy way out is to force a spill slot.
990 */
991 static int asm_snap_checkrename(ASMState *as, IRRef ren)
992 {
993   if (bloomtest(as->snapfilt1, ren) &&
994       bloomtest(as->snapfilt2, hashrot(ren, ren + HASH_BIAS))) {
995     IRIns *ir = IR(ren);
996     ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
997     RA_DBGX((as, "snaprensp $f $s", ren, ir->s));
998     return 1;  /* Found. */
999   }
1000   return 0;  /* Not found. */
1001 }
1002 
1003 /* Prepare snapshot for next guard or throwing instruction. */
1004 static void asm_snap_prep(ASMState *as)
1005 {
1006   if (as->snapalloc) {
1007     /* Alloc on first invocation for each snapshot. */
1008     as->snapalloc = 0;
1009     asm_snap_alloc(as, as->snapno);
1010     as->snaprename = as->T->nins;
1011   } else {
1012     /* Check any renames above the highwater mark. */
1013     for (; as->snaprename < as->T->nins; as->snaprename++) {
1014       IRIns *ir = &as->T->ir[as->snaprename];
1015       if (asm_snap_checkrename(as, ir->op1))
1016 	ir->op2 = REF_BIAS-1;  /* Kill rename. */
1017     }
1018   }
1019 }
1020 
1021 /* Move to previous snapshot when we cross the current snapshot ref. */
1022 static void asm_snap_prev(ASMState *as)
1023 {
1024   if (as->curins < as->snapref) {
1025     uintptr_t ofs = (uintptr_t)(as->mctoporig - as->mcp);
1026     if (ofs >= 0x10000) lj_trace_err(as->J, LJ_TRERR_MCODEOV);
1027     do {
1028       if (as->snapno == 0) return;
1029       as->snapno--;
1030       as->snapref = as->T->snap[as->snapno].ref;
1031       as->T->snap[as->snapno].mcofs = (uint16_t)ofs;  /* Remember mcode ofs. */
1032     } while (as->curins < as->snapref);  /* May have no ins in between. */
1033     as->snapalloc = 1;
1034   }
1035 }
1036 
1037 /* Fixup snapshot mcode offsets. */
1038 static void asm_snap_fixup_mcofs(ASMState *as)
1039 {
1040   uint32_t sz = (uint32_t)(as->mctoporig - as->mcp);
1041   SnapShot *snap = as->T->snap;
1042   SnapNo i;
1043   for (i = as->T->nsnap-1; i > 0; i--) {
1044     /* Compute offset from mcode start and store in correct snapshot. */
1045     snap[i].mcofs = (uint16_t)(sz - snap[i-1].mcofs);
1046   }
1047   snap[0].mcofs = 0;
1048 }
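/* Worked example (annotation, hypothetical numbers): with a final mcode size
** of sz=100 bytes, asm_snap_prev() left snap[0].mcofs=60, i.e. the distance
** from the original mcode top to the boundary before snapshot 1. The fixup
** converts this into offsets from the start of the mcode:
** snap[1].mcofs = 100-60 = 40 and snap[0].mcofs = 0.
*/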
1049 
1050 /* -- Miscellaneous helpers ----------------------------------------------- */
1051 
1052 /* Calculate stack adjustment. */
1053 static int32_t asm_stack_adjust(ASMState *as)
1054 {
1055   if (as->evenspill <= SPS_FIXED)
1056     return 0;
1057   return sps_scale(sps_align(as->evenspill));
1058 }
1059 
1060 /* Must match with hash*() in lj_tab.c. */
1061 static uint32_t ir_khash(ASMState *as, IRIns *ir)
1062 {
1063   uint32_t lo, hi;
1064   UNUSED(as);
1065   if (irt_isstr(ir->t)) {
1066     return ir_kstr(ir)->sid;
1067   } else if (irt_isnum(ir->t)) {
1068     lo = ir_knum(ir)->u32.lo;
1069     hi = ir_knum(ir)->u32.hi << 1;
1070   } else if (irt_ispri(ir->t)) {
1071     lj_assertA(!irt_isnil(ir->t), "hash of nil key");
1072     return irt_type(ir->t)-IRT_FALSE;
1073   } else {
1074     lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t));
1075     lo = u32ptr(ir_kgc(ir));
1076 #if LJ_GC64
1077     hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15);
1078 #else
1079     hi = lo + HASH_BIAS;
1080 #endif
1081   }
1082   return hashrot(lo, hi);
1083 }
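/* Annotation: ir_khash() is used by the target back-ends when emitting
** inline hash lookups for constant table keys, so it must agree with the
** runtime hash in lj_tab.c or the compiled lookup would probe the wrong
** hash slot.
*/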
1084 
1085 /* -- Allocations --------------------------------------------------------- */
1086 
1087 static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
1088 static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);
1089 
1090 static void asm_snew(ASMState *as, IRIns *ir)
1091 {
1092   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
1093   IRRef args[3];
1094   asm_snap_prep(as);
1095   args[0] = ASMREF_L;  /* lua_State *L    */
1096   args[1] = ir->op1;   /* const char *str */
1097   args[2] = ir->op2;   /* size_t len      */
1098   as->gcsteps++;
1099   asm_setupresult(as, ir, ci);  /* GCstr * */
1100   asm_gencall(as, ci, args);
1101 }
1102 
1103 static void asm_tnew(ASMState *as, IRIns *ir)
1104 {
1105   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
1106   IRRef args[2];
1107   asm_snap_prep(as);
1108   args[0] = ASMREF_L;     /* lua_State *L    */
1109   args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
1110   as->gcsteps++;
1111   asm_setupresult(as, ir, ci);  /* GCtab * */
1112   asm_gencall(as, ci, args);
1113   ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
1114 }
1115 
1116 static void asm_tdup(ASMState *as, IRIns *ir)
1117 {
1118   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
1119   IRRef args[2];
1120   asm_snap_prep(as);
1121   args[0] = ASMREF_L;  /* lua_State *L    */
1122   args[1] = ir->op1;   /* const GCtab *kt */
1123   as->gcsteps++;
1124   asm_setupresult(as, ir, ci);  /* GCtab * */
1125   asm_gencall(as, ci, args);
1126 }
1127 
1128 static void asm_gc_check(ASMState *as);
1129 
1130 /* Explicit GC step. */
1131 static void asm_gcstep(ASMState *as, IRIns *ir)
1132 {
1133   IRIns *ira;
1134   for (ira = IR(as->stopins+1); ira < ir; ira++)
1135     if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
1136 	 (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
1137 	ra_used(ira))
1138       as->gcsteps++;
1139   if (as->gcsteps)
1140     asm_gc_check(as);
1141   as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
1142 }
1143 
1144 /* -- Buffer operations --------------------------------------------------- */
1145 
1146 static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode);
1147 #if LJ_HASBUFFER
1148 static void asm_bufhdr_write(ASMState *as, Reg sb);
1149 #endif
1150 
1151 static void asm_bufhdr(ASMState *as, IRIns *ir)
1152 {
1153   Reg sb = ra_dest(as, ir, RSET_GPR);
1154   switch (ir->op2) {
1155   case IRBUFHDR_RESET: {
1156     Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
1157     IRIns irbp;
1158     irbp.ot = IRT(0, IRT_PTR);  /* Buffer data pointer type. */
1159     emit_storeofs(as, &irbp, tmp, sb, offsetof(SBuf, w));
1160     emit_loadofs(as, &irbp, tmp, sb, offsetof(SBuf, b));
1161     break;
1162     }
1163   case IRBUFHDR_APPEND: {
1164     /* Rematerialize const buffer pointer instead of likely spill. */
1165     IRIns *irp = IR(ir->op1);
1166     if (!(ra_hasreg(irp->r) || irp == ir-1 ||
1167 	  (irp == ir-2 && !ra_used(ir-1)))) {
1168       while (!(irp->o == IR_BUFHDR && irp->op2 == IRBUFHDR_RESET))
1169 	irp = IR(irp->op1);
1170       if (irref_isk(irp->op1)) {
1171 	ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
1172 	ir = irp;
1173       }
1174     }
1175     break;
1176     }
1177 #if LJ_HASBUFFER
1178   case IRBUFHDR_WRITE:
1179     asm_bufhdr_write(as, sb);
1180     break;
1181 #endif
1182   default: lj_assertA(0, "bad BUFHDR op2 %d", ir->op2); break;
1183   }
1184 #if LJ_TARGET_X86ORX64
1185   ra_left(as, sb, ir->op1);
1186 #else
1187   ra_leftov(as, sb, ir->op1);
1188 #endif
1189 }
1190 
1191 static void asm_bufput(ASMState *as, IRIns *ir)
1192 {
1193   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
1194   IRRef args[3];
1195   IRIns *irs;
1196   int kchar = -129;
1197   args[0] = ir->op1;  /* SBuf * */
1198   args[1] = ir->op2;  /* GCstr * */
1199   irs = IR(ir->op2);
1200   lj_assertA(irt_isstr(irs->t),
1201 	     "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS);
1202   if (irs->o == IR_KGC) {
1203     GCstr *s = ir_kstr(irs);
1204     if (s->len == 1) {  /* Optimize put of single-char string constant. */
1205       kchar = (int8_t)strdata(s)[0];  /* Signed! */
1206       args[1] = ASMREF_TMP1;  /* int, truncated to char */
1207       ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
1208     }
1209   } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
1210     if (irs->o == IR_TOSTR) {  /* Fuse number to string conversions. */
1211       if (irs->op2 == IRTOSTR_NUM) {
1212 	args[1] = ASMREF_TMP1;  /* TValue * */
1213 	ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
1214       } else {
1215 	lj_assertA(irt_isinteger(IR(irs->op1)->t),
1216 		   "TOSTR of non-numeric IR %04d", irs->op1);
1217 	args[1] = irs->op1;  /* int */
1218 	if (irs->op2 == IRTOSTR_INT)
1219 	  ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
1220 	else
1221 	  ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
1222       }
1223     } else if (irs->o == IR_SNEW) {  /* Fuse string allocation. */
1224       args[1] = irs->op1;  /* const void * */
1225       args[2] = irs->op2;  /* MSize */
1226       ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
1227     }
1228   }
1229   asm_setupresult(as, ir, ci);  /* SBuf * */
1230   asm_gencall(as, ci, args);
1231   if (args[1] == ASMREF_TMP1) {
1232     Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
1233     if (kchar == -129)
1234       asm_tvptr(as, tmp, irs->op1, IRTMPREF_IN1);
1235     else
1236       ra_allockreg(as, kchar, tmp);
1237   }
1238 }
1239 
1240 static void asm_bufstr(ASMState *as, IRIns *ir)
1241 {
1242   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
1243   IRRef args[1];
1244   args[0] = ir->op1;  /* SBuf *sb */
1245   as->gcsteps++;
1246   asm_setupresult(as, ir, ci);  /* GCstr * */
1247   asm_gencall(as, ci, args);
1248 }
1249 
1250 /* -- Type conversions ---------------------------------------------------- */
1251 
1252 static void asm_tostr(ASMState *as, IRIns *ir)
1253 {
1254   const CCallInfo *ci;
1255   IRRef args[2];
1256   asm_snap_prep(as);
1257   args[0] = ASMREF_L;
1258   as->gcsteps++;
1259   if (ir->op2 == IRTOSTR_NUM) {
1260     args[1] = ASMREF_TMP1;  /* cTValue * */
1261     ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
1262   } else {
1263     args[1] = ir->op1;  /* int32_t k */
1264     if (ir->op2 == IRTOSTR_INT)
1265       ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
1266     else
1267       ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
1268   }
1269   asm_setupresult(as, ir, ci);  /* GCstr * */
1270   asm_gencall(as, ci, args);
1271   if (ir->op2 == IRTOSTR_NUM)
1272     asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1, IRTMPREF_IN1);
1273 }
1274 
1275 #if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
1276 static void asm_conv64(ASMState *as, IRIns *ir)
1277 {
1278   IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
1279   IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
1280   IRCallID id;
1281   IRRef args[2];
1282   lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP,
1283 	     "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS);
1284   args[LJ_BE] = (ir-1)->op1;
1285   args[LJ_LE] = ir->op1;
1286   if (st == IRT_NUM || st == IRT_FLOAT) {
1287     id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
1288     ir--;
1289   } else {
1290     id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
1291   }
1292   {
1293 #if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
1294     CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
1295     cim.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
1296 #else
1297     const CCallInfo *ci = &lj_ir_callinfo[id];
1298 #endif
1299     asm_setupresult(as, ir, ci);
1300     asm_gencall(as, ci, args);
1301   }
1302 }
1303 #endif
1304 
1305 /* -- Memory references --------------------------------------------------- */
1306 
1307 static void asm_newref(ASMState *as, IRIns *ir)
1308 {
1309   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
1310   IRRef args[3];
1311   if (ir->r == RID_SINK)
1312     return;
1313   asm_snap_prep(as);
1314   args[0] = ASMREF_L;     /* lua_State *L */
1315   args[1] = ir->op1;      /* GCtab *t     */
1316   args[2] = ASMREF_TMP1;  /* cTValue *key */
1317   asm_setupresult(as, ir, ci);  /* TValue * */
1318   asm_gencall(as, ci, args);
1319   asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2, IRTMPREF_IN1);
1320 }
1321 
1322 static void asm_tmpref(ASMState *as, IRIns *ir)
1323 {
1324   Reg r = ra_dest(as, ir, RSET_GPR);
1325   asm_tvptr(as, r, ir->op1, ir->op2);
1326 }
1327 
1328 static void asm_lref(ASMState *as, IRIns *ir)
1329 {
1330   Reg r = ra_dest(as, ir, RSET_GPR);
1331 #if LJ_TARGET_X86ORX64
1332   ra_left(as, r, ASMREF_L);
1333 #else
1334   ra_leftov(as, r, ASMREF_L);
1335 #endif
1336 }
1337 
1338 /* -- Calls --------------------------------------------------------------- */
1339 
1340 /* Collect arguments from CALL* and CARG instructions. */
1341 static void asm_collectargs(ASMState *as, IRIns *ir,
1342 			    const CCallInfo *ci, IRRef *args)
1343 {
1344   uint32_t n = CCI_XNARGS(ci);
1345   /* Account for split args. */
1346   lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n);
1347   if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
1348   while (n-- > 1) {
1349     ir = IR(ir->op1);
1350     lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree");
1351     args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
1352   }
1353   args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
1354   lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree");
1355 }
1356 
1357 /* Reconstruct CCallInfo flags for CALLX*. */
1358 static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
1359 {
1360   uint32_t nargs = 0;
1361   if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
1362     IRIns *ira = IR(ir->op1);
1363     nargs++;
1364     while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
1365   }
1366 #if LJ_HASFFI
1367   if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
1368     CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
1369     CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
1370     nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
1371 #if LJ_TARGET_X86
1372     nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
1373 #endif
1374   }
1375 #endif
1376   return (nargs | (ir->t.irt << CCI_OTSHIFT));
1377 }
1378 
1379 static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
1380 {
1381   const CCallInfo *ci = &lj_ir_callinfo[id];
1382   IRRef args[2];
1383   args[0] = ir->op1;
1384   args[1] = ir->op2;
1385   asm_setupresult(as, ir, ci);
1386   asm_gencall(as, ci, args);
1387 }
1388 
1389 static void asm_call(ASMState *as, IRIns *ir)
1390 {
1391   IRRef args[CCI_NARGS_MAX];
1392   const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
1393   asm_collectargs(as, ir, ci, args);
1394   asm_setupresult(as, ir, ci);
1395   asm_gencall(as, ci, args);
1396 }
1397 
1398 /* -- PHI and loop handling ----------------------------------------------- */
1399 
1400 /* Break a PHI cycle by renaming to a free register (evict if needed). */
1401 static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
1402 			  RegSet allow)
1403 {
1404   RegSet candidates = blocked & allow;
1405   if (candidates) {  /* If this register file has candidates. */
1406     /* Note: the set for ra_pick cannot be empty, since each register file
1407     ** has some registers never allocated to PHIs.
1408     */
1409     Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
1410     if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
1411       candidates = candidates & ~blockedby;
1412     down = rset_picktop(candidates);  /* Pick candidate PHI register. */
1413     ra_rename(as, down, up);  /* And rename it to the free register. */
1414   }
1415 }
1416 
1417 /* PHI register shuffling.
1418 **
1419 ** The allocator tries hard to preserve PHI register assignments across
1420 ** the loop body. Most of the time this loop does nothing, since there
1421 ** are no register mismatches.
1422 **
1423 ** If a register mismatch is detected and ...
1424 ** - the register is currently free: rename it.
1425 ** - the register is blocked by an invariant: restore/remat and rename it.
1426 ** - Otherwise the register is used by another PHI, so mark it as blocked.
1427 **
1428 ** The renames are order-sensitive, so just retry the loop if a register
1429 ** is marked as blocked, but has been freed in the meantime. A cycle is
1430 ** detected if all of the blocked registers are allocated. To break the
1431 ** cycle rename one of them to a free register and retry.
1432 **
1433 ** Note that PHI spill slots are kept in sync and don't need to be shuffled.
1434 */
1435 static void asm_phi_shuffle(ASMState *as)
1436 {
1437   RegSet work;
1438 
1439   /* Find and resolve PHI register mismatches. */
1440   for (;;) {
1441     RegSet blocked = RSET_EMPTY;
1442     RegSet blockedby = RSET_EMPTY;
1443     RegSet phiset = as->phiset;
1444     while (phiset) {  /* Check all left PHI operand registers. */
1445       Reg r = rset_pickbot(phiset);
1446       IRIns *irl = IR(as->phireg[r]);
1447       Reg left = irl->r;
1448       if (r != left) {  /* Mismatch? */
1449 	if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
1450 	  IRRef ref = regcost_ref(as->cost[r]);
1451 	  /* Blocked by other PHI (w/reg)? */
1452 	  if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
1453 	    rset_set(blocked, r);
1454 	    if (ra_hasreg(left))
1455 	      rset_set(blockedby, left);
1456 	    left = RID_NONE;
1457 	  } else {  /* Otherwise grab register from invariant. */
1458 	    ra_restore(as, ref);
1459 	    checkmclim(as);
1460 	  }
1461 	}
1462 	if (ra_hasreg(left)) {
1463 	  ra_rename(as, left, r);
1464 	  checkmclim(as);
1465 	}
1466       }
1467       rset_clear(phiset, r);
1468     }
1469     if (!blocked) break;  /* Finished. */
1470     if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
1471       asm_phi_break(as, blocked, blockedby, RSET_GPR);
1472       if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
1473       checkmclim(as);
1474     }  /* Else retry some more renames. */
1475   }
1476 
1477   /* Restore/remat invariants whose registers are modified inside the loop. */
1478 #if !LJ_SOFTFP
1479   work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
1480   while (work) {
1481     Reg r = rset_pickbot(work);
1482     ra_restore(as, regcost_ref(as->cost[r]));
1483     rset_clear(work, r);
1484     checkmclim(as);
1485   }
1486 #endif
1487   work = as->modset & ~(as->freeset | as->phiset);
1488   while (work) {
1489     Reg r = rset_pickbot(work);
1490     ra_restore(as, regcost_ref(as->cost[r]));
1491     rset_clear(work, r);
1492     checkmclim(as);
1493   }
1494 
1495   /* Allocate and save all unsaved PHI regs and clear marks. */
1496   work = as->phiset;
1497   while (work) {
1498     Reg r = rset_picktop(work);
1499     IRRef lref = as->phireg[r];
1500     IRIns *ir = IR(lref);
1501     if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
1502       irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
1503       ra_alloc1(as, lref, RID2RSET(r));
1504       ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
1505       checkmclim(as);
1506     }
1507     rset_clear(work, r);
1508   }
1509 }
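/* Illustrative example (annotation, register names hypothetical): if the
** right PHI values ended up in r1/r2 but the left PHIs expect r2/r1, both
** registers block each other and neither move can be emitted. The cycle is
** broken by renaming one blocked register to a free register (say r1 -> r3),
** after which the remaining mismatch resolves as a plain rename on the next
** retry of the shuffle loop above.
*/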
1510 
1511 /* Copy unsynced left/right PHI spill slots. Rarely needed. */
1512 static void asm_phi_copyspill(ASMState *as)
1513 {
1514   int need = 0;
1515   IRIns *ir;
1516   for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
1517     if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
1518       need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
1519   if ((need & 1)) {  /* Copy integer spill slots. */
1520 #if !LJ_TARGET_X86ORX64
1521     Reg r = RID_TMP;
1522 #else
1523     Reg r = RID_RET;
1524     if ((as->freeset & RSET_GPR))
1525       r = rset_pickbot((as->freeset & RSET_GPR));
1526     else
1527       emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1528 #endif
1529     for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
1530       if (ra_hasspill(ir->s)) {
1531 	IRIns *irl = IR(ir->op1);
1532 	if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
1533 	  emit_spstore(as, irl, r, sps_scale(irl->s));
1534 	  emit_spload(as, ir, r, sps_scale(ir->s));
1535 	  checkmclim(as);
1536 	}
1537       }
1538     }
1539 #if LJ_TARGET_X86ORX64
1540     if (!rset_test(as->freeset, r))
1541       emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1542 #endif
1543   }
1544 #if !LJ_SOFTFP
1545   if ((need & 2)) {  /* Copy FP spill slots. */
1546 #if LJ_TARGET_X86
1547     Reg r = RID_XMM0;
1548 #else
1549     Reg r = RID_FPRET;
1550 #endif
1551     if ((as->freeset & RSET_FPR))
1552       r = rset_pickbot((as->freeset & RSET_FPR));
1553     if (!rset_test(as->freeset, r))
1554       emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1555     for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
1556       if (ra_hasspill(ir->s)) {
1557 	IRIns *irl = IR(ir->op1);
1558 	if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
1559 	  emit_spstore(as, irl, r, sps_scale(irl->s));
1560 	  emit_spload(as, ir, r, sps_scale(ir->s));
1561 	  checkmclim(as);
1562 	}
1563       }
1564     }
1565     if (!rset_test(as->freeset, r))
1566       emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1567   }
1568 #endif
1569 }
1570 
1571 /* Emit renames for left PHIs which are only spilled outside the loop. */
1572 static void asm_phi_fixup(ASMState *as)
1573 {
1574   RegSet work = as->phiset;
1575   while (work) {
1576     Reg r = rset_picktop(work);
1577     IRRef lref = as->phireg[r];
1578     IRIns *ir = IR(lref);
1579     if (irt_ismarked(ir->t)) {
1580       irt_clearmark(ir->t);
1581       /* Left PHI gained a spill slot before the loop? */
1582       if (ra_hasspill(ir->s)) {
1583 	ra_addrename(as, r, lref, as->loopsnapno);
1584       }
1585     }
1586     rset_clear(work, r);
1587   }
1588 }
1589 
1590 /* Setup right PHI reference. */
asm_phi(ASMState * as,IRIns * ir)1591 static void asm_phi(ASMState *as, IRIns *ir)
1592 {
1593   RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
1594 		 ~as->phiset;
1595   RegSet afree = (as->freeset & allow);
1596   IRIns *irl = IR(ir->op1);
1597   IRIns *irr = IR(ir->op2);
1598   if (ir->r == RID_SINK)  /* Sink PHI. */
1599     return;
1600   /* Spill slot shuffling is not implemented yet (but rarely needed). */
1601   if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
1602     lj_trace_err(as->J, LJ_TRERR_NYIPHI);
1603   /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
1604   if ((afree & (afree-1))) {  /* Two or more free registers? */
1605     Reg r;
1606     if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
1607       r = ra_allocref(as, ir->op2, allow);
1608     } else {  /* Duplicate right PHI, need a copy (rare). */
1609       r = ra_scratch(as, allow);
1610       emit_movrr(as, irr, r, irr->r);
1611     }
1612     ir->r = (uint8_t)r;
1613     rset_set(as->phiset, r);
1614     as->phireg[r] = (IRRef1)ir->op1;
1615     irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
1616     if (ra_noreg(irl->r))
1617       ra_sethint(irl->r, r); /* Set register hint for left PHI. */
1618   } else {  /* Otherwise allocate a spill slot. */
1619     /* This is overly restrictive, but it triggers only on synthetic code. */
1620     if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
1621       lj_trace_err(as->J, LJ_TRERR_NYIPHI);
1622     ra_spill(as, ir);
1623     irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
1624   }
1625 }
1626 
1627 static void asm_loop_fixup(ASMState *as);
1628 
1629 /* Middle part of a loop. */
asm_loop(ASMState * as)1630 static void asm_loop(ASMState *as)
1631 {
1632   MCode *mcspill;
1633   /* LOOP is a guard, so the snapno is up to date. */
1634   as->loopsnapno = as->snapno;
1635   if (as->gcsteps)
1636     asm_gc_check(as);
1637   /* LOOP marks the transition from the variant to the invariant part. */
1638   as->flagmcp = as->invmcp = NULL;
1639   as->sectref = 0;
1640   if (!neverfuse(as)) as->fuseref = 0;
1641   asm_phi_shuffle(as);
1642   mcspill = as->mcp;
1643   asm_phi_copyspill(as);
1644   asm_loop_fixup(as);
1645   as->mcloop = as->mcp;
1646   RA_DBGX((as, "===== LOOP ====="));
1647   if (!as->realign) RA_DBG_FLUSH();
1648   if (as->mcp != mcspill)
1649     emit_jmp(as, mcspill);
1650 }
1651 
1652 /* -- Target-specific assembler ------------------------------------------- */
1653 
1654 #if LJ_TARGET_X86ORX64
1655 #include "lj_asm_x86.h"
1656 #elif LJ_TARGET_ARM
1657 #include "lj_asm_arm.h"
1658 #elif LJ_TARGET_ARM64
1659 #include "lj_asm_arm64.h"
1660 #elif LJ_TARGET_PPC
1661 #include "lj_asm_ppc.h"
1662 #elif LJ_TARGET_MIPS
1663 #include "lj_asm_mips.h"
1664 #elif LJ_TARGET_S390X
1665 #include "lj_asm_s390x.h"
1666 #else
1667 #error "Missing assembler for target CPU"
1668 #endif
1669 
1670 /* -- Common instruction helpers ------------------------------------------ */
1671 
1672 #if !LJ_SOFTFP32
1673 #if !LJ_TARGET_X86ORX64
1674 #define asm_ldexp(as, ir)	asm_callid(as, ir, IRCALL_ldexp)
1675 #define asm_fppowi(as, ir)	asm_callid(as, ir, IRCALL_lj_vm_powi)
1676 #endif
1677 
asm_pow(ASMState * as,IRIns * ir)1678 static void asm_pow(ASMState *as, IRIns *ir)
1679 {
1680 #if LJ_64 && LJ_HASFFI
1681   if (!irt_isnum(ir->t))
1682     asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
1683 					  IRCALL_lj_carith_powu64);
1684   else
1685 #endif
1686   if (irt_isnum(IR(ir->op2)->t))
1687     asm_callid(as, ir, IRCALL_pow);
1688   else
1689     asm_fppowi(as, ir);
1690 }
1691 
asm_div(ASMState * as,IRIns * ir)1692 static void asm_div(ASMState *as, IRIns *ir)
1693 {
1694 #if LJ_64 && LJ_HASFFI
1695   if (!irt_isnum(ir->t))
1696     asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
1697 					  IRCALL_lj_carith_divu64);
1698   else
1699 #endif
1700     asm_fpdiv(as, ir);
1701 }
1702 #endif
1703 
asm_mod(ASMState * as,IRIns * ir)1704 static void asm_mod(ASMState *as, IRIns *ir)
1705 {
1706 #if LJ_64 && LJ_HASFFI
1707   if (!irt_isint(ir->t))
1708     asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
1709 					  IRCALL_lj_carith_modu64);
1710   else
1711 #endif
1712     asm_callid(as, ir, IRCALL_lj_vm_modi);
1713 }
1714 
asm_fuseequal(ASMState * as,IRIns * ir)1715 static void asm_fuseequal(ASMState *as, IRIns *ir)
1716 {
1717   /* Fuse HREF + EQ/NE. */
1718   if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
1719     as->curins--;
1720     asm_href(as, ir-1, (IROp)ir->o);
1721   } else {
1722     asm_equal(as, ir);
1723   }
1724 }
1725 
asm_alen(ASMState * as,IRIns * ir)1726 static void asm_alen(ASMState *as, IRIns *ir)
1727 {
1728   asm_callid(as, ir, ir->op2 == REF_NIL ? IRCALL_lj_tab_len :
1729 					  IRCALL_lj_tab_len_hint);
1730 }
1731 
1732 /* -- Instruction dispatch ------------------------------------------------ */
1733 
1734 /* Assemble a single instruction. */
asm_ir(ASMState * as,IRIns * ir)1735 static void asm_ir(ASMState *as, IRIns *ir)
1736 {
1737   switch ((IROp)ir->o) {
1738   /* Miscellaneous ops. */
1739   case IR_LOOP: asm_loop(as); break;
1740   case IR_NOP: case IR_XBAR:
1741     lj_assertA(!ra_used(ir),
1742 	       "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS);
1743     break;
1744   case IR_USE:
1745     ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
1746   case IR_PHI: asm_phi(as, ir); break;
1747   case IR_HIOP: asm_hiop(as, ir); break;
1748   case IR_GCSTEP: asm_gcstep(as, ir); break;
1749   case IR_PROF: asm_prof(as, ir); break;
1750 
1751   /* Guarded assertions. */
1752   case IR_LT: case IR_GE: case IR_LE: case IR_GT:
1753   case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
1754   case IR_ABC:
1755     asm_comp(as, ir);
1756     break;
1757   case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;
1758 
1759   case IR_RETF: asm_retf(as, ir); break;
1760 
1761   /* Bit ops. */
1762   case IR_BNOT: asm_bnot(as, ir); break;
1763   case IR_BSWAP: asm_bswap(as, ir); break;
1764   case IR_BAND: asm_band(as, ir); break;
1765   case IR_BOR: asm_bor(as, ir); break;
1766   case IR_BXOR: asm_bxor(as, ir); break;
1767   case IR_BSHL: asm_bshl(as, ir); break;
1768   case IR_BSHR: asm_bshr(as, ir); break;
1769   case IR_BSAR: asm_bsar(as, ir); break;
1770   case IR_BROL: asm_brol(as, ir); break;
1771   case IR_BROR: asm_bror(as, ir); break;
1772 
1773   /* Arithmetic ops. */
1774   case IR_ADD: asm_add(as, ir); break;
1775   case IR_SUB: asm_sub(as, ir); break;
1776   case IR_MUL: asm_mul(as, ir); break;
1777   case IR_MOD: asm_mod(as, ir); break;
1778   case IR_NEG: asm_neg(as, ir); break;
1779 #if LJ_SOFTFP32
1780   case IR_DIV: case IR_POW: case IR_ABS:
1781   case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
1782     /* Unused for LJ_SOFTFP32. */
1783     lj_assertA(0, "IR %04d with unused op %d",
1784 		  (int)(ir - as->ir) - REF_BIAS, ir->o);
1785     break;
1786 #else
1787   case IR_DIV: asm_div(as, ir); break;
1788   case IR_POW: asm_pow(as, ir); break;
1789   case IR_ABS: asm_abs(as, ir); break;
1790   case IR_LDEXP: asm_ldexp(as, ir); break;
1791   case IR_FPMATH: asm_fpmath(as, ir); break;
1792   case IR_TOBIT: asm_tobit(as, ir); break;
1793 #endif
1794   case IR_MIN: asm_min(as, ir); break;
1795   case IR_MAX: asm_max(as, ir); break;
1796 
1797   /* Overflow-checking arithmetic ops. */
1798   case IR_ADDOV: asm_addov(as, ir); break;
1799   case IR_SUBOV: asm_subov(as, ir); break;
1800   case IR_MULOV: asm_mulov(as, ir); break;
1801 
1802   /* Memory references. */
1803   case IR_AREF: asm_aref(as, ir); break;
1804   case IR_HREF: asm_href(as, ir, 0); break;
1805   case IR_HREFK: asm_hrefk(as, ir); break;
1806   case IR_NEWREF: asm_newref(as, ir); break;
1807   case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
1808   case IR_FREF: asm_fref(as, ir); break;
1809   case IR_TMPREF: asm_tmpref(as, ir); break;
1810   case IR_STRREF: asm_strref(as, ir); break;
1811   case IR_LREF: asm_lref(as, ir); break;
1812 
1813   /* Loads and stores. */
1814   case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
1815     asm_ahuvload(as, ir);
1816     break;
1817   case IR_FLOAD: asm_fload(as, ir); break;
1818   case IR_XLOAD: asm_xload(as, ir); break;
1819   case IR_SLOAD: asm_sload(as, ir); break;
1820   case IR_ALEN: asm_alen(as, ir); break;
1821 
1822   case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
1823   case IR_FSTORE: asm_fstore(as, ir); break;
1824   case IR_XSTORE: asm_xstore(as, ir); break;
1825 
1826   /* Allocations. */
1827   case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
1828   case IR_TNEW: asm_tnew(as, ir); break;
1829   case IR_TDUP: asm_tdup(as, ir); break;
1830   case IR_CNEW: case IR_CNEWI:
1831 #if LJ_HASFFI
1832     asm_cnew(as, ir);
1833 #else
1834     lj_assertA(0, "IR %04d with unused op %d",
1835 		  (int)(ir - as->ir) - REF_BIAS, ir->o);
1836 #endif
1837     break;
1838 
1839   /* Buffer operations. */
1840   case IR_BUFHDR: asm_bufhdr(as, ir); break;
1841   case IR_BUFPUT: asm_bufput(as, ir); break;
1842   case IR_BUFSTR: asm_bufstr(as, ir); break;
1843 
1844   /* Write barriers. */
1845   case IR_TBAR: asm_tbar(as, ir); break;
1846   case IR_OBAR: asm_obar(as, ir); break;
1847 
1848   /* Type conversions. */
1849   case IR_CONV: asm_conv(as, ir); break;
1850   case IR_TOSTR: asm_tostr(as, ir); break;
1851   case IR_STRTO: asm_strto(as, ir); break;
1852 
1853   /* Calls. */
1854   case IR_CALLA:
1855     as->gcsteps++;
1856     /* fallthrough */
1857   case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
1858   case IR_CALLXS: asm_callx(as, ir); break;
1859   case IR_CARG: break;
1860 
1861   default:
1862     setintV(&as->J->errinfo, ir->o);
1863     lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
1864     break;
1865   }
1866 }
1867 
1868 /* -- Head of trace ------------------------------------------------------- */
1869 
1870 /* Head of a root trace. */
asm_head_root(ASMState * as)1871 static void asm_head_root(ASMState *as)
1872 {
1873   int32_t spadj;
1874   asm_head_root_base(as);
1875   emit_setvmstate(as, (int32_t)as->T->traceno);
1876   spadj = asm_stack_adjust(as);
1877   as->T->spadjust = (uint16_t)spadj;
1878   emit_spsub(as, spadj);
1879   /* Root traces assume a checked stack for the starting proto. */
1880   as->T->topslot = gcref(as->T->startpt)->pt.framesize;
1881 }
1882 
1883 /* Head of a side trace.
1884 **
1885 ** The current simplistic algorithm requires that all slots inherited
1886 ** from the parent are live in a register between pass 2 and pass 3. This
1887 ** avoids the complexity of stack slot shuffling. But of course this may
1888 ** overflow the register set in some cases and cause the dreaded error:
1889 ** "NYI: register coalescing too complex". A refined algorithm is needed.
1890 */
asm_head_side(ASMState * as)1891 static void asm_head_side(ASMState *as)
1892 {
1893   IRRef1 sloadins[RID_MAX];
1894   RegSet allow = RSET_ALL;  /* Inverse of all coalesced registers. */
1895   RegSet live = RSET_EMPTY;  /* Live parent registers. */
1896   IRIns *irp = &as->parent->ir[REF_BASE];  /* Parent base. */
1897   int32_t spadj, spdelta;
1898   int pass2 = 0;
1899   int pass3 = 0;
1900   IRRef i;
1901 
1902   if (as->snapno && as->topslot > as->parent->topslot) {
1903     /* Force snap #0 alloc to prevent register overwrite in stack check. */
1904     asm_snap_alloc(as, 0);
1905   }
1906   allow = asm_head_side_base(as, irp, allow);
1907 
1908   /* Scan all parent SLOADs and collect register dependencies. */
1909   for (i = as->stopins; i > REF_BASE; i--) {
1910     IRIns *ir = IR(i);
1911     RegSP rs;
1912     lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
1913 	       (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL,
1914 	       "IR %04d has bad parent op %d",
1915 	       (int)(ir - as->ir) - REF_BIAS, ir->o);
1916     rs = as->parentmap[i - REF_FIRST];
1917     if (ra_hasreg(ir->r)) {
1918       rset_clear(allow, ir->r);
1919       if (ra_hasspill(ir->s)) {
1920 	ra_save(as, ir, ir->r);
1921 	checkmclim(as);
1922       }
1923     } else if (ra_hasspill(ir->s)) {
1924       irt_setmark(ir->t);
1925       pass2 = 1;
1926     }
1927     if (ir->r == rs) {  /* Coalesce matching registers right now. */
1928       ra_free(as, ir->r);
1929     } else if (ra_hasspill(regsp_spill(rs))) {
1930       if (ra_hasreg(ir->r))
1931 	pass3 = 1;
1932     } else if (ra_used(ir)) {
1933       sloadins[rs] = (IRRef1)i;
1934       rset_set(live, rs);  /* Block live parent register. */
1935     }
1936   }
1937 
1938   /* Calculate stack frame adjustment. */
1939   spadj = asm_stack_adjust(as);
1940   spdelta = spadj - (int32_t)as->parent->spadjust;
1941   if (spdelta < 0) {  /* Don't shrink the stack frame. */
1942     spadj = (int32_t)as->parent->spadjust;
1943     spdelta = 0;
1944   }
1945   as->T->spadjust = (uint16_t)spadj;
1946 
1947   /* Reload spilled target registers. */
1948   if (pass2) {
1949     for (i = as->stopins; i > REF_BASE; i--) {
1950       IRIns *ir = IR(i);
1951       if (irt_ismarked(ir->t)) {
1952 	RegSet mask;
1953 	Reg r;
1954 	RegSP rs;
1955 	irt_clearmark(ir->t);
1956 	rs = as->parentmap[i - REF_FIRST];
1957 	if (!ra_hasspill(regsp_spill(rs)))
1958 	  ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
1959 	else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
1960 	  continue;  /* Same spill slot, do nothing. */
1961 	mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
1962 	if (mask == RSET_EMPTY)
1963 	  lj_trace_err(as->J, LJ_TRERR_NYICOAL);
1964 	r = ra_allocref(as, i, mask);
1965 	ra_save(as, ir, r);
1966 	rset_clear(allow, r);
1967 	if (r == rs) {  /* Coalesce matching registers right now. */
1968 	  ra_free(as, r);
1969 	  rset_clear(live, r);
1970 	} else if (ra_hasspill(regsp_spill(rs))) {
1971 	  pass3 = 1;
1972 	}
1973 	checkmclim(as);
1974       }
1975     }
1976   }
1977 
1978   /* Store trace number and adjust stack frame relative to the parent. */
1979   emit_setvmstate(as, (int32_t)as->T->traceno);
1980   emit_spsub(as, spdelta);
1981 
1982 #if !LJ_TARGET_X86ORX64
1983   /* Restore BASE register from parent spill slot. */
1984   if (ra_hasspill(irp->s))
1985     emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
1986 #endif
1987 
1988   /* Restore target registers from parent spill slots. */
1989   if (pass3) {
1990     RegSet work = ~as->freeset & RSET_ALL;
1991     while (work) {
1992       Reg r = rset_pickbot(work);
1993       IRRef ref = regcost_ref(as->cost[r]);
1994       RegSP rs = as->parentmap[ref - REF_FIRST];
1995       rset_clear(work, r);
1996       if (ra_hasspill(regsp_spill(rs))) {
1997 	int32_t ofs = sps_scale(regsp_spill(rs));
1998 	ra_free(as, r);
1999 	emit_spload(as, IR(ref), r, ofs);
2000 	checkmclim(as);
2001       }
2002     }
2003   }
2004 
2005   /* Shuffle registers to match up target regs with parent regs. */
2006   for (;;) {
2007     RegSet work;
2008 
2009     /* Repeatedly coalesce free live registers by moving to their target. */
2010     while ((work = as->freeset & live) != RSET_EMPTY) {
2011       Reg rp = rset_pickbot(work);
2012       IRIns *ir = IR(sloadins[rp]);
2013       rset_clear(live, rp);
2014       rset_clear(allow, rp);
2015       ra_free(as, ir->r);
2016       emit_movrr(as, ir, ir->r, rp);
2017       checkmclim(as);
2018     }
2019 
2020     /* We're done if no live registers remain. */
2021     if (live == RSET_EMPTY)
2022       break;
2023 
2024     /* Break cycles by renaming one target to a temp. register. */
2025     if (live & RSET_GPR) {
2026       RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
2027       if (tmpset == RSET_EMPTY)
2028 	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
2029       ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
2030     }
2031     if (!LJ_SOFTFP && (live & RSET_FPR)) {
2032       RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
2033       if (tmpset == RSET_EMPTY)
2034 	lj_trace_err(as->J, LJ_TRERR_NYICOAL);
2035       ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
2036     }
2037     checkmclim(as);
2038     /* Continue with coalescing to fix up the broken cycle(s). */
2039   }
2040 
2041   /* Inherit top stack slot already checked by parent trace. */
2042   as->T->topslot = as->parent->topslot;
2043   if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
2044 #ifdef EXITSTATE_CHECKEXIT
2045     /* Highest exit + 1 indicates stack check. */
2046     ExitNo exitno = as->T->nsnap;
2047 #else
2048     /* Reuse the parent exit in the context of the parent trace. */
2049     ExitNo exitno = as->J->exitno;
2050 #endif
2051     as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
2052     asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
2053   }
2054 }
2055 
2056 /* -- Tail of trace ------------------------------------------------------- */
2057 
2058 /* Get base slot for a snapshot. */
asm_baseslot(ASMState * as,SnapShot * snap,int * gotframe)2059 static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
2060 {
2061   SnapEntry *map = &as->T->snapmap[snap->mapofs];
2062   MSize n;
2063   for (n = snap->nent; n > 0; n--) {
2064     SnapEntry sn = map[n-1];
2065     if ((sn & SNAP_FRAME)) {
2066       *gotframe = 1;
2067       return snap_slot(sn) - LJ_FR2;
2068     }
2069   }
2070   return 0;
2071 }
2072 
2073 /* Link to another trace. */
asm_tail_link(ASMState * as)2074 static void asm_tail_link(ASMState *as)
2075 {
2076   SnapNo snapno = as->T->nsnap-1;  /* Last snapshot. */
2077   SnapShot *snap = &as->T->snap[snapno];
2078   int gotframe = 0;
2079   BCReg baseslot = asm_baseslot(as, snap, &gotframe);
2080 
2081   as->topslot = snap->topslot;
2082   checkmclim(as);
2083   ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));
2084 
2085   if (as->T->link == 0) {
2086     /* Setup fixed registers for exit to interpreter. */
2087     const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
2088     int32_t mres;
2089     if (bc_op(*pc) == BC_JLOOP) {  /* NYI: find a better way to do this. */
2090       BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
2091       if (bc_isret(bc_op(*retpc)))
2092 	pc = retpc;
2093     }
2094 #if LJ_GC64
2095     emit_loadu64(as, RID_LPC, u64ptr(pc));
2096 #else
2097     ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
2098     ra_allockreg(as, i32ptr(pc), RID_LPC);
2099 #endif
2100     mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
2101     switch (bc_op(*pc)) {
2102     case BC_CALLM: case BC_CALLMT:
2103       mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
2104     case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
2105     case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
2106     default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
2107     }
2108     ra_allockreg(as, mres, RID_RET);  /* Return MULTRES or 0. */
2109   } else if (baseslot) {
2110     /* Save modified BASE for linking to trace with higher start frame. */
2111     emit_setgl(as, RID_BASE, jit_base);
2112   }
2113   emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
2114 
2115   if (as->J->ktrace) {  /* Patch ktrace slot with the final GCtrace pointer. */
2116     setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
2117     IR(as->J->ktrace)->o = IR_KGC;
2118   }
2119 
2120   /* Sync the interpreter state with the on-trace state. */
2121   asm_stack_restore(as, snap);
2122 
2123   /* Root traces that add frames need to check the stack at the end. */
2124   if (!as->parent && gotframe)
2125     asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
2126 }
2127 
2128 /* -- Trace setup --------------------------------------------------------- */
2129 
2130 /* Clear reg/sp for all instructions and add register hints. */
asm_setup_regsp(ASMState * as)2131 static void asm_setup_regsp(ASMState *as)
2132 {
2133   GCtrace *T = as->T;
2134   int sink = T->sinktags;
2135   IRRef nins = T->nins;
2136   IRIns *ir, *lastir;
2137   int inloop;
2138 #if LJ_TARGET_ARM
2139   uint32_t rload = 0xa6402a64;
2140 #endif
2141 
2142   ra_setup(as);
2143 #if LJ_TARGET_ARM64
2144   ra_setkref(as, RID_GL, (intptr_t)J2G(as->J));
2145 #endif
2146 
2147   /* Clear reg/sp for constants. */
2148   for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
2149     ir->prev = REGSP_INIT;
2150     if (irt_is64(ir->t) && ir->o != IR_KNULL) {
2151 #if LJ_GC64
2152       /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
2153       ir->i = 0;  /* Will become non-zero only for RIP-relative addresses. */
2154 #else
2155       /* Make life easier for backends by putting address of constant in i. */
2156       ir->i = (int32_t)(intptr_t)(ir+1);
2157 #endif
2158       ir++;
2159     }
2160   }
2161 
2162   /* REF_BASE is used for implicit references to the BASE register. */
2163   lastir->prev = REGSP_HINT(RID_BASE);
2164 
2165   as->snaprename = nins;
2166   as->snapref = nins;
2167   as->snapno = T->nsnap;
2168   as->snapalloc = 0;
2169 
2170   as->stopins = REF_BASE;
2171   as->orignins = nins;
2172   as->curins = nins;
2173 
2174   /* Setup register hints for parent link instructions. */
2175   ir = IR(REF_FIRST);
2176   if (as->parent) {
2177     uint16_t *p;
2178     lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir);
2179     if (lastir - ir > LJ_MAX_JSLOTS)
2180       lj_trace_err(as->J, LJ_TRERR_NYICOAL);
2181     as->stopins = (IRRef)((lastir-1) - as->ir);
2182     for (p = as->parentmap; ir < lastir; ir++) {
2183       RegSP rs = ir->prev;
2184       *p++ = (uint16_t)rs;  /* Copy original parent RegSP to parentmap. */
2185       if (!ra_hasspill(regsp_spill(rs)))
2186 	ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
2187       else
2188 	ir->prev = REGSP_INIT;
2189     }
2190   }
2191 
2192   inloop = 0;
2193   as->evenspill = SPS_FIRST;
2194   for (lastir = IR(nins); ir < lastir; ir++) {
2195     if (sink) {
2196       if (ir->r == RID_SINK)
2197 	continue;
2198       if (ir->r == RID_SUNK) {  /* Revert after ASM restart. */
2199 	ir->r = RID_SINK;
2200 	continue;
2201       }
2202     }
2203     switch (ir->o) {
2204     case IR_LOOP:
2205       inloop = 1;
2206       break;
2207 #if LJ_TARGET_ARM
2208     case IR_SLOAD:
2209       if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
2210 	break;
2211       /* fallthrough */
2212     case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2213       if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
2214       ir->prev = (uint16_t)REGSP_HINT((rload & 15));
2215       rload = lj_ror(rload, 4);
2216       continue;
2217     case IR_TMPREF:
2218       if ((ir->op2 & IRTMPREF_OUT2) && as->evenspill < 4)
2219 	as->evenspill = 4;  /* TMPREF OUT2 needs two TValues on the stack. */
2220       break;
2221 #endif
2222     case IR_CALLXS: {
2223       CCallInfo ci;
2224       ci.flags = asm_callx_flags(as, ir);
2225       ir->prev = asm_setup_call_slots(as, ir, &ci);
2226       if (inloop)
2227 	as->modset |= RSET_SCRATCH;
2228       continue;
2229       }
2230     case IR_CALLL:
2231       /* lj_vm_next needs two TValues on the stack. */
2232 #if LJ_TARGET_X64 && LJ_ABI_WIN
2233       if (ir->op2 == IRCALL_lj_vm_next && as->evenspill < SPS_FIRST + 4)
2234 	as->evenspill = SPS_FIRST + 4;
2235 #else
2236       if (SPS_FIRST < 4 && ir->op2 == IRCALL_lj_vm_next && as->evenspill < 4)
2237 	as->evenspill = 4;
2238 #endif
2239       /* fallthrough */
2240     case IR_CALLN: case IR_CALLA: case IR_CALLS: {
2241       const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
2242       ir->prev = asm_setup_call_slots(as, ir, ci);
2243       if (inloop)
2244 	as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
2245 		      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
2246       continue;
2247       }
2248     case IR_HIOP:
2249       switch ((ir-1)->o) {
2250 #if LJ_SOFTFP && LJ_TARGET_ARM
2251       case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2252 	if (ra_hashint((ir-1)->r)) {
2253 	  ir->prev = (ir-1)->prev + 1;
2254 	  continue;
2255 	}
2256 	break;
2257 #endif
2258 #if !LJ_SOFTFP && LJ_NEED_FP64 && LJ_32 && LJ_HASFFI
2259       case IR_CONV:
2260 	if (irt_isfp((ir-1)->t)) {
2261 	  ir->prev = REGSP_HINT(RID_FPRET);
2262 	  continue;
2263 	}
2264 #endif
2265       /* fallthrough */
2266       case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
2267 #if LJ_SOFTFP
2268       case IR_MIN: case IR_MAX:
2269 #endif
2270 	(ir-1)->prev = REGSP_HINT(RID_RETLO);
2271 	ir->prev = REGSP_HINT(RID_RETHI);
2272 	continue;
2273       default:
2274 	break;
2275       }
2276       break;
2277 #if LJ_SOFTFP
2278     case IR_MIN: case IR_MAX:
2279       if ((ir+1)->o != IR_HIOP) break;
2280 #endif
2281     /* fallthrough */
2282     /* C calls evict all scratch regs and return results in RID_RET. */
2283     case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
2284       if (REGARG_NUMGPR < 3 && as->evenspill < 3)
2285 	as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
2286 #if LJ_TARGET_X86 && LJ_HASFFI
2287       if (0) {
2288     case IR_CNEW:
2289 	if (ir->op2 != REF_NIL && as->evenspill < 4)
2290 	  as->evenspill = 4;  /* lj_cdata_newv needs 4 args. */
2291       }
2292       /* fallthrough */
2293 #else
2294       /* fallthrough */
2295     case IR_CNEW:
2296 #endif
2297       /* fallthrough */
2298     case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
2299     case IR_BUFSTR:
2300       ir->prev = REGSP_HINT(RID_RET);
2301       if (inloop)
2302 	as->modset = RSET_SCRATCH;
2303       continue;
2304     case IR_STRTO: case IR_OBAR:
2305       if (inloop)
2306 	as->modset = RSET_SCRATCH;
2307       break;
2308 #if !LJ_SOFTFP
2309 #if !LJ_TARGET_X86ORX64
2310     case IR_LDEXP:
2311 #endif
2312 #endif
2313       /* fallthrough */
2314     case IR_POW:
2315       if (!LJ_SOFTFP && irt_isnum(ir->t)) {
2316 	if (inloop)
2317 	  as->modset |= RSET_SCRATCH;
2318 #if LJ_TARGET_X86
2319 	if (irt_isnum(IR(ir->op2)->t)) {
2320 	  if (as->evenspill < 4)  /* Leave room to call pow(). */
2321 	    as->evenspill = 4;
2322 	}
2323 	break;
2324 #else
2325 	ir->prev = REGSP_HINT(RID_FPRET);
2326 	continue;
2327 #endif
2328       }
2329       /* fallthrough */ /* for integer POW */
2330     case IR_DIV: case IR_MOD:
2331       if ((LJ_64 && LJ_SOFTFP) || !irt_isnum(ir->t)) {
2332 	ir->prev = REGSP_HINT(RID_RET);
2333 	if (inloop)
2334 	  as->modset |= (RSET_SCRATCH & RSET_GPR);
2335 	continue;
2336       }
2337       break;
2338 #if LJ_64 && LJ_SOFTFP
2339     case IR_ADD: case IR_SUB: case IR_MUL:
2340       if (irt_isnum(ir->t)) {
2341 	ir->prev = REGSP_HINT(RID_RET);
2342 	if (inloop)
2343 	  as->modset |= (RSET_SCRATCH & RSET_GPR);
2344 	continue;
2345       }
2346       break;
2347 #endif
2348     case IR_FPMATH:
2349 #if LJ_TARGET_X86ORX64
2350       if (ir->op2 <= IRFPM_TRUNC) {
2351 	if (!(as->flags & JIT_F_SSE4_1)) {
2352 	  ir->prev = REGSP_HINT(RID_XMM0);
2353 	  if (inloop)
2354 	    as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
2355 	  continue;
2356 	}
2357 	break;
2358       }
2359 #endif
2360       if (inloop)
2361 	as->modset |= RSET_SCRATCH;
2362 #if LJ_TARGET_X86
2363       break;
2364 #else
2365       ir->prev = REGSP_HINT(RID_FPRET);
2366       continue;
2367 #endif
2368 #if LJ_TARGET_X86ORX64
2369     /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
2370     case IR_BSHL: case IR_BSHR: case IR_BSAR:
2371       if ((as->flags & JIT_F_BMI2))  /* Except if BMI2 is available. */
2372 	break;
2373       /* fallthrough */
2374     case IR_BROL: case IR_BROR:
2375       if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
2376 	IR(ir->op2)->r = REGSP_HINT(RID_ECX);
2377 	if (inloop)
2378 	  rset_set(as->modset, RID_ECX);
2379       }
2380       break;
2381 #endif
2382     /* Do not propagate hints across type conversions or loads. */
2383     case IR_TOBIT:
2384     case IR_XLOAD:
2385 #if !LJ_TARGET_ARM
2386     case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2387 #endif
2388       break;
2389     case IR_CONV:
2390       if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
2391 	  (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
2392 	break;
2393       /* fallthrough */
2394     default:
2395       /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
2396       if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
2397 	  ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
2398 	ir->prev = IR(ir->op1)->prev;
2399 	continue;
2400       }
2401       break;
2402     }
2403     ir->prev = REGSP_INIT;
2404   }
2405   if ((as->evenspill & 1))
2406     as->oddspill = as->evenspill++;
2407   else
2408     as->oddspill = 0;
2409 }
2410 
2411 /* -- Assembler core ------------------------------------------------------ */
2412 
2413 /* Assemble a trace. */
lj_asm_trace(jit_State * J,GCtrace * T)2414 void lj_asm_trace(jit_State *J, GCtrace *T)
2415 {
2416   ASMState as_;
2417   ASMState *as = &as_;
2418 
2419   /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
2420   {
2421     IRRef nins = T->nins;
2422     IRIns *ir = &T->ir[nins-1];
2423     if (ir->o == IR_NOP || ir->o == IR_RENAME) {
2424       do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
2425       T->nins = nins;
2426     }
2427   }
2428 
2429   /* Ensure an initialized instruction beyond the last one for HIOP checks. */
2430   /* This also allows one RENAME to be added without reallocating curfinal. */
2431   as->orignins = lj_ir_nextins(J);
2432   lj_ir_nop(&J->cur.ir[as->orignins]);
2433 
2434   /* Setup initial state. Copy some fields to reduce indirections. */
2435   as->J = J;
2436   as->T = T;
2437   J->curfinal = lj_trace_alloc(J->L, T);  /* This copies the IR, too. */
2438   as->flags = J->flags;
2439   as->loopref = J->loopref;
2440   as->realign = NULL;
2441   as->loopinv = 0;
2442   as->parent = J->parent ? traceref(J, J->parent) : NULL;
2443 
2444   /* Reserve MCode memory. */
2445   as->mctop = as->mctoporig = lj_mcode_reserve(J, &as->mcbot);
2446   as->mcp = as->mctop;
2447   as->mclim = as->mcbot + MCLIM_REDZONE;
2448   asm_setup_target(as);
2449 
2450   /*
2451   ** This is a loop, because the MCode may have to be (re-)assembled
2452   ** multiple times:
2453   **
2454   ** 1. as->realign is set (and the assembly aborted), if the arch-specific
2455   **    backend wants the MCode to be aligned differently.
2456   **
2457   **    This is currently only the case on x86/x64, where small loops get
2458   **    an aligned loop body plus a short branch. Not much effort is wasted,
2459   **    because the abort happens very quickly and only once.
2460   **
2461   ** 2. The IR is immovable, since the MCode embeds pointers to various
2462   **    constants inside the IR. But RENAMEs may need to be added to the IR
2463   **    during assembly, which might grow and reallocate the IR. We check
2464   **    at the end if the IR (in J->cur.ir) has actually grown, resize the
2465   **    copy (in J->curfinal.ir) and try again.
2466   **
2467   **    95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
2468   **    2 RENAMEs and only 0.5% have more than that. That's why we opt to
2469   **    always have one spare slot in the IR (see above), which means we
2470   **    have to redo the assembly for only ~2% of all traces.
2471   **
2472   **    Very, very rarely, this needs to be done repeatedly, since the
2473   **    location of constants inside the IR (actually, reachability from
2474   **    a global pointer) may affect register allocation and thus the
2475   **    number of RENAMEs.
2476   */
2477   for (;;) {
2478     as->mcp = as->mctop;
2479 #ifdef LUA_USE_ASSERT
2480     as->mcp_prev = as->mcp;
2481 #endif
2482     as->ir = J->curfinal->ir;  /* Use the copied IR. */
2483     as->curins = J->cur.nins = as->orignins;
2484 
2485     RA_DBG_START();
2486     RA_DBGX((as, "===== STOP ====="));
2487 
2488     /* General trace setup. Emit tail of trace. */
2489     asm_tail_prep(as);
2490     as->mcloop = NULL;
2491     as->flagmcp = NULL;
2492     as->topslot = 0;
2493     as->gcsteps = 0;
2494     as->sectref = as->loopref;
2495     as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
2496     asm_setup_regsp(as);
2497     if (!as->loopref)
2498       asm_tail_link(as);
2499 
2500     /* Assemble a trace in linear backwards order. */
2501     for (as->curins--; as->curins > as->stopins; as->curins--) {
2502       IRIns *ir = IR(as->curins);
2503       /* 64 bit types handled by SPLIT for 32 bit archs. */
2504       lj_assertA(!(LJ_32 && irt_isint64(ir->t)),
2505 		 "IR %04d has unsplit 64 bit type",
2506 		 (int)(ir - as->ir) - REF_BIAS);
2507       asm_snap_prev(as);
2508       if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
2509 	continue;  /* Dead-code elimination can be soooo easy. */
2510       if (irt_isguard(ir->t))
2511 	asm_snap_prep(as);
2512       RA_DBG_REF();
2513       checkmclim(as);
2514       asm_ir(as, ir);
2515     }
2516 
2517     if (as->realign && J->curfinal->nins >= T->nins)
2518       continue;  /* Retry in case only the MCode needs to be realigned. */
2519 
2520     /* Emit head of trace. */
2521     RA_DBG_REF();
2522     checkmclim(as);
2523     if (as->gcsteps > 0) {
2524       as->curins = as->T->snap[0].ref;
2525       asm_snap_prep(as);  /* The GC check is a guard. */
2526       asm_gc_check(as);
2527       as->curins = as->stopins;
2528     }
2529     ra_evictk(as);
2530     if (as->parent)
2531       asm_head_side(as);
2532     else
2533       asm_head_root(as);
2534     asm_phi_fixup(as);
2535 
2536     if (J->curfinal->nins >= T->nins) {  /* IR didn't grow? */
2537       lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth");
2538       memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
2539 	     (T->nins - as->orignins) * sizeof(IRIns));  /* Copy RENAMEs. */
2540       T->nins = J->curfinal->nins;
2541       /* Fill mcofs of any unprocessed snapshots. */
2542       as->curins = REF_FIRST;
2543       asm_snap_prev(as);
2544       break;  /* Done. */
2545     }
2546 
2547     /* Otherwise try again with a bigger IR. */
2548     lj_trace_free(J2G(J), J->curfinal);
2549     J->curfinal = NULL;  /* In case lj_trace_alloc() OOMs. */
2550     J->curfinal = lj_trace_alloc(J->L, T);
2551     as->realign = NULL;
2552   }
2553 
2554   RA_DBGX((as, "===== START ===="));
2555   RA_DBG_FLUSH();
2556   if (as->freeset != RSET_ALL)
2557     lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */
2558 
2559   /* Set trace entry point before fixing up tail to allow link to self. */
2560   T->mcode = as->mcp;
2561   T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
2562   if (as->loopref)
2563     asm_loop_tail_fixup(as);
2564   else
2565     asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
2566   T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
2567   asm_snap_fixup_mcofs(as);
2568 #if LJ_TARGET_MCODE_FIXUP
2569   asm_mcode_fixup(T->mcode, T->szmcode);
2570 #endif
2571   lj_mcode_sync(T->mcode, as->mctoporig);
2572 }
2573 
2574 #undef IR
2575 
2576 #endif
2577