/*
** IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_asm_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#endif
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_ircall.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
25 #include "lj_iropt.h"
26 #include "lj_trace.h"
27 #include "lj_snap.h"
28 #include "lj_asm.h"
29 #include "lj_dispatch.h"
30 #include "lj_vm.h"
31 #include "lj_target.h"
32
33 #ifdef LUA_USE_ASSERT
34 #include <stdio.h>
35 #endif
36
37 /* -- Assembler state and common macros ----------------------------------- */
38
39 /* Assembler state. */
40 typedef struct ASMState {
41 RegCost cost[RID_MAX]; /* Reference and blended allocation cost for regs. */
42
43 MCode *mcp; /* Current MCode pointer (grows down). */
44 MCode *mclim; /* Lower limit for MCode memory + red zone. */
45 #ifdef LUA_USE_ASSERT
46 MCode *mcp_prev; /* Red zone overflow check. */
47 #endif
48
49 IRIns *ir; /* Copy of pointer to IR instructions/constants. */
50 jit_State *J; /* JIT compiler state. */
51
52 #if LJ_TARGET_X86ORX64
53 x86ModRM mrm; /* Fused x86 address operand. */
54 #endif
55
56 RegSet freeset; /* Set of free registers. */
57 RegSet modset; /* Set of registers modified inside the loop. */
58 RegSet weakset; /* Set of weakly referenced registers. */
59 RegSet phiset; /* Set of PHI registers. */
60
61 uint32_t flags; /* Copy of JIT compiler flags. */
62 int loopinv; /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */
63
64 int32_t evenspill; /* Next even spill slot. */
65 int32_t oddspill; /* Next odd spill slot (or 0). */
66
67 IRRef curins; /* Reference of current instruction. */
68 IRRef stopins; /* Stop assembly before hitting this instruction. */
69 IRRef orignins; /* Original T->nins. */
70
71 IRRef snapref; /* Current snapshot is active after this reference. */
72 IRRef snaprename; /* Rename highwater mark for snapshot check. */
73 SnapNo snapno; /* Current snapshot number. */
74 SnapNo loopsnapno; /* Loop snapshot number. */
75
76 IRRef fuseref; /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
77 IRRef sectref; /* Section base reference (loopref or 0). */
78 IRRef loopref; /* Reference of LOOP instruction (or 0). */
79
80 BCReg topslot; /* Number of slots for stack check (unless 0). */
81 int32_t gcsteps; /* Accumulated number of GC steps (per section). */
82
83 GCtrace *T; /* Trace to assemble. */
84 GCtrace *parent; /* Parent trace (or NULL). */
85
86 MCode *mcbot; /* Bottom of reserved MCode. */
87 MCode *mctop; /* Top of generated MCode. */
88 MCode *mcloop; /* Pointer to loop MCode (or NULL). */
89 MCode *invmcp; /* Points to invertible loop branch (or NULL). */
90 MCode *flagmcp; /* Pending opportunity to merge flag setting ins. */
91 MCode *realign; /* Realign loop if not NULL. */
92
93 #ifdef RID_NUM_KREF
94 intptr_t krefk[RID_NUM_KREF];
95 #endif
96 IRRef1 phireg[RID_MAX]; /* PHI register references. */
97 uint16_t parentmap[LJ_MAX_JSLOTS]; /* Parent instruction to RegSP map. */
98 } ASMState;
99
100 #define IR(ref) (&as->ir[(ref)])
101
102 #define ASMREF_TMP1 REF_TRUE /* Temp. register. */
103 #define ASMREF_TMP2 REF_FALSE /* Temp. register. */
104 #define ASMREF_L REF_NIL /* Stores register for L. */
105
106 /* Check for variant to invariant references. */
107 #define iscrossref(as, ref) ((ref) < as->sectref)
108
109 /* Inhibit memory op fusion from variant to invariant references. */
110 #define FUSE_DISABLED (~(IRRef)0)
111 #define mayfuse(as, ref) ((ref) > as->fuseref)
112 #define neverfuse(as) (as->fuseref == FUSE_DISABLED)
113 #define canfuse(as, ir) (!neverfuse(as) && !irt_isphi((ir)->t))
114 #define opisfusableload(o) \
115 ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
116 (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
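
/* Example (illustrative, not in the original source): with as->fuseref set
** to the LOOP reference, mayfuse() accepts only refs emitted after the loop
** entry, so a load in the variant part may be fused into its user, while
** fusion across the variant/invariant boundary is rejected. FUSE_DISABLED
** is ~(IRRef)0, which no reference can exceed, so mayfuse() then always
** fails and neverfuse() is true.
*/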

/* Sparse limit checks using a red zone before the actual limit. */
#define MCLIM_REDZONE 64

static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
{
  lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
}

static LJ_AINLINE void checkmclim(ASMState *as)
{
#ifdef LUA_USE_ASSERT
  if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
    IRIns *ir = IR(as->curins+1);
    fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d %02d %04d %04d\n", as->mcp,
            as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
    lua_assert(0);
  }
#endif
  if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
#ifdef LUA_USE_ASSERT
  as->mcp_prev = as->mcp;
#endif
}
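
/* Note on the red zone contract (added commentary): machine code is
** generated backwards, so as->mcp only moves down. checkmclim() is called
** sparsely (typically once per assembled IR instruction), and the
** MCLIM_REDZONE bytes below the limit absorb whatever one step emits
** between two checks. The assertion above flags a single step that wrote
** more than the red zone allows.
*/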

#ifdef RID_NUM_KREF
#define ra_iskref(ref)      ((ref) < RID_NUM_KREF)
#define ra_krefreg(ref)     ((Reg)(RID_MIN_KREF + (Reg)(ref)))
#define ra_krefk(as, ref)   (as->krefk[(ref)])

static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
{
  IRRef ref = (IRRef)(r - RID_MIN_KREF);
  as->krefk[ref] = k;
  as->cost[r] = REGCOST(ref, ref);
}

#else
#define ra_iskref(ref)      0
#define ra_krefreg(ref)     RID_MIN_GPR
#define ra_krefk(as, ref)   0
#endif

/* Arch-specific field offsets. */
static const uint8_t field_ofs[IRFL__MAX+1] = {
#define FLOFS(name, ofs)  (uint8_t)(ofs),
IRFLDEF(FLOFS)
#undef FLOFS
  0
};

/* -- Target-specific instruction emitter --------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"
#elif LJ_TARGET_ARM
#include "lj_emit_arm.h"
#elif LJ_TARGET_ARM64
#include "lj_emit_arm64.h"
#elif LJ_TARGET_PPC
#include "lj_emit_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_emit_mips.h"
#else
#error "Missing instruction emitter for target CPU"
#endif

/* Generic load/store of register from/to stack slot. */
#define emit_spload(as, ir, r, ofs) \
  emit_loadofs(as, ir, (r), RID_SP, (ofs))
#define emit_spstore(as, ir, r, ofs) \
  emit_storeofs(as, ir, (r), RID_SP, (ofs))

/* -- Register allocator debugging ---------------------------------------- */

/* #define LUAJIT_DEBUG_RA */

#ifdef LUAJIT_DEBUG_RA

#include <stdio.h>
#include <stdarg.h>

#define RIDNAME(name) #name,
static const char *const ra_regname[] = {
  GPRDEF(RIDNAME)
  FPRDEF(RIDNAME)
  VRIDDEF(RIDNAME)
  NULL
};
#undef RIDNAME

static char ra_dbg_buf[65536];
static char *ra_dbg_p;
static char *ra_dbg_merge;
static MCode *ra_dbg_mcp;

static void ra_dstart(void)
{
  ra_dbg_p = ra_dbg_buf;
  ra_dbg_merge = NULL;
  ra_dbg_mcp = NULL;
}

static void ra_dflush(void)
{
  fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  ra_dstart();
}

static void ra_dprintf(ASMState *as, const char *fmt, ...)
{
  char *p;
  va_list argp;
  va_start(argp, fmt);
  p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  ra_dbg_mcp = NULL;
  p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
  for (;;) {
    const char *e = strchr(fmt, '$');
    if (e == NULL) break;
    memcpy(p, fmt, (size_t)(e-fmt));
    p += e-fmt;
    if (e[1] == 'r') {
      Reg r = va_arg(argp, Reg) & RID_MASK;
      if (r <= RID_MAX) {
        const char *q;
        for (q = ra_regname[r]; *q; q++)
          *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
      } else {
        *p++ = '?';
        lua_assert(0);
      }
    } else if (e[1] == 'f' || e[1] == 'i') {
      IRRef ref;
      if (e[1] == 'f')
        ref = va_arg(argp, IRRef);
      else
        ref = va_arg(argp, IRIns *) - as->ir;
      if (ref >= REF_BIAS)
        p += sprintf(p, "%04d", ref - REF_BIAS);
      else
        p += sprintf(p, "K%03d", REF_BIAS - ref);
    } else if (e[1] == 's') {
      uint32_t slot = va_arg(argp, uint32_t);
      p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
    } else if (e[1] == 'x') {
      p += sprintf(p, "%08x", va_arg(argp, int32_t));
    } else {
      lua_assert(0);
    }
    fmt = e+2;
  }
  va_end(argp);
  while (*fmt)
    *p++ = *fmt++;
  *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
    fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
    p = ra_dbg_buf;
  }
  ra_dbg_p = p;
}

#define RA_DBG_START()  ra_dstart()
#define RA_DBG_FLUSH()  ra_dflush()
#define RA_DBG_REF() \
  do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
       ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
#define RA_DBGX(x)      ra_dprintf x

#else
#define RA_DBG_START()  ((void)0)
#define RA_DBG_FLUSH()  ((void)0)
#define RA_DBG_REF()    ((void)0)
#define RA_DBGX(x)      ((void)0)
#endif

/* -- Register allocator -------------------------------------------------- */

#define ra_free(as, r)      rset_set(as->freeset, (r))
#define ra_modified(as, r)  rset_set(as->modset, (r))
#define ra_weak(as, r)      rset_set(as->weakset, (r))
#define ra_noweak(as, r)    rset_clear(as->weakset, (r))

#define ra_used(ir)         (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))

/* Setup register allocator. */
static void ra_setup(ASMState *as)
{
  Reg r;
  /* Initially all regs (except the stack pointer) are free for use. */
  as->freeset = RSET_INIT;
  as->modset = RSET_EMPTY;
  as->weakset = RSET_EMPTY;
  as->phiset = RSET_EMPTY;
  memset(as->phireg, 0, sizeof(as->phireg));
  for (r = RID_MIN_GPR; r < RID_MAX; r++)
    as->cost[r] = REGCOST(~0u, 0u);
}

/* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
{
  IRIns *ir;
  Reg r;
  if (ra_iskref(ref)) {
    r = ra_krefreg(ref);
    lua_assert(!rset_test(as->freeset, r));
    ra_free(as, r);
    ra_modified(as, r);
#if LJ_64
    emit_loadu64(as, r, ra_krefk(as, ref));
#else
    emit_loadi(as, r, ra_krefk(as, ref));
#endif
    return r;
  }
  ir = IR(ref);
  r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;  /* Do not keep any hint. */
  RA_DBGX((as, "remat $i $r", ir, r));
#if !LJ_SOFTFP
  if (ir->o == IR_KNUM) {
    emit_loadk64(as, r, ir);
  } else
#endif
  if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
    ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
    emit_getgl(as, r, jit_base);
  } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
    lua_assert(irt_isnil(ir->t));  /* REF_NIL stores ASMREF_L register. */
    emit_getgl(as, r, cur_L);
#if LJ_64
  } else if (ir->o == IR_KINT64) {
    emit_loadu64(as, r, ir_kint64(ir)->u64);
#if LJ_GC64
  } else if (ir->o == IR_KGC) {
    emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
  } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
#endif
#endif
  } else {
    lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
               ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
    emit_loadi(as, r, ir->i);
  }
  return r;
}

/* Force a spill. Allocate a new spill slot if needed. */
static int32_t ra_spill(ASMState *as, IRIns *ir)
{
  int32_t slot = ir->s;
  lua_assert(ir >= as->ir + REF_TRUE);
  if (!ra_hasspill(slot)) {
    if (irt_is64(ir->t)) {
      slot = as->evenspill;
      as->evenspill += 2;
    } else if (as->oddspill) {
      slot = as->oddspill;
      as->oddspill = 0;
    } else {
      slot = as->evenspill;
      as->oddspill = slot+1;
      as->evenspill += 2;
    }
    if (as->evenspill > 256)
      lj_trace_err(as->J, LJ_TRERR_SPILLOV);
    ir->s = (uint8_t)slot;
  }
  return sps_scale(slot);
}
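
/* Worked example (illustrative): slots are handed out in aligned pairs.
** A 64-bit value always takes the next even slot and advances evenspill
** by two. A 32-bit value first reuses a leftover odd slot if one is open;
** otherwise it takes a fresh even slot and leaves the odd half in oddspill
** for the next 32-bit spill. Exceeding 256 slots aborts the trace with
** LJ_TRERR_SPILLOV.
*/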

/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  Reg r = ir->r;
  lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;
  return r;
}

/* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
{
  if (emit_canremat(ref)) {
    return ra_rematk(as, ref);
  } else {
    IRIns *ir = IR(ref);
    int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
    Reg r = ir->r;
    lua_assert(ra_hasreg(r));
    ra_sethint(ir->r, r);  /* Keep hint. */
    ra_free(as, r);
    if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
      ra_modified(as, r);
      RA_DBGX((as, "restore $i $r", ir, r));
      emit_spload(as, ir, r, ofs);
    }
    return r;
  }
}

/* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
{
  RA_DBGX((as, "save $i $r", ir, r));
  emit_spstore(as, ir, r, sps_scale(ir->s));
}

#define MINCOST(name) \
  if (rset_test(RSET_ALL, RID_##name) && \
      LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
    cost = as->cost[RID_##name];

/* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
{
  IRRef ref;
  RegCost cost = ~(RegCost)0;
  lua_assert(allow != RSET_EMPTY);
  if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
    GPRDEF(MINCOST)
  } else {
    FPRDEF(MINCOST)
  }
  ref = regcost_ref(cost);
  lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
  /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  if (!irref_isk(ref) && (as->weakset & allow)) {
    IRIns *ir = IR(ref);
    if (!rset_test(as->weakset, ir->r))
      ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  }
  return ra_restore(as, ref);
}

/* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
{
  RegSet pick = as->freeset & allow;
  if (!pick)
    return ra_evict(as, allow);
  else
    return rset_picktop(pick);
}

/* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
{
  Reg r = ra_pick(as, allow);
  ra_modified(as, r);
  RA_DBGX((as, "scratch $r", r));
  return r;
}

/* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
{
  RegSet work;
  as->modset |= drop;
#if !LJ_SOFTFP
  work = (drop & ~as->freeset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = (drop & ~as->freeset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
}

/* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
{
  RegSet work;
#if !LJ_SOFTFP
  work = ~as->freeset & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
#endif
  work = ~as->freeset & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}

#ifdef RID_NUM_KREF
/* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
{
  /* First try to find a register which already holds the same constant. */
  RegSet pick, work = ~as->freeset & RSET_GPR;
  Reg r;
  while (work) {
    IRRef ref;
    r = rset_pickbot(work);
    ref = regcost_ref(as->cost[r]);
#if LJ_64
    if (ref < ASMREF_L) {
      if (ra_iskref(ref)) {
        if (k == ra_krefk(as, ref))
          return r;
      } else {
        IRIns *ir = IR(ref);
        if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
#if LJ_GC64
            (ir->o == IR_KINT && k == ir->i) ||
            (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
            ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
             k == (intptr_t)ir_kptr(ir))
#else
            (ir->o != IR_KINT64 && k == ir->i)
#endif
           )
          return r;
      }
    }
#else
    if (ref < ASMREF_L &&
        k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
      return r;
#endif
    rset_clear(work, r);
  }
  pick = as->freeset & allow;
  if (pick) {
    /* Constants should preferably get unmodified registers. */
    if ((pick & ~as->modset))
      pick &= ~as->modset;
    r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  } else {
    r = ra_evict(as, allow);
  }
  RA_DBGX((as, "allock $x $r", k, r));
  ra_setkref(as, r, k);
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  return r;
}

/* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
{
  Reg kr = ra_allock(as, k, RID2RSET(r));
  if (kr != r) {
    IRIns irdummy;
    irdummy.t.irt = IRT_INT;
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, &irdummy, r, kr);
  }
}
#else
#define ra_allockreg(as, k, r)  emit_loadi(as, (r), (k))
#endif

/* Allocate a register for ref from the allowed set of registers.
** Note: this function assumes the ref does NOT have a register yet!
** Picks an optimal register, sets the cost and marks the register as non-free.
*/
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  RegSet pick = as->freeset & allow;
  Reg r;
  lua_assert(ra_noreg(ir->r));
  if (pick) {
    /* First check register hint from propagation or PHI. */
    if (ra_hashint(ir->r)) {
      r = ra_gethint(ir->r);
      if (rset_test(pick, r))  /* Use hint register if possible. */
        goto found;
      /* Rematerialization is cheaper than missing a hint. */
      if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
        ra_rematk(as, regcost_ref(as->cost[r]));
        goto found;
      }
      RA_DBGX((as, "hintmiss $f $r", ref, r));
    }
    /* Invariants should preferably get unmodified registers. */
    if (ref < as->loopref && !irt_isphi(ir->t)) {
      if ((pick & ~as->modset))
        pick &= ~as->modset;
      r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
    } else {
      /* We've got plenty of regs, so get callee-save regs if possible. */
      if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
        pick &= ~RSET_SCRATCH;
      r = rset_picktop(pick);
    }
  } else {
    r = ra_evict(as, allow);
  }
found:
  RA_DBGX((as, "alloc $f $r", ref, r));
  ir->r = (uint8_t)r;
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  return r;
}

/* Allocate a register on-demand. */
static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  /* Note: allow is ignored if the register is already allocated. */
  if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
  ra_noweak(as, r);
  return r;
}

/* Add a register rename to the IR. */
static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
{
  IRRef ren;
  lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
  ren = tref_ref(lj_ir_emit(as->J));
  as->J->cur.ir[ren].r = (uint8_t)down;
  as->J->cur.ir[ren].s = SPS_NONE;
}

/* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
{
  IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
  IRIns *ir = IR(ref);
  ir->r = (uint8_t)up;
  as->cost[down] = 0;
  lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
  lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
  ra_free(as, down);  /* 'down' is free ... */
  ra_modified(as, down);
  rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  ra_noweak(as, up);
  RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
  emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
    ra_addrename(as, down, ref, as->snapno);
  }
}

/* Pick a destination register (marked as free).
** Caveat: allow is ignored if there's already a destination register.
** Use ra_destreg() to get a specific register.
*/
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
{
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
  } else {
    if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
      dest = ra_gethint(dest);
      ra_modified(as, dest);
      RA_DBGX((as, "dest $r", dest));
    } else {
      dest = ra_scratch(as, allow);
    }
    ir->r = dest;
  }
  if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  return dest;
}

/* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
{
  Reg dest = ra_dest(as, ir, RID2RSET(r));
  if (dest != r) {
    lua_assert(rset_test(as->freeset, r));
    ra_modified(as, r);
    emit_movrr(as, ir, dest, r);
  }
}

#if LJ_TARGET_X86ORX64
/* Propagate dest register to left reference. Emit moves as needed.
** This is a required fixup step for all 2-operand machine instructions.
*/
static void ra_left(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    if (irref_isk(lref)) {
      if (ir->o == IR_KNUM) {
        /* FP remat needs a load except for +0. Still better than eviction. */
        if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
          emit_loadk64(as, dest, ir);
          return;
        }
#if LJ_64
      } else if (ir->o == IR_KINT64) {
        emit_loadk64(as, dest, ir);
        return;
#if LJ_GC64
      } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
        emit_loadk64(as, dest, ir);
        return;
#endif
#endif
      } else if (ir->o != IR_KPRI) {
        lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
                   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
        emit_loadi(as, dest, ir->i);
        return;
      }
    }
    if (!ra_hashint(left) && !iscrossref(as, lref))
      ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#else
/* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref,
                       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#endif

#if !LJ_64
/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
{
  Reg destlo = ir->r, desthi = (ir+1)->r;
  /* First spill unrelated refs blocking the destination registers. */
  if (!rset_test(as->freeset, RID_RETLO) &&
      destlo != RID_RETLO && desthi != RID_RETLO)
    ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  if (!rset_test(as->freeset, RID_RETHI) &&
      destlo != RID_RETHI && desthi != RID_RETHI)
    ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  /* Next free the destination registers (if any). */
  if (ra_hasreg(destlo)) {
    ra_free(as, destlo);
    ra_modified(as, destlo);
  } else {
    destlo = RID_RETLO;
  }
  if (ra_hasreg(desthi)) {
    ra_free(as, desthi);
    ra_modified(as, desthi);
  } else {
    desthi = RID_RETHI;
  }
  /* Check for conflicts and shuffle the registers as needed. */
  if (destlo == RID_RETHI) {
    if (desthi == RID_RETLO) {
#if LJ_TARGET_X86
      *--as->mcp = XI_XCHGa + RID_RETHI;
#else
      emit_movrr(as, ir, RID_RETHI, RID_TMP);
      emit_movrr(as, ir, RID_RETLO, RID_RETHI);
      emit_movrr(as, ir, RID_TMP, RID_RETLO);
#endif
    } else {
      emit_movrr(as, ir, RID_RETHI, RID_RETLO);
      if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    }
  } else if (desthi == RID_RETLO) {
    emit_movrr(as, ir, RID_RETLO, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  } else {
    if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  }
  /* Restore spill slots (if any). */
  if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
}
#endif

/* -- Snapshot handling ---------------------------------------------------- */

/* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
{
  Reg r;
  for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
    if (irref_isk(regcost_ref(as->cost[r])))
      return 1;
  return 0;
}

/* Check whether a sunk store corresponds to an allocation. */
static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
{
  if (irs->s == 255) {
    if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
        irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
      IRIns *irk = IR(irs->op1);
      if (irk->o == IR_AREF || irk->o == IR_HREFK)
        irk = IR(irk->op1);
      return (IR(irk->op1) == ira);
    }
    return 0;
  } else {
    return (ira + irs->s == irs);  /* Quick check. */
  }
}

/* Allocate register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) {
    if (ir->r == RID_SINK) {
      ir->r = RID_SUNK;
#if LJ_HASFFI
      if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
        asm_snap_alloc1(as, ir->op2);
        if (LJ_32 && (ir+1)->o == IR_HIOP)
          asm_snap_alloc1(as, (ir+1)->op2);
      } else
#endif
      {  /* Allocate stored values for TNEW, TDUP and CNEW. */
        IRIns *irs;
        lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW);
        for (irs = IR(as->snapref-1); irs > ir; irs--)
          if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
            lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
                       irs->o == IR_FSTORE || irs->o == IR_XSTORE);
            asm_snap_alloc1(as, irs->op2);
            if (LJ_32 && (irs+1)->o == IR_HIOP)
              asm_snap_alloc1(as, (irs+1)->op2);
          }
      }
    } else {
      RegSet allow;
      if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
        IRIns *irc;
        for (irc = IR(as->curins); irc > ir; irc--)
          if ((irc->op1 == ref || irc->op2 == ref) &&
              !(irc->r == RID_SINK || irc->r == RID_SUNK))
            goto nosink;  /* Don't sink conversion if result is used. */
        asm_snap_alloc1(as, ir->op1);
        return;
      }
    nosink:
      allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
      if ((as->freeset & allow) ||
          (allow == RSET_FPR && asm_snap_canremat(as))) {
        /* Get a weak register if we have a free one or can rematerialize. */
        Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
        if (!irt_isphi(ir->t))
          ra_weak(as, r);  /* But mark it as weakly referenced. */
        checkmclim(as);
        RA_DBGX((as, "snapreg $f $r", ref, ir->r));
      } else {
        ra_spill(as, ir);  /* Otherwise force a spill slot. */
        RA_DBGX((as, "snapspill $f $s", ref, ir->s));
      }
    }
  }
}

/* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as)
{
  SnapShot *snap = &as->T->snap[as->snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (!irref_isk(ref)) {
      asm_snap_alloc1(as, ref);
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
        lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
        asm_snap_alloc1(as, ref+1);
      }
    }
  }
}

/* All guards for a snapshot use the same exitno. This is currently the
** same as the snapshot number. Since the exact origin of the exit cannot
** be determined, all guards for the same snapshot must exit with the same
** RegSP mapping.
** A renamed ref which has been used in a prior guard for the same snapshot
** would cause an inconsistency. The easy way out is to force a spill slot.
*/
static int asm_snap_checkrename(ASMState *as, IRRef ren)
{
  SnapShot *snap = &as->T->snap[as->snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
      IRIns *ir = IR(ref);
      ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
      RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
      return 1;  /* Found. */
    }
  }
  return 0;  /* Not found. */
}
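
/* Example (illustrative): if ref 0042 lives in r1 at the first guard for
** snapshot #3, but is renamed to r2 before a later guard that still maps
** to snapshot #3, the two exits would disagree on the location of 0042.
** Forcing a spill slot gives every exit for this snapshot one consistent
** place to restore the value from.
*/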

/* Prepare snapshot for next guard instruction. */
static void asm_snap_prep(ASMState *as)
{
  if (as->curins < as->snapref) {
    do {
      if (as->snapno == 0) return;  /* Called by sunk stores before snap #0. */
      as->snapno--;
      as->snapref = as->T->snap[as->snapno].ref;
    } while (as->curins < as->snapref);
    asm_snap_alloc(as);
    as->snaprename = as->T->nins;
  } else {
    /* Process any renames above the highwater mark. */
    for (; as->snaprename < as->T->nins; as->snaprename++) {
      IRIns *ir = &as->T->ir[as->snaprename];
      if (asm_snap_checkrename(as, ir->op1))
        ir->op2 = REF_BIAS-1;  /* Kill rename. */
    }
  }
}

/* -- Miscellaneous helpers ----------------------------------------------- */

/* Calculate stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
{
  if (as->evenspill <= SPS_FIXED)
    return 0;
  return sps_scale(sps_align(as->evenspill));
}

/* Must match with hash*() in lj_tab.c. */
static uint32_t ir_khash(IRIns *ir)
{
  uint32_t lo, hi;
  if (irt_isstr(ir->t)) {
    return ir_kstr(ir)->hash;
  } else if (irt_isnum(ir->t)) {
    lo = ir_knum(ir)->u32.lo;
    hi = ir_knum(ir)->u32.hi << 1;
  } else if (irt_ispri(ir->t)) {
    lua_assert(!irt_isnil(ir->t));
    return irt_type(ir->t)-IRT_FALSE;
  } else {
    lua_assert(irt_isgcv(ir->t));
    lo = u32ptr(ir_kgc(ir));
    hi = lo + HASH_BIAS;
  }
  return hashrot(lo, hi);
}

/* -- Allocations --------------------------------------------------------- */

static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);

static void asm_snew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  IRRef args[3];
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;   /* const char *str */
  args[2] = ir->op2;   /* size_t len */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

static void asm_tnew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  IRRef args[2];
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
  ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
}

static void asm_tdup(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  IRRef args[2];
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;   /* const GCtab *kt */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
}

static void asm_gc_check(ASMState *as);

/* Explicit GC step. */
static void asm_gcstep(ASMState *as, IRIns *ir)
{
  IRIns *ira;
  for (ira = IR(as->stopins+1); ira < ir; ira++)
    if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
         (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
        ra_used(ira))
      as->gcsteps++;
  if (as->gcsteps)
    asm_gc_check(as);
  as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
}

/* -- Buffer operations --------------------------------------------------- */

static void asm_tvptr(ASMState *as, Reg dest, IRRef ref);

static void asm_bufhdr(ASMState *as, IRIns *ir)
{
  Reg sb = ra_dest(as, ir, RSET_GPR);
  if ((ir->op2 & IRBUFHDR_APPEND)) {
    /* Rematerialize const buffer pointer instead of likely spill. */
    IRIns *irp = IR(ir->op1);
    if (!(ra_hasreg(irp->r) || irp == ir-1 ||
          (irp == ir-2 && !ra_used(ir-1)))) {
      while (!(irp->o == IR_BUFHDR && !(irp->op2 & IRBUFHDR_APPEND)))
        irp = IR(irp->op1);
      if (irref_isk(irp->op1)) {
        ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
        ir = irp;
      }
    }
  } else {
    Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
    /* Passing ir isn't strictly correct, but it's an IRT_PGC, too. */
    emit_storeofs(as, ir, tmp, sb, offsetof(SBuf, p));
    emit_loadofs(as, ir, tmp, sb, offsetof(SBuf, b));
  }
#if LJ_TARGET_X86ORX64
  ra_left(as, sb, ir->op1);
#else
  ra_leftov(as, sb, ir->op1);
#endif
}

static void asm_bufput(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
  IRRef args[3];
  IRIns *irs;
  int kchar = -1;
  args[0] = ir->op1;  /* SBuf * */
  args[1] = ir->op2;  /* GCstr * */
  irs = IR(ir->op2);
  lua_assert(irt_isstr(irs->t));
  if (irs->o == IR_KGC) {
    GCstr *s = ir_kstr(irs);
    if (s->len == 1) {  /* Optimize put of single-char string constant. */
      kchar = strdata(s)[0];
      args[1] = ASMREF_TMP1;  /* int, truncated to char */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
    }
  } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
    if (irs->o == IR_TOSTR) {  /* Fuse number to string conversions. */
      if (irs->op2 == IRTOSTR_NUM) {
        args[1] = ASMREF_TMP1;  /* TValue * */
        ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
      } else {
        lua_assert(irt_isinteger(IR(irs->op1)->t));
        args[1] = irs->op1;  /* int */
        if (irs->op2 == IRTOSTR_INT)
          ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
        else
          ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
      }
    } else if (irs->o == IR_SNEW) {  /* Fuse string allocation. */
      args[1] = irs->op1;  /* const void * */
      args[2] = irs->op2;  /* MSize */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
    }
  }
  asm_setupresult(as, ir, ci);  /* SBuf * */
  asm_gencall(as, ci, args);
  if (args[1] == ASMREF_TMP1) {
    Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
    if (kchar == -1)
      asm_tvptr(as, tmp, irs->op1);
    else
      ra_allockreg(as, kchar, tmp);
  }
}

static void asm_bufstr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
  IRRef args[1];
  args[0] = ir->op1;  /* SBuf *sb */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tostr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci;
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (ir->op2 == IRTOSTR_NUM) {
    args[1] = ASMREF_TMP1;  /* cTValue * */
    ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
  } else {
    args[1] = ir->op1;  /* int32_t k */
    if (ir->op2 == IRTOSTR_INT)
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
    else
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
  }
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
  if (ir->op2 == IRTOSTR_NUM)
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
}

#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  IRRef args[2];
  lua_assert((ir-1)->o == IR_CONV && ir->o == IR_HIOP);
  args[LJ_BE] = (ir-1)->op1;
  args[LJ_LE] = ir->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  {
#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
    CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
    cim.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
#else
    const CCallInfo *ci = &lj_ir_callinfo[id];
#endif
    asm_setupresult(as, ir, ci);
    asm_gencall(as, ci, args);
  }
}
#endif

/* -- Memory references --------------------------------------------------- */

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  if (ir->r == RID_SINK)
    return;
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
}

static void asm_lref(ASMState *as, IRIns *ir)
{
  Reg r = ra_dest(as, ir, RSET_GPR);
#if LJ_TARGET_X86ORX64
  ra_left(as, r, ASMREF_L);
#else
  ra_leftov(as, r, ASMREF_L);
#endif
}

/* -- Calls --------------------------------------------------------------- */

/* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
                            const CCallInfo *ci, IRRef *args)
{
  uint32_t n = CCI_XNARGS(ci);
  lua_assert(n <= CCI_NARGS_MAX*2);  /* Account for split args. */
  if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  while (n-- > 1) {
    ir = IR(ir->op1);
    lua_assert(ir->o == IR_CARG);
    args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  }
  args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  lua_assert(IR(ir->op1)->o != IR_CARG);
}

/* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
{
  uint32_t nargs = 0;
  if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
    IRIns *ira = IR(ir->op1);
    nargs++;
    while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  }
#if LJ_HASFFI
  if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
    CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
    CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
    nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
#if LJ_TARGET_X86
    nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
#endif
  }
#endif
  return (nargs | (ir->t.irt << CCI_OTSHIFT));
}

static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

#if !LJ_SOFTFP
static void asm_fppow(ASMState *as, IRIns *ir, IRRef lref, IRRef rref)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
  IRRef args[2];
  args[0] = lref;
  args[1] = rref;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
{
  IRIns *irp = IR(ir->op1);
  if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
    IRIns *irpp = IR(irp->op1);
    if (irpp == ir-2 && irpp->o == IR_FPMATH &&
        irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
      asm_fppow(as, ir, irpp->op1, irp->op2);
      return 1;
    }
  }
  return 0;
}
#endif

/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
                          RegSet allow)
{
  RegSet candidates = blocked & allow;
  if (candidates) {  /* If this register file has candidates. */
    /* Note: the set for ra_pick cannot be empty, since each register file
    ** has some registers never allocated to PHIs.
    */
    Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
    if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
      candidates = candidates & ~blockedby;
    down = rset_picktop(candidates);  /* Pick candidate PHI register. */
    ra_rename(as, down, up);  /* And rename it to the free register. */
  }
}

/* PHI register shuffling.
**
** The allocator tries hard to preserve PHI register assignments across
** the loop body. Most of the time this loop does nothing, since there
** are no register mismatches.
**
** If a register mismatch is detected and ...
** - the register is currently free: rename it.
** - the register is blocked by an invariant: restore/remat and rename it.
** - Otherwise the register is used by another PHI, so mark it as blocked.
**
** The renames are order-sensitive, so just retry the loop if a register
** is marked as blocked, but has been freed in the meantime. A cycle is
** detected if all of the blocked registers are allocated. To break the
** cycle rename one of them to a free register and retry.
**
** Note that PHI spill slots are kept in sync and don't need to be shuffled.
*/
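/* Example of a cycle (illustrative): left PHI x expects r1 but currently
** sits in r2, while left PHI y expects r2 but sits in r1. Neither rename
** can proceed, so both registers end up in the blocked set with no free
** register among them. asm_phi_break() renames one of them to a free
** register, turning the cycle into a plain shift that the next retry of
** the loop below can resolve.
*/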
asm_phi_shuffle(ASMState * as)1371 static void asm_phi_shuffle(ASMState *as)
1372 {
1373 RegSet work;
1374
1375 /* Find and resolve PHI register mismatches. */
1376 for (;;) {
1377 RegSet blocked = RSET_EMPTY;
1378 RegSet blockedby = RSET_EMPTY;
1379 RegSet phiset = as->phiset;
1380 while (phiset) { /* Check all left PHI operand registers. */
1381 Reg r = rset_pickbot(phiset);
1382 IRIns *irl = IR(as->phireg[r]);
1383 Reg left = irl->r;
1384 if (r != left) { /* Mismatch? */
1385 if (!rset_test(as->freeset, r)) { /* PHI register blocked? */
1386 IRRef ref = regcost_ref(as->cost[r]);
1387 /* Blocked by other PHI (w/reg)? */
1388 if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
1389 rset_set(blocked, r);
1390 if (ra_hasreg(left))
1391 rset_set(blockedby, left);
1392 left = RID_NONE;
1393 } else { /* Otherwise grab register from invariant. */
1394 ra_restore(as, ref);
1395 checkmclim(as);
1396 }
1397 }
1398 if (ra_hasreg(left)) {
1399 ra_rename(as, left, r);
1400 checkmclim(as);
1401 }
1402 }
1403 rset_clear(phiset, r);
1404 }
1405 if (!blocked) break; /* Finished. */
1406 if (!(as->freeset & blocked)) { /* Break cycles if none are free. */
1407 asm_phi_break(as, blocked, blockedby, RSET_GPR);
1408 if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
1409 checkmclim(as);
1410 } /* Else retry some more renames. */
1411 }
1412
1413 /* Restore/remat invariants whose registers are modified inside the loop. */
1414 #if !LJ_SOFTFP
1415 work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
1416 while (work) {
1417 Reg r = rset_pickbot(work);
1418 ra_restore(as, regcost_ref(as->cost[r]));
1419 rset_clear(work, r);
1420 checkmclim(as);
1421 }
1422 #endif
1423 work = as->modset & ~(as->freeset | as->phiset);
1424 while (work) {
1425 Reg r = rset_pickbot(work);
1426 ra_restore(as, regcost_ref(as->cost[r]));
1427 rset_clear(work, r);
1428 checkmclim(as);
1429 }
1430
1431 /* Allocate and save all unsaved PHI regs and clear marks. */
1432 work = as->phiset;
1433 while (work) {
1434 Reg r = rset_picktop(work);
1435 IRRef lref = as->phireg[r];
1436 IRIns *ir = IR(lref);
1437 if (ra_hasspill(ir->s)) { /* Left PHI gained a spill slot? */
1438 irt_clearmark(ir->t); /* Handled here, so clear marker now. */
1439 ra_alloc1(as, lref, RID2RSET(r));
1440 ra_save(as, ir, r); /* Save to spill slot inside the loop. */
1441 checkmclim(as);
1442 }
1443 rset_clear(work, r);
1444 }
1445 }
1446
1447 /* Copy unsynced left/right PHI spill slots. Rarely needed. */
asm_phi_copyspill(ASMState * as)1448 static void asm_phi_copyspill(ASMState *as)
1449 {
1450 int need = 0;
1451 IRIns *ir;
1452 for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
1453 if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
1454 need |= irt_isfp(ir->t) ? 2 : 1; /* Unsynced spill slot? */
1455 if ((need & 1)) { /* Copy integer spill slots. */
1456 #if !LJ_TARGET_X86ORX64
1457 Reg r = RID_TMP;
1458 #else
1459 Reg r = RID_RET;
1460 if ((as->freeset & RSET_GPR))
1461 r = rset_pickbot((as->freeset & RSET_GPR));
1462 else
1463 emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1464 #endif
1465 for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
1466 if (ra_hasspill(ir->s)) {
1467 IRIns *irl = IR(ir->op1);
1468 if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
1469 emit_spstore(as, irl, r, sps_scale(irl->s));
1470 emit_spload(as, ir, r, sps_scale(ir->s));
1471 checkmclim(as);
1472 }
1473 }
1474 }
1475 #if LJ_TARGET_X86ORX64
1476 if (!rset_test(as->freeset, r))
1477 emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1478 #endif
1479 }
1480 #if !LJ_SOFTFP
1481 if ((need & 2)) { /* Copy FP spill slots. */
1482 #if LJ_TARGET_X86
1483 Reg r = RID_XMM0;
1484 #else
1485 Reg r = RID_FPRET;
1486 #endif
1487 if ((as->freeset & RSET_FPR))
1488 r = rset_pickbot((as->freeset & RSET_FPR));
1489 if (!rset_test(as->freeset, r))
1490 emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1491 for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
1492 if (ra_hasspill(ir->s)) {
1493 IRIns *irl = IR(ir->op1);
1494 if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
1495 emit_spstore(as, irl, r, sps_scale(irl->s));
1496 emit_spload(as, ir, r, sps_scale(ir->s));
1497 checkmclim(as);
1498 }
1499 }
1500 }
1501 if (!rset_test(as->freeset, r))
1502 emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
1503 }
1504 #endif
1505 }
1506
1507 /* Emit renames for left PHIs which are only spilled outside the loop. */
asm_phi_fixup(ASMState * as)1508 static void asm_phi_fixup(ASMState *as)
1509 {
1510 RegSet work = as->phiset;
1511 while (work) {
1512 Reg r = rset_picktop(work);
1513 IRRef lref = as->phireg[r];
1514 IRIns *ir = IR(lref);
1515 if (irt_ismarked(ir->t)) {
1516 irt_clearmark(ir->t);
1517 /* Left PHI gained a spill slot before the loop? */
1518 if (ra_hasspill(ir->s)) {
1519 ra_addrename(as, r, lref, as->loopsnapno);
1520 }
1521 }
1522 rset_clear(work, r);
1523 }
1524 }
1525
1526 /* Setup right PHI reference. */
asm_phi(ASMState * as,IRIns * ir)1527 static void asm_phi(ASMState *as, IRIns *ir)
1528 {
1529 RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
1530 ~as->phiset;
1531 RegSet afree = (as->freeset & allow);
1532 IRIns *irl = IR(ir->op1);
1533 IRIns *irr = IR(ir->op2);
1534 if (ir->r == RID_SINK) /* Sink PHI. */
1535 return;
1536 /* Spill slot shuffling is not implemented yet (but rarely needed). */
1537 if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
1538 lj_trace_err(as->J, LJ_TRERR_NYIPHI);
1539 /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
1540 if ((afree & (afree-1))) { /* Two or more free registers? */
1541 Reg r;
1542 if (ra_noreg(irr->r)) { /* Get a register for the right PHI. */
1543 r = ra_allocref(as, ir->op2, allow);
1544 } else { /* Duplicate right PHI, need a copy (rare). */
1545 r = ra_scratch(as, allow);
1546 emit_movrr(as, irr, r, irr->r);
1547 }
1548 ir->r = (uint8_t)r;
1549 rset_set(as->phiset, r);
1550 as->phireg[r] = (IRRef1)ir->op1;
1551 irt_setmark(irl->t); /* Marks left PHIs _with_ register. */
1552 if (ra_noreg(irl->r))
1553 ra_sethint(irl->r, r); /* Set register hint for left PHI. */
1554 } else { /* Otherwise allocate a spill slot. */
1555 /* This is overly restrictive, but it triggers only on synthetic code. */
1556 if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
1557 lj_trace_err(as->J, LJ_TRERR_NYIPHI);
1558 ra_spill(as, ir);
1559 irr->s = ir->s; /* Set right PHI spill slot. Sync left slot later. */
1560 }
1561 }
1562
1563 static void asm_loop_fixup(ASMState *as);
1564
1565 /* Middle part of a loop. */
asm_loop(ASMState * as)1566 static void asm_loop(ASMState *as)
1567 {
1568 MCode *mcspill;
1569 /* LOOP is a guard, so the snapno is up to date. */
1570 as->loopsnapno = as->snapno;
1571 if (as->gcsteps)
1572 asm_gc_check(as);
1573 /* LOOP marks the transition from the variant to the invariant part. */
1574 as->flagmcp = as->invmcp = NULL;
1575 as->sectref = 0;
1576 if (!neverfuse(as)) as->fuseref = 0;
1577 asm_phi_shuffle(as);
1578 mcspill = as->mcp;
1579 asm_phi_copyspill(as);
1580 asm_loop_fixup(as);
1581 as->mcloop = as->mcp;
1582 RA_DBGX((as, "===== LOOP ====="));
1583 if (!as->realign) RA_DBG_FLUSH();
1584 if (as->mcp != mcspill)
1585 emit_jmp(as, mcspill);
1586 }
1587
1588 /* -- Target-specific assembler ------------------------------------------- */
1589
1590 #if LJ_TARGET_X86ORX64
1591 #include "lj_asm_x86.h"
1592 #elif LJ_TARGET_ARM
1593 #include "lj_asm_arm.h"
1594 #elif LJ_TARGET_ARM64
1595 #include "lj_asm_arm64.h"
1596 #elif LJ_TARGET_PPC
1597 #include "lj_asm_ppc.h"
1598 #elif LJ_TARGET_MIPS
1599 #include "lj_asm_mips.h"
1600 #else
1601 #error "Missing assembler for target CPU"
1602 #endif
1603
1604 /* -- Instruction dispatch ------------------------------------------------ */
1605
1606 /* Assemble a single instruction. */
asm_ir(ASMState * as,IRIns * ir)1607 static void asm_ir(ASMState *as, IRIns *ir)
1608 {
1609 switch ((IROp)ir->o) {
1610 /* Miscellaneous ops. */
1611 case IR_LOOP: asm_loop(as); break;
1612 case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
1613 case IR_USE:
1614 ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
1615 case IR_PHI: asm_phi(as, ir); break;
1616 case IR_HIOP: asm_hiop(as, ir); break;
1617 case IR_GCSTEP: asm_gcstep(as, ir); break;
1618 case IR_PROF: asm_prof(as, ir); break;
1619
1620 /* Guarded assertions. */
1621 case IR_LT: case IR_GE: case IR_LE: case IR_GT:
1622 case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
1623 case IR_ABC:
1624 asm_comp(as, ir);
1625 break;
  case IR_EQ: case IR_NE:
    if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
      as->curins--;
      asm_href(as, ir-1, (IROp)ir->o);
    } else {
      asm_equal(as, ir);
    }
    break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bnot(as, ir); break;
  case IR_BSWAP: asm_bswap(as, ir); break;
  case IR_BAND: asm_band(as, ir); break;
  case IR_BOR: asm_bor(as, ir); break;
  case IR_BXOR: asm_bxor(as, ir); break;
  case IR_BSHL: asm_bshl(as, ir); break;
  case IR_BSHR: asm_bshr(as, ir); break;
  case IR_BSAR: asm_bsar(as, ir); break;
  case IR_BROL: asm_brol(as, ir); break;
  case IR_BROR: asm_bror(as, ir); break;

  /* Arithmetic ops. */
  case IR_ADD: asm_add(as, ir); break;
  case IR_SUB: asm_sub(as, ir); break;
  case IR_MUL: asm_mul(as, ir); break;
  case IR_MOD: asm_mod(as, ir); break;
  case IR_NEG: asm_neg(as, ir); break;
#if LJ_SOFTFP
  case IR_DIV: case IR_POW: case IR_ABS:
  case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
    lua_assert(0); /* Unused for LJ_SOFTFP. */
    break;
#else
  case IR_DIV: asm_div(as, ir); break;
  case IR_POW: asm_pow(as, ir); break;
  case IR_ABS: asm_abs(as, ir); break;
  case IR_ATAN2: asm_atan2(as, ir); break;
  case IR_LDEXP: asm_ldexp(as, ir); break;
  case IR_FPMATH: asm_fpmath(as, ir); break;
  case IR_TOBIT: asm_tobit(as, ir); break;
#endif
  case IR_MIN: asm_min(as, ir); break;
  case IR_MAX: asm_max(as, ir); break;

  /* Overflow-checking arithmetic ops. */
  case IR_ADDOV: asm_addov(as, ir); break;
  case IR_SUBOV: asm_subov(as, ir); break;
  case IR_MULOV: asm_mulov(as, ir); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;
  case IR_LREF: asm_lref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  /* Buffer operations. */
  case IR_BUFHDR: asm_bufhdr(as, ir); break;
  case IR_BUFPUT: asm_bufput(as, ir); break;
  case IR_BUFSTR: asm_bufstr(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
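  /* IR_CALLA is an allocating call: it counts as a GC step, and the
  ** accumulated gcsteps trigger a GC check at the head of the trace.
  */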
  case IR_CALLA:
    as->gcsteps++;
    /* fallthrough */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
  case IR_CARG: break;

  default:
    setintV(&as->J->errinfo, ir->o);
    lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
    break;
  }
}

/* -- Head of trace ------------------------------------------------------- */

/* Head of a root trace. */
static void asm_head_root(ASMState *as)
{
  int32_t spadj;
  asm_head_root_base(as);
  emit_setvmstate(as, (int32_t)as->T->traceno);
  spadj = asm_stack_adjust(as);
  as->T->spadjust = (uint16_t)spadj;
  emit_spsub(as, spadj);
  /* Root traces assume a checked stack for the starting proto. */
  as->T->topslot = gcref(as->T->startpt)->pt.framesize;
}

/* Head of a side trace.
**
** The current simplistic algorithm requires that all slots inherited
** from the parent are live in a register between pass 2 and pass 3. This
** avoids the complexity of stack slot shuffling. But of course this may
** overflow the register set in some cases and cause the dreaded error:
** "NYI: register coalescing too complex". A refined algorithm is needed.
*/
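/*
** Outline of the passes below: pass 1 scans the parent SLOADs/PVALs and
** coalesces registers that already match. Pass 2 reloads values whose
** target ended up in a spill slot. Pass 3 restores target registers from
** parent spill slots. A final shuffle loop then moves the remaining live
** parent registers into their target registers, breaking move cycles
** with a temporary rename.
*/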
static void asm_head_side(ASMState *as)
{
  IRRef1 sloadins[RID_MAX];
  RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */
  RegSet live = RSET_EMPTY; /* Live parent registers. */
  IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */
  int32_t spadj, spdelta;
  int pass2 = 0;
  int pass3 = 0;
  IRRef i;

  if (as->snapno && as->topslot > as->parent->topslot) {
    /* Force snap #0 alloc to prevent register overwrite in stack check. */
    as->snapno = 0;
    asm_snap_alloc(as);
  }
  allow = asm_head_side_base(as, irp, allow);

  /* Scan all parent SLOADs and collect register dependencies. */
  for (i = as->stopins; i > REF_BASE; i--) {
    IRIns *ir = IR(i);
    RegSP rs;
    lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
               (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL);
    rs = as->parentmap[i - REF_FIRST];
    if (ra_hasreg(ir->r)) {
      rset_clear(allow, ir->r);
      if (ra_hasspill(ir->s)) {
        ra_save(as, ir, ir->r);
        checkmclim(as);
      }
    } else if (ra_hasspill(ir->s)) {
      irt_setmark(ir->t);
      pass2 = 1;
    }
    if (ir->r == rs) { /* Coalesce matching registers right now. */
      ra_free(as, ir->r);
    } else if (ra_hasspill(regsp_spill(rs))) {
      if (ra_hasreg(ir->r))
        pass3 = 1;
    } else if (ra_used(ir)) {
      sloadins[rs] = (IRRef1)i;
      rset_set(live, rs); /* Block live parent register. */
    }
  }

  /* Calculate stack frame adjustment. */
  spadj = asm_stack_adjust(as);
  spdelta = spadj - (int32_t)as->parent->spadjust;
  if (spdelta < 0) { /* Don't shrink the stack frame. */
    spadj = (int32_t)as->parent->spadjust;
    spdelta = 0;
  }
  as->T->spadjust = (uint16_t)spadj;

  /* Reload spilled target registers. */
  if (pass2) {
    for (i = as->stopins; i > REF_BASE; i--) {
      IRIns *ir = IR(i);
      if (irt_ismarked(ir->t)) {
        RegSet mask;
        Reg r;
        RegSP rs;
        irt_clearmark(ir->t);
        rs = as->parentmap[i - REF_FIRST];
        if (!ra_hasspill(regsp_spill(rs)))
          ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */
        else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
          continue; /* Same spill slot, do nothing. */
        mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
        if (mask == RSET_EMPTY)
          lj_trace_err(as->J, LJ_TRERR_NYICOAL);
        r = ra_allocref(as, i, mask);
        ra_save(as, ir, r);
        rset_clear(allow, r);
        if (r == rs) { /* Coalesce matching registers right now. */
          ra_free(as, r);
          rset_clear(live, r);
        } else if (ra_hasspill(regsp_spill(rs))) {
          pass3 = 1;
        }
        checkmclim(as);
      }
    }
  }

  /* Store trace number and adjust stack frame relative to the parent. */
  emit_setvmstate(as, (int32_t)as->T->traceno);
  emit_spsub(as, spdelta);

#if !LJ_TARGET_X86ORX64
  /* Restore BASE register from parent spill slot. */
  if (ra_hasspill(irp->s))
    emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
#endif

  /* Restore target registers from parent spill slots. */
  if (pass3) {
    RegSet work = ~as->freeset & RSET_ALL;
    while (work) {
      Reg r = rset_pickbot(work);
      IRRef ref = regcost_ref(as->cost[r]);
      RegSP rs = as->parentmap[ref - REF_FIRST];
      rset_clear(work, r);
      if (ra_hasspill(regsp_spill(rs))) {
        int32_t ofs = sps_scale(regsp_spill(rs));
        ra_free(as, r);
        emit_spload(as, IR(ref), r, ofs);
        checkmclim(as);
      }
    }
  }

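  /* Example: the parent may leave value A in r1 and value B in r2 while
  ** this trace wants A in r2 and B in r1; these moves form a cycle. The
  ** loop below first drains all moves into free target registers, then
  ** breaks any remaining cycle by renaming one target to a temporary
  ** register and resumes coalescing.
  */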
  /* Shuffle registers to match up target regs with parent regs. */
  for (;;) {
    RegSet work;

    /* Repeatedly coalesce free live registers by moving to their target. */
    while ((work = as->freeset & live) != RSET_EMPTY) {
      Reg rp = rset_pickbot(work);
      IRIns *ir = IR(sloadins[rp]);
      rset_clear(live, rp);
      rset_clear(allow, rp);
      ra_free(as, ir->r);
      emit_movrr(as, ir, ir->r, rp);
      checkmclim(as);
    }

    /* We're done if no live registers remain. */
    if (live == RSET_EMPTY)
      break;

    /* Break cycles by renaming one target to a temp. register. */
    if (live & RSET_GPR) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
      if (tmpset == RSET_EMPTY)
        lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
    }
    if (!LJ_SOFTFP && (live & RSET_FPR)) {
      RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
      if (tmpset == RSET_EMPTY)
        lj_trace_err(as->J, LJ_TRERR_NYICOAL);
      ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
    }
    checkmclim(as);
    /* Continue with coalescing to fix up the broken cycle(s). */
  }

  /* Inherit top stack slot already checked by parent trace. */
  as->T->topslot = as->parent->topslot;
  if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */
#ifdef EXITSTATE_CHECKEXIT
    /* Highest exit + 1 indicates stack check. */
    ExitNo exitno = as->T->nsnap;
#else
    /* Reuse the parent exit in the context of the parent trace. */
    ExitNo exitno = as->J->exitno;
#endif
    as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */
    asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
  }
}

/* -- Tail of trace ------------------------------------------------------- */

/* Get base slot for a snapshot. */
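/*
** The snapshot map is scanned from the top: the highest entry tagged
** SNAP_FRAME holds the innermost frame link, and its slot (adjusted by
** LJ_FR2 for two-slot frame links) gives the base slot relative to the
** root frame of the trace.
*/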
static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n;
  for (n = snap->nent; n > 0; n--) {
    SnapEntry sn = map[n-1];
    if ((sn & SNAP_FRAME)) {
      *gotframe = 1;
      return snap_slot(sn) - LJ_FR2;
    }
  }
  return 0;
}

/* Link to another trace. */
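/*
** If the trace has no link target, the tail sets up the fixed registers
** the interpreter expects on exit: the current PC in RID_LPC, the
** dispatch table in RID_DISPATCH (in non-GC64 mode) and MULTRES (or 0)
** in RID_RET.
*/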
static void asm_tail_link(ASMState *as)
{
  SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */
  SnapShot *snap = &as->T->snap[snapno];
  int gotframe = 0;
  BCReg baseslot = asm_baseslot(as, snap, &gotframe);

  as->topslot = snap->topslot;
  checkmclim(as);
  ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));

  if (as->T->link == 0) {
    /* Setup fixed registers for exit to interpreter. */
    const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
    int32_t mres;
    if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */
      BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
      if (bc_isret(bc_op(*retpc)))
        pc = retpc;
    }
#if LJ_GC64
    emit_loadu64(as, RID_LPC, u64ptr(pc));
#else
    ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
    ra_allockreg(as, i32ptr(pc), RID_LPC);
#endif
    mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
    switch (bc_op(*pc)) {
    case BC_CALLM: case BC_CALLMT:
      mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
    case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
    case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
    default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
    }
    ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */
  } else if (baseslot) {
    /* Save modified BASE for linking to trace with higher start frame. */
    emit_setgl(as, RID_BASE, jit_base);
  }
  emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);

  if (as->J->ktrace) { /* Patch ktrace slot with the final GCtrace pointer. */
    setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
    IR(as->J->ktrace)->o = IR_KGC;
  }

  /* Sync the interpreter state with the on-trace state. */
  asm_stack_restore(as, snap);

  /* Root traces that add frames need to check the stack at the end. */
  if (!as->parent && gotframe)
    asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
}

/* -- Trace setup --------------------------------------------------------- */

/* Clear reg/sp for all instructions and add register hints. */
static void asm_setup_regsp(ASMState *as)
{
  GCtrace *T = as->T;
  int sink = T->sinktags;
  IRRef nins = T->nins;
  IRIns *ir, *lastir;
  int inloop;
#if LJ_TARGET_ARM
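  /* Each nibble of this constant is a GPR hint for one stack slot load;
  ** the constant is rotated by 4 bits after each use to spread the
  ** hints over different registers.
  */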
  uint32_t rload = 0xa6402a64;
#endif

  ra_setup(as);

  /* Clear reg/sp for constants. */
  for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
    ir->prev = REGSP_INIT;
    if (irt_is64(ir->t) && ir->o != IR_KNULL) {
#if LJ_GC64
      ir->i = 0; /* Will become non-zero only for RIP-relative addresses. */
#else
      /* Make life easier for backends by putting address of constant in i. */
      ir->i = (int32_t)(intptr_t)(ir+1);
#endif
      ir++;
    }
  }

  /* REF_BASE is used for implicit references to the BASE register. */
  lastir->prev = REGSP_HINT(RID_BASE);

  as->snaprename = nins;
  as->snapref = nins;
  as->snapno = T->nsnap;

  as->stopins = REF_BASE;
  as->orignins = nins;
  as->curins = nins;

  /* Setup register hints for parent link instructions. */
  ir = IR(REF_FIRST);
  if (as->parent) {
    uint16_t *p;
    lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir);
    if (lastir - ir > LJ_MAX_JSLOTS)
      lj_trace_err(as->J, LJ_TRERR_NYICOAL);
    as->stopins = (IRRef)((lastir-1) - as->ir);
    for (p = as->parentmap; ir < lastir; ir++) {
      RegSP rs = ir->prev;
      *p++ = (uint16_t)rs; /* Copy original parent RegSP to parentmap. */
      if (!ra_hasspill(regsp_spill(rs)))
        ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
      else
        ir->prev = REGSP_INIT;
    }
  }

  inloop = 0;
  as->evenspill = SPS_FIRST;
  for (lastir = IR(nins); ir < lastir; ir++) {
    if (sink) {
      if (ir->r == RID_SINK)
        continue;
      if (ir->r == RID_SUNK) { /* Revert after ASM restart. */
        ir->r = RID_SINK;
        continue;
      }
    }
    switch (ir->o) {
    case IR_LOOP:
      inloop = 1;
      break;
#if LJ_TARGET_ARM
    case IR_SLOAD:
      if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
        break;
      /* fallthrough */
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
      ir->prev = (uint16_t)REGSP_HINT((rload & 15));
      rload = lj_ror(rload, 4);
      continue;
#endif
    case IR_CALLXS: {
      CCallInfo ci;
      ci.flags = asm_callx_flags(as, ir);
      ir->prev = asm_setup_call_slots(as, ir, &ci);
      if (inloop)
        as->modset |= RSET_SCRATCH;
      continue;
      }
    case IR_CALLN: case IR_CALLA: case IR_CALLL: case IR_CALLS: {
      const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
      ir->prev = asm_setup_call_slots(as, ir, ci);
      if (inloop)
        as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
                      (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
      continue;
      }
#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
    case IR_HIOP:
      switch ((ir-1)->o) {
#if LJ_SOFTFP && LJ_TARGET_ARM
      case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
        if (ra_hashint((ir-1)->r)) {
          ir->prev = (ir-1)->prev + 1;
          continue;
        }
        break;
#endif
#if !LJ_SOFTFP && LJ_NEED_FP64
      case IR_CONV:
        if (irt_isfp((ir-1)->t)) {
          ir->prev = REGSP_HINT(RID_FPRET);
          continue;
        }
        /* fallthrough */
#endif
      case IR_CALLN: case IR_CALLXS:
#if LJ_SOFTFP
      case IR_MIN: case IR_MAX:
#endif
        (ir-1)->prev = REGSP_HINT(RID_RETLO);
        ir->prev = REGSP_HINT(RID_RETHI);
        continue;
      default:
        break;
      }
      break;
#endif
#if LJ_SOFTFP
    case IR_MIN: case IR_MAX:
      if ((ir+1)->o != IR_HIOP) break;
      /* fallthrough */
#endif
    /* C calls evict all scratch regs and return results in RID_RET. */
    case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
      if (REGARG_NUMGPR < 3 && as->evenspill < 3)
        as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */
#if LJ_TARGET_X86 && LJ_HASFFI
      if (0) {
    case IR_CNEW:
        if (ir->op2 != REF_NIL && as->evenspill < 4)
          as->evenspill = 4; /* lj_cdata_newv needs 4 args. */
      }
#else
    case IR_CNEW:
#endif
    case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
    case IR_BUFSTR:
      ir->prev = REGSP_HINT(RID_RET);
      if (inloop)
        as->modset = RSET_SCRATCH;
      continue;
    case IR_STRTO: case IR_OBAR:
      if (inloop)
        as->modset = RSET_SCRATCH;
      break;
#if !LJ_SOFTFP
    case IR_ATAN2:
#if LJ_TARGET_X86
      if (as->evenspill < 4) /* Leave room to call atan2(). */
        as->evenspill = 4;
#endif
#if !LJ_TARGET_X86ORX64
    case IR_LDEXP:
#endif
#endif
    case IR_POW:
      if (!LJ_SOFTFP && irt_isnum(ir->t)) {
        if (inloop)
          as->modset |= RSET_SCRATCH;
#if LJ_TARGET_X86
        break;
#else
        ir->prev = REGSP_HINT(RID_FPRET);
        continue;
#endif
      }
      /* fallthrough for integer POW */
    case IR_DIV: case IR_MOD:
      if (!irt_isnum(ir->t)) {
        ir->prev = REGSP_HINT(RID_RET);
        if (inloop)
          as->modset |= (RSET_SCRATCH & RSET_GPR);
        continue;
      }
      break;
    case IR_FPMATH:
#if LJ_TARGET_X86ORX64
      if (ir->op2 <= IRFPM_TRUNC) {
        if (!(as->flags & JIT_F_SSE4_1)) {
          ir->prev = REGSP_HINT(RID_XMM0);
          if (inloop)
            as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
          continue;
        }
        break;
      } else if (ir->op2 == IRFPM_EXP2 && !LJ_64) {
        if (as->evenspill < 4) /* Leave room to call pow(). */
          as->evenspill = 4;
      }
#endif
      if (inloop)
        as->modset |= RSET_SCRATCH;
#if LJ_TARGET_X86
      break;
#else
      ir->prev = REGSP_HINT(RID_FPRET);
      continue;
#endif
#if LJ_TARGET_X86ORX64
    /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
    case IR_BSHL: case IR_BSHR: case IR_BSAR:
      if ((as->flags & JIT_F_BMI2)) /* Except if BMI2 is available. */
        break;
    case IR_BROL: case IR_BROR:
      if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
        IR(ir->op2)->r = REGSP_HINT(RID_ECX);
        if (inloop)
          rset_set(as->modset, RID_ECX);
      }
      break;
#endif
    /* Do not propagate hints across type conversions or loads. */
    case IR_TOBIT:
    case IR_XLOAD:
#if !LJ_TARGET_ARM
    case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
#endif
      break;
    case IR_CONV:
      if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
          (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
        break;
      /* fallthrough */
    default:
      /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
      if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
          ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
        ir->prev = IR(ir->op1)->prev;
        continue;
      }
      break;
    }
    ir->prev = REGSP_INIT;
  }
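  /* Keep evenspill even, so two-slot (64 bit) spills stay aligned; a
  ** leftover odd slot is remembered in oddspill for a later single-slot
  ** spill.
  */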
  if ((as->evenspill & 1))
    as->oddspill = as->evenspill++;
  else
    as->oddspill = 0;
}

/* -- Assembler core ------------------------------------------------------ */

/* Assemble a trace. */
void lj_asm_trace(jit_State *J, GCtrace *T)
{
  ASMState as_;
  ASMState *as = &as_;
  MCode *origtop;

  /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
  {
    IRRef nins = T->nins;
    IRIns *ir = &T->ir[nins-1];
    if (ir->o == IR_NOP || ir->o == IR_RENAME) {
      do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
      T->nins = nins;
    }
  }

  /* Ensure an initialized instruction beyond the last one for HIOP checks. */
  /* This also allows one RENAME to be added without reallocating curfinal. */
  as->orignins = lj_ir_nextins(J);
  J->cur.ir[as->orignins].o = IR_NOP;

  /* Setup initial state. Copy some fields to reduce indirections. */
  as->J = J;
  as->T = T;
  J->curfinal = lj_trace_alloc(J->L, T); /* This copies the IR, too. */
  as->flags = J->flags;
  as->loopref = J->loopref;
  as->realign = NULL;
  as->loopinv = 0;
  as->parent = J->parent ? traceref(J, J->parent) : NULL;

  /* Reserve MCode memory. */
  as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
  as->mcp = as->mctop;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  asm_setup_target(as);

  /*
  ** This is a loop, because the MCode may have to be (re-)assembled
  ** multiple times:
  **
  ** 1. as->realign is set (and the assembly aborted), if the arch-specific
  **    backend wants the MCode to be aligned differently.
  **
  **    This is currently only the case on x86/x64, where small loops get
  **    an aligned loop body plus a short branch. Not much effort is wasted,
  **    because the abort happens very quickly and only once.
  **
  ** 2. The IR is immovable, since the MCode embeds pointers to various
  **    constants inside the IR. But RENAMEs may need to be added to the IR
  **    during assembly, which might grow and reallocate the IR. We check
  **    at the end if the IR (in J->cur.ir) has actually grown, resize the
  **    copy (in J->curfinal.ir) and try again.
  **
  **    95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
  **    2 RENAMEs and only 0.5% have more than that. That's why we opt to
  **    always have one spare slot in the IR (see above), which means we
  **    have to redo the assembly for only ~2% of all traces.
  **
  **    Very, very rarely, this needs to be done repeatedly, since the
  **    location of constants inside the IR (actually, reachability from
  **    a global pointer) may affect register allocation and thus the
  **    number of RENAMEs.
  */
  for (;;) {
    as->mcp = as->mctop;
#ifdef LUA_USE_ASSERT
    as->mcp_prev = as->mcp;
#endif
    as->ir = J->curfinal->ir; /* Use the copied IR. */
    as->curins = J->cur.nins = as->orignins;

    RA_DBG_START();
    RA_DBGX((as, "===== STOP ====="));

    /* General trace setup. Emit tail of trace. */
    asm_tail_prep(as);
    as->mcloop = NULL;
    as->flagmcp = NULL;
    as->topslot = 0;
    as->gcsteps = 0;
    as->sectref = as->loopref;
    as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
    asm_setup_regsp(as);
    if (!as->loopref)
      asm_tail_link(as);

    /* Assemble a trace in linear backwards order. */
    for (as->curins--; as->curins > as->stopins; as->curins--) {
      IRIns *ir = IR(as->curins);
      lua_assert(!(LJ_32 && irt_isint64(ir->t))); /* Handled by SPLIT. */
      if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
        continue; /* Dead-code elimination can be soooo easy. */
      if (irt_isguard(ir->t))
        asm_snap_prep(as);
      RA_DBG_REF();
      checkmclim(as);
      asm_ir(as, ir);
    }

    if (as->realign && J->curfinal->nins >= T->nins)
      continue; /* Retry in case only the MCode needs to be realigned. */

    /* Emit head of trace. */
    RA_DBG_REF();
    checkmclim(as);
    if (as->gcsteps > 0) {
      as->curins = as->T->snap[0].ref;
      asm_snap_prep(as); /* The GC check is a guard. */
      asm_gc_check(as);
      as->curins = as->stopins;
    }
    ra_evictk(as);
    if (as->parent)
      asm_head_side(as);
    else
      asm_head_root(as);
    asm_phi_fixup(as);

    if (J->curfinal->nins >= T->nins) { /* IR didn't grow? */
      lua_assert(J->curfinal->nk == T->nk);
      memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
             (T->nins - as->orignins) * sizeof(IRIns)); /* Copy RENAMEs. */
      T->nins = J->curfinal->nins;
      break; /* Done. */
    }

    /* Otherwise try again with a bigger IR. */
    lj_trace_free(J2G(J), J->curfinal);
    J->curfinal = NULL; /* In case lj_trace_alloc() OOMs. */
    J->curfinal = lj_trace_alloc(J->L, T);
    as->realign = NULL;
  }

  RA_DBGX((as, "===== START ===="));
  RA_DBG_FLUSH();
  if (as->freeset != RSET_ALL)
    lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */

  /* Set trace entry point before fixing up tail to allow link to self. */
  T->mcode = as->mcp;
  T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
  if (!as->loopref)
    asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
  T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
  lj_mcode_sync(T->mcode, origtop);
}

#undef IR

#endif