/*
** Trace recorder (bytecode -> SSA IR).
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_record_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_err.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_meta.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#endif
#include "lj_bc.h"
#include "lj_ff.h"
#if LJ_HASPROFILE
#include "lj_debug.h"
#endif
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_ircall.h"
#include "lj_iropt.h"
#include "lj_trace.h"
#include "lj_record.h"
#include "lj_ffrecord.h"
#include "lj_snap.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_prng.h"

/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)  (&J->cur.ir[(ref)])

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b)  (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

/* Emit raw IR without passing through optimizations. */
#define emitir_raw(ot, a, b)  (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
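
/* Illustrative note: because emitir() routes the instruction through FOLD,
** the returned TRef may not be a newly emitted instruction at all -- FOLD
** can constant-fold it, apply an algebraic rewrite, or return an existing
** instruction via CSE.
*/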

/* -- Sanity checks ------------------------------------------------------- */

#ifdef LUA_USE_ASSERT
/* Sanity check the whole IR -- sloooow. */
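/* Layout reminder: the IR is bias-indexed. Constant references grow
** downwards from REF_BIAS, instruction references grow upwards from it.
** 64 bit constants (except KNULL) occupy two IR slots, which is why the
** loop below skips an extra slot for them.
*/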
static void rec_check_ir(jit_State *J)
{
  IRRef i, nins = J->cur.nins, nk = J->cur.nk;
  lj_assertJ(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536,
             "inconsistent IR layout");
  for (i = nk; i < nins; i++) {
    IRIns *ir = IR(i);
    uint32_t mode = lj_ir_mode[ir->o];
    IRRef op1 = ir->op1;
    IRRef op2 = ir->op2;
    const char *err = NULL;
    switch (irm_op1(mode)) {
    case IRMnone:
      if (op1 != 0) err = "IRMnone op1 used";
      break;
    case IRMref:
      if (op1 < nk || (i >= REF_BIAS ? op1 >= i : op1 <= i))
        err = "IRMref op1 out of range";
      break;
    case IRMlit: break;
    case IRMcst:
      if (i >= REF_BIAS) { err = "constant in IR range"; break; }
      if (irt_is64(ir->t) && ir->o != IR_KNULL)
        i++;
      continue;
    }
    switch (irm_op2(mode)) {
    case IRMnone:
      if (op2) err = "IRMnone op2 used";
      break;
    case IRMref:
      if (op2 < nk || (i >= REF_BIAS ? op2 >= i : op2 <= i))
        err = "IRMref op2 out of range";
      break;
    case IRMlit: break;
    case IRMcst: err = "IRMcst op2"; break;
    }
    if (!err && ir->prev) {
      if (ir->prev < nk || (i >= REF_BIAS ? ir->prev >= i : ir->prev <= i))
        err = "chain out of range";
      else if (ir->o != IR_NOP && IR(ir->prev)->o != ir->o)
        err = "chain to different op";
    }
    lj_assertJ(!err, "bad IR %04d op %d(%04d,%04d): %s",
               i-REF_BIAS,
               ir->o,
               irm_op1(mode) == IRMref ? op1-REF_BIAS : op1,
               irm_op2(mode) == IRMref ? op2-REF_BIAS : op2,
               err);
  }
}

/* Compare stack slots and frames of the recorder and the VM. */
static void rec_check_slots(jit_State *J)
{
  BCReg s, nslots = J->baseslot + J->maxslot;
  int32_t depth = 0;
  cTValue *base = J->L->base - J->baseslot;
  lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot");
  lj_assertJ(J->baseslot == 1+LJ_FR2 || (J->slot[J->baseslot-1] & TREF_FRAME),
             "baseslot does not point to frame");
  lj_assertJ(nslots <= LJ_MAX_JSLOTS, "slot overflow");
  for (s = 0; s < nslots; s++) {
    TRef tr = J->slot[s];
    if (tr) {
      cTValue *tv = &base[s];
      IRRef ref = tref_ref(tr);
      IRIns *ir = NULL;  /* Silence compiler. */
      if (!LJ_FR2 || ref || !(tr & (TREF_FRAME | TREF_CONT))) {
        lj_assertJ(ref >= J->cur.nk && ref < J->cur.nins,
                   "slot %d ref %04d out of range", s, ref - REF_BIAS);
        ir = IR(ref);
        lj_assertJ(irt_t(ir->t) == tref_t(tr), "slot %d IR type mismatch", s);
      }
      if (s == 0) {
        lj_assertJ(tref_isfunc(tr), "frame slot 0 is not a function");
#if LJ_FR2
      } else if (s == 1) {
        lj_assertJ((tr & ~TREF_FRAME) == 0, "bad frame slot 1");
#endif
      } else if ((tr & TREF_FRAME)) {
        GCfunc *fn = gco2func(frame_gc(tv));
        BCReg delta = (BCReg)(tv - frame_prev(tv));
#if LJ_FR2
        lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64,
                   "frame slot %d PC mismatch", s);
        tr = J->slot[s-1];
        ir = IR(tref_ref(tr));
#endif
        lj_assertJ(tref_isfunc(tr),
                   "frame slot %d is not a function", s-LJ_FR2);
        lj_assertJ(!tref_isk(tr) || fn == ir_kfunc(ir),
                   "frame slot %d function mismatch", s-LJ_FR2);
        lj_assertJ(s > delta + LJ_FR2 ? (J->slot[s-delta] & TREF_FRAME)
                                      : (s == delta + LJ_FR2),
                   "frame slot %d broken chain", s-LJ_FR2);
        depth++;
      } else if ((tr & TREF_CONT)) {
#if LJ_FR2
        lj_assertJ(!ref || ir_knum(ir)->u64 == tv->u64,
                   "cont slot %d continuation mismatch", s);
#else
        lj_assertJ(ir_kptr(ir) == gcrefp(tv->gcr, void),
                   "cont slot %d continuation mismatch", s);
#endif
        lj_assertJ((J->slot[s+1+LJ_FR2] & TREF_FRAME),
                   "cont slot %d not followed by frame", s);
        depth++;
      } else {
        /* Number repr. may differ, but other types must be the same. */
        lj_assertJ(tvisnumber(tv) ? tref_isnumber(tr) :
                   itype2irt(tv) == tref_type(tr),
                   "slot %d type mismatch: stack type %d vs IR type %d",
                   s, itypemap(tv), tref_type(tr));
        if (tref_isk(tr)) {  /* Compare constants. */
          TValue tvk;
          lj_ir_kvalue(J->L, &tvk, ir);
          lj_assertJ((tvisnum(&tvk) && tvisnan(&tvk)) ?
                     (tvisnum(tv) && tvisnan(tv)) :
                     lj_obj_equal(tv, &tvk),
                     "slot %d const mismatch: stack %016llx vs IR %016llx",
                     s, tv->u64, tvk.u64);
        }
      }
    }
  }
  lj_assertJ(J->framedepth == depth,
             "frame depth mismatch %d vs %d", J->framedepth, depth);
}
#endif

/* -- Type handling and specialization ------------------------------------ */

/* Note: these functions return tagged references (TRef). */

/* Specialize a slot to a specific type. Note: slot can be negative! */
static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode)
{
  /* Caller may set IRT_GUARD in t. */
  TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode);
  J->base[slot] = ref;
  return ref;
}

/* Specialize a slot to the runtime type. Note: slot can be negative! */
static TRef sload(jit_State *J, int32_t slot)
{
  IRType t = itype2irt(&J->L->base[slot]);
  TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot,
                        IRSLOAD_TYPECHECK);
  if (irtype_ispri(t)) ref = TREF_PRI(t);  /* Canonicalize primitive refs. */
  J->base[slot] = ref;
  return ref;
}

/* Get TRef from slot. Load slot and specialize if not done already. */
#define getslot(J, s)  (J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s)))

/* Get TRef for current function. */
static TRef getcurrf(jit_State *J)
{
  if (J->base[-1-LJ_FR2])
    return J->base[-1-LJ_FR2];
  /* Non-base frame functions ought to be loaded already. */
  lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot");
  return sloadt(J, -1-LJ_FR2, IRT_FUNC, IRSLOAD_READONLY);
}

/* Compare for raw object equality.
** Returns 0 if the objects are the same.
** Returns 1 if they are different, but the same type.
** Returns 2 for two different types.
** Comparisons between primitives always return 1 -- no caller cares about it.
*/
int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv)
{
  int diff = !lj_obj_equal(av, bv);
  if (!tref_isk2(a, b)) {  /* Shortcut, also handles primitives. */
    IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a);
    IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b);
    if (ta != tb) {
      /* Widen mixed number/int comparisons to number/number comparison. */
      if (ta == IRT_INT && tb == IRT_NUM) {
        a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT);
        ta = IRT_NUM;
      } else if (ta == IRT_NUM && tb == IRT_INT) {
        b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT);
      } else {
        return 2;  /* Two different types are never equal. */
      }
    }
    emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b);
  }
  return diff;
}
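
/* The guard emitted above specializes the trace to the outcome observed
** during recording: IR_EQ if the operands compared equal, IR_NE if they
** did not. Taking the other path at runtime triggers a trace exit.
*/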

/* Constify a value. Returns 0 for non-representable object types. */
TRef lj_record_constify(jit_State *J, cTValue *o)
{
  if (tvisgcv(o))
    return lj_ir_kgc(J, gcV(o), itype2irt(o));
  else if (tvisint(o))
    return lj_ir_kint(J, intV(o));
  else if (tvisnum(o))
    return lj_ir_knumint(J, numV(o));
  else if (tvisbool(o))
    return TREF_PRI(itype2irt(o));
  else
    return 0;  /* Can't represent lightuserdata (pointless). */
}

/* -- Record loop ops ----------------------------------------------------- */

/* Loop event. */
typedef enum {
  LOOPEV_LEAVE,    /* Loop is left or not entered. */
  LOOPEV_ENTERLO,  /* Loop is entered with a low iteration count left. */
  LOOPEV_ENTER     /* Loop is entered. */
} LoopEvent;
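
/* Note: LOOPEV_ENTERLO signals that only a few iterations remain. The
** unroll heuristics below exploit this: unrolling a low-trip-count loop
** into the enclosing trace is usually cheaper than forming a separate
** looping trace for it.
*/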

/* Canonicalize slots: convert integers to numbers. */
static void canonicalize_slots(jit_State *J)
{
  BCReg s;
  if (LJ_DUALNUM) return;
  for (s = J->baseslot+J->maxslot-1; s >= 1; s--) {
    TRef tr = J->slot[s];
    if (tref_isinteger(tr)) {
      IRIns *ir = IR(tref_ref(tr));
      if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY)))
        J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
    }
  }
}

/* Stop recording. */
void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
{
#ifdef LUAJIT_ENABLE_TABLE_BUMP
  if (J->retryrec)
    lj_trace_err(J, LJ_TRERR_RETRY);
#endif
  lj_trace_end(J);
  J->cur.linktype = (uint8_t)linktype;
  J->cur.link = (uint16_t)lnk;
  /* Looping back at the same stack level? */
  if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) {
    if ((J->flags & JIT_F_OPT_LOOP))  /* Shall we try to create a loop? */
      goto nocanon;  /* Do not canonicalize or we lose the narrowing. */
    if (J->cur.root)  /* Otherwise ensure we always link to the root trace. */
      J->cur.link = J->cur.root;
  }
  canonicalize_slots(J);
nocanon:
  /* Note: all loop ops must set J->pc to the following instruction! */
  lj_snap_add(J);  /* Add loop snapshot. */
  J->needsnap = 0;
  J->mergesnap = 1;  /* In case recording continues. */
}

/* Search bytecode backwards for a int/num constant slot initializer. */
static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t)
{
  /* This algorithm is rather simplistic and assumes quite a bit about
  ** how the bytecode is generated. It works fine for FORI initializers,
  ** but it won't necessarily work in other cases (e.g. iterator arguments).
  ** It doesn't do anything fancy, either (like backpropagating MOVs).
  */
  const BCIns *pc, *startpc = proto_bc(J->pt);
  for (pc = endpc-1; pc > startpc; pc--) {
    BCIns ins = *pc;
    BCOp op = bc_op(ins);
    /* First try to find the last instruction that stores to this slot. */
    if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) {
      return 0;  /* Multiple results, e.g. from a CALL or KNIL. */
    } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) {
      if (op == BC_KSHORT || op == BC_KNUM) {  /* Found const. initializer. */
        /* Now try to verify there's no forward jump across it. */
        const BCIns *kpc = pc;
        for (; pc > startpc; pc--)
          if (bc_op(*pc) == BC_JMP) {
            const BCIns *target = pc+bc_j(*pc)+1;
            if (target > kpc && target <= endpc)
              return 0;  /* Conditional assignment. */
          }
        if (op == BC_KSHORT) {
          int32_t k = (int32_t)(int16_t)bc_d(ins);
          return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k);
        } else {
          cTValue *tv = proto_knumtv(J->pt, bc_d(ins));
          if (t == IRT_INT) {
            int32_t k = numberVint(tv);
            if (tvisint(tv) || numV(tv) == (lua_Number)k)  /* -0 is ok here. */
              return lj_ir_kint(J, k);
            return 0;  /* Type mismatch. */
          } else {
            return lj_ir_knum(J, numberVnum(tv));
          }
        }
      }
      return 0;  /* Non-constant initializer. */
    }
  }
  return 0;  /* No assignment to this slot found? */
}
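
/* Typical bytecode this matches (hypothetical listing for
** 'for i=1,100 do ... end', slot numbers made up):
**   KSHORT  1   1    ; idx init
**   KSHORT  2 100    ; stop
**   KSHORT  3   1    ; step
**   FORI    1 => exit
** find_kinit() walks backwards from the FORI and returns the constant
** stored by the matching KSHORT/KNUM, provided no forward jump can skip it.
*/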

/* Load and optionally convert a FORI argument from a slot. */
static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode)
{
  int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0;
  return sloadt(J, (int32_t)slot,
                t + (((mode & IRSLOAD_TYPECHECK) ||
                      (conv && t == IRT_INT && !(mode >> 16))) ?
                     IRT_GUARD : 0),
                mode + conv);
}

/* Peek before FORI to find a const initializer. Otherwise load from slot. */
static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot,
                     IRType t, int mode)
{
  TRef tr = J->base[slot];
  if (!tr) {
    tr = find_kinit(J, fori, slot, t);
    if (!tr)
      tr = fori_load(J, slot, t, mode);
  }
  return tr;
}

/* Return the direction of the FOR loop iterator.
** It's important to exactly reproduce the semantics of the interpreter.
*/
static int rec_for_direction(cTValue *o)
{
  return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0;
}
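
/* Note: for doubles only the sign bit in the high word is tested, exactly
** as the interpreter does. This also classifies a step of -0.0 as a
** negative direction.
*/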

/* Simulate the runtime behavior of the FOR loop iterator. */
static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl)
{
  lua_Number stopv = numberVnum(&o[FORL_STOP]);
  lua_Number idxv = numberVnum(&o[FORL_IDX]);
  lua_Number stepv = numberVnum(&o[FORL_STEP]);
  if (isforl)
    idxv += stepv;
  if (rec_for_direction(&o[FORL_STEP])) {
    if (idxv <= stopv) {
      *op = IR_LE;
      return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
    }
    *op = IR_GT; return LOOPEV_LEAVE;
  } else {
    if (stopv <= idxv) {
      *op = IR_GE;
      return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
    }
    *op = IR_LT; return LOOPEV_LEAVE;
  }
}

/* Record checks for FOR loop overflow and step direction. */
static void rec_for_check(jit_State *J, IRType t, int dir,
                          TRef stop, TRef step, int init)
{
  if (!tref_isk(step)) {
    /* Non-constant step: need a guard for the direction. */
    TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J);
    emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero);
    /* Add hoistable overflow checks for a narrowed FORL index. */
    if (init && t == IRT_INT) {
      if (tref_isk(stop)) {
        /* Constant stop: optimize check away or to a range check for step. */
        int32_t k = IR(tref_ref(stop))->i;
        if (dir) {
          if (k > 0)
            emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k));
        } else {
          if (k < 0)
            emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k));
        }
      } else {
        /* Stop+step variable: need full overflow check. */
        TRef tr = emitir(IRTGI(IR_ADDOV), step, stop);
        emitir(IRTI(IR_USE), tr, 0);  /* ADDOV is weak. Avoid dead result. */
      }
    }
  } else if (init && t == IRT_INT && !tref_isk(stop)) {
    /* Constant step: optimize overflow check to a range check for stop. */
    int32_t k = IR(tref_ref(step))->i;
    k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k;
    emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k));
  }
}
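
/* Worked example for the constant-stop case: with dir=1 and stop k=100,
** the guard step <= 0x7fffffff-100 ensures idx+step cannot overflow int32
** while idx <= stop still holds, so the index ADD inside the loop needs
** no per-iteration overflow check.
*/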

/* Record a FORL instruction. */
static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
                         int init)
{
  BCReg ra = bc_a(*fori);
  cTValue *tv = &J->L->base[ra];
  TRef idx = J->base[ra+FORL_IDX];
  IRType t = idx ? tref_type(idx) :
             (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM;
  int mode = IRSLOAD_INHERIT +
    ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0);
  TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
  TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
  int tc, dir = rec_for_direction(&tv[FORL_STEP]);
  lj_assertJ(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI,
             "bad bytecode %d instead of FORI/JFORI", bc_op(*fori));
  scev->t.irt = t;
  scev->dir = dir;
  scev->stop = tref_ref(stop);
  scev->step = tref_ref(step);
  rec_for_check(J, t, dir, stop, step, init);
  scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT));
  tc = (LJ_DUALNUM &&
        !(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) &&
          tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ?
       IRSLOAD_TYPECHECK : 0;
  if (tc) {
    J->base[ra+FORL_STOP] = stop;
    J->base[ra+FORL_STEP] = step;
  }
  if (!idx)
    idx = fori_load(J, ra+FORL_IDX, t,
                    IRSLOAD_INHERIT + tc + (J->scev.start << 16));
  if (!init)
    J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step);
  J->base[ra+FORL_EXT] = idx;
  scev->idx = tref_ref(idx);
  setmref(scev->pc, fori);
  J->maxslot = ra+FORL_EXT+1;
}

/* Record FORL/JFORL or FORI/JFORI. */
static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
{
  BCReg ra = bc_a(*fori);
  TValue *tv = &J->L->base[ra];
  TRef *tr = &J->base[ra];
  IROp op;
  LoopEvent ev;
  TRef stop;
  IRType t;
  if (isforl) {  /* Handle FORL/JFORL opcodes. */
    TRef idx = tr[FORL_IDX];
    if (mref(J->scev.pc, const BCIns) == fori && tref_ref(idx) == J->scev.idx) {
      t = J->scev.t.irt;
      stop = J->scev.stop;
      idx = emitir(IRT(IR_ADD, t), idx, J->scev.step);
      tr[FORL_EXT] = tr[FORL_IDX] = idx;
    } else {
      ScEvEntry scev;
      rec_for_loop(J, fori, &scev, 0);
      t = scev.t.irt;
      stop = scev.stop;
    }
  } else {  /* Handle FORI/JFORI opcodes. */
    BCReg i;
    lj_meta_for(J->L, tv);
    t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) :
        IRT_NUM;
    for (i = FORL_IDX; i <= FORL_STEP; i++) {
      if (!tr[i]) sload(J, ra+i);
      lj_assertJ(tref_isnumber_str(tr[i]), "bad FORI argument type");
      if (tref_isstr(tr[i]))
        tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
      if (t == IRT_INT) {
        if (!tref_isinteger(tr[i]))
          tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK);
      } else {
        if (!tref_isnum(tr[i]))
          tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT);
      }
    }
    tr[FORL_EXT] = tr[FORL_IDX];
    stop = tr[FORL_STOP];
    rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]),
                  stop, tr[FORL_STEP], 1);
  }

  ev = rec_for_iter(&op, tv, isforl);
  if (ev == LOOPEV_LEAVE) {
    J->maxslot = ra+FORL_EXT+1;
    J->pc = fori+1;
  } else {
    J->maxslot = ra;
    J->pc = fori+bc_j(*fori)+1;
  }
  lj_snap_add(J);

  emitir(IRTG(op, t), tr[FORL_IDX], stop);

  if (ev == LOOPEV_LEAVE) {
    J->maxslot = ra;
    J->pc = fori+bc_j(*fori)+1;
  } else {
    J->maxslot = ra+FORL_EXT+1;
    J->pc = fori+1;
  }
  J->needsnap = 1;
  return ev;
}

/* Record ITERL/JITERL. */
static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
{
  BCReg ra = bc_a(iterins);
  if (!tref_isnil(getslot(J, ra))) {  /* Looping back? */
    J->base[ra-1] = J->base[ra];  /* Copy result of ITERC to control var. */
    J->maxslot = ra-1+bc_b(J->pc[-1]);
    J->pc += bc_j(iterins)+1;
    return LOOPEV_ENTER;
  } else {
    J->maxslot = ra-3;
    J->pc++;
    return LOOPEV_LEAVE;
  }
}

/* Record LOOP/JLOOP. Now, that was easy. */
static LoopEvent rec_loop(jit_State *J, BCReg ra, int skip)
{
  if (ra < J->maxslot) J->maxslot = ra;
  J->pc += skip;
  return LOOPEV_ENTER;
}

/* Check if a loop repeatedly failed to trace because it didn't loop back. */
static int innerloopleft(jit_State *J, const BCIns *pc)
{
  ptrdiff_t i;
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (mref(J->penalty[i].pc, const BCIns) == pc) {
      if ((J->penalty[i].reason == LJ_TRERR_LLEAVE ||
           J->penalty[i].reason == LJ_TRERR_LINNER) &&
          J->penalty[i].val >= 2*PENALTY_MIN)
        return 1;
      break;
    }
  return 0;
}

/* Handle the case when an interpreted loop op is hit. */
static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
{
  if (J->parent == 0 && J->exitno == 0) {
    if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
      /* Same loop? */
      if (ev == LOOPEV_LEAVE)  /* Must loop back to form a root trace. */
        lj_trace_err(J, LJ_TRERR_LLEAVE);
      lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno);  /* Looping trace. */
    } else if (ev != LOOPEV_LEAVE) {  /* Entering inner loop? */
      /* It's usually better to abort here and wait until the inner loop
      ** is traced. But if the inner loop repeatedly didn't loop back,
      ** this indicates a low trip count. In this case try unrolling
      ** an inner loop even in a root trace. But it's better to be a bit
      ** more conservative here and only do it for very short loops.
      */
      if (bc_j(*pc) != -1 && !innerloopleft(J, pc))
        lj_trace_err(J, LJ_TRERR_LINNER);  /* Root trace hit an inner loop. */
      if ((ev != LOOPEV_ENTERLO &&
           J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0)
        lj_trace_err(J, LJ_TRERR_LUNROLL);  /* Limit loop unrolling. */
      J->loopref = J->cur.nins;
    }
  } else if (ev != LOOPEV_LEAVE) {  /* Side trace enters an inner loop. */
    J->loopref = J->cur.nins;
    if (--J->loopunroll < 0)
      lj_trace_err(J, LJ_TRERR_LUNROLL);  /* Limit loop unrolling. */
  }  /* Side trace continues across a loop that's left or not entered. */
}

/* Handle the case when an already compiled loop op is hit. */
static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
{
  if (J->parent == 0 && J->exitno == 0) {  /* Root trace hit an inner loop. */
    /* Better let the inner loop spawn a side trace back here. */
    lj_trace_err(J, LJ_TRERR_LINNER);
  } else if (ev != LOOPEV_LEAVE) {  /* Side trace enters a compiled loop. */
    J->instunroll = 0;  /* Cannot continue across a compiled loop op. */
    if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
      lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno);  /* Form extra loop. */
    else
      lj_record_stop(J, LJ_TRLINK_ROOT, lnk);  /* Link to the loop. */
  }  /* Side trace continues across a loop that's left or not entered. */
}
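
/* Summary of the linking decisions above: a root trace never records
** across an already-compiled inner loop -- it aborts and lets the inner
** loop own the hot path. A side trace either closes back into its own
** root loop or simply links to the compiled loop trace.
*/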

/* -- Record profiler hook checks ----------------------------------------- */

#if LJ_HASPROFILE

/* Need to insert profiler hook check? */
static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc)
{
  GCproto *ppt;
  lj_assertJ(J->prof_mode == 'f' || J->prof_mode == 'l',
             "bad profiler mode %c", J->prof_mode);
  if (!pt)
    return 0;
  ppt = J->prev_pt;
  J->prev_pt = pt;
  if (pt != ppt && ppt) {
    J->prev_line = -1;
    return 1;
  }
  if (J->prof_mode == 'l') {
    BCLine line = lj_debug_line(pt, proto_bcpos(pt, pc));
    BCLine pline = J->prev_line;
    J->prev_line = line;
    if (pline != line)
      return 1;
  }
  return 0;
}

static void rec_profile_ins(jit_State *J, const BCIns *pc)
{
  if (J->prof_mode && rec_profile_need(J, J->pt, pc)) {
    emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
    lj_snap_add(J);
  }
}

static void rec_profile_ret(jit_State *J)
{
  if (J->prof_mode == 'f') {
    emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
    J->prev_pt = NULL;
    lj_snap_add(J);
  }
}

#endif

/* -- Record calls and returns -------------------------------------------- */

/* Specialize to the runtime value of the called function or its prototype. */
static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
{
  TRef kfunc;
  if (isluafunc(fn)) {
    GCproto *pt = funcproto(fn);
    /* Too many closures created? Probably not a monomorphic function. */
    if (pt->flags >= PROTO_CLC_POLY) {  /* Specialize to prototype instead. */
      TRef trpt = emitir(IRT(IR_FLOAD, IRT_PGC), tr, IRFL_FUNC_PC);
      emitir(IRTG(IR_EQ, IRT_PGC), trpt, lj_ir_kptr(J, proto_bc(pt)));
      (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);  /* Prevent GC of proto. */
      return tr;
    }
  } else {
    /* Don't specialize to non-monomorphic builtins. */
    switch (fn->c.ffid) {
    case FF_coroutine_wrap_aux:
    case FF_string_gmatch_aux:
      /* NYI: io_file_iter doesn't have an ffid, yet. */
      {  /* Specialize to the ffid. */
        TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), tr, IRFL_FUNC_FFID);
        emitir(IRTG(IR_EQ, IRT_INT), trid, lj_ir_kint(J, fn->c.ffid));
      }
      return tr;
    default:
      /* NYI: don't specialize to non-monomorphic C functions. */
      break;
    }
  }
  /* Otherwise specialize to the function (closure) value itself. */
  kfunc = lj_ir_kfunc(J, fn);
  emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc);
  return kfunc;
}
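
/* Note: PROTO_CLC_POLY is a heuristic threshold on the per-prototype
** closure-creation counter. Once enough distinct closures exist, guarding
** on closure identity would keep failing, so the prototype (identified by
** its bytecode pointer) is guarded instead.
*/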

/* Record call setup. */
static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
{
  RecordIndex ix;
  TValue *functv = &J->L->base[func];
  TRef kfunc, *fbase = &J->base[func];
  ptrdiff_t i;
  (void)getslot(J, func);  /* Ensure func has a reference. */
  for (i = 1; i <= nargs; i++)
    (void)getslot(J, func+LJ_FR2+i);  /* Ensure all args have a reference. */
  if (!tref_isfunc(fbase[0])) {  /* Resolve __call metamethod. */
    ix.tab = fbase[0];
    copyTV(J->L, &ix.tabv, functv);
    if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
      lj_trace_err(J, LJ_TRERR_NOMM);
    for (i = ++nargs; i > LJ_FR2; i--)  /* Shift arguments up. */
      fbase[i+LJ_FR2] = fbase[i+LJ_FR2-1];
#if LJ_FR2
    fbase[2] = fbase[0];
#endif
    fbase[0] = ix.mobj;  /* Replace function. */
    functv = &ix.mobjv;
  }
  kfunc = rec_call_specialize(J, funcV(functv), fbase[0]);
#if LJ_FR2
  fbase[0] = kfunc;
  fbase[1] = TREF_FRAME;
#else
  fbase[0] = kfunc | TREF_FRAME;
#endif
  J->maxslot = (BCReg)nargs;
}
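
/* Resulting frame layout for LJ_FR2: fbase[0] holds the (specialized)
** function, fbase[1] holds the TREF_FRAME link slot and the arguments
** start at fbase[2]. Without LJ_FR2 the frame tag is or'ed directly into
** the function slot.
*/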

/* Record call. */
void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
{
  rec_call_setup(J, func, nargs);
  /* Bump frame. */
  J->framedepth++;
  J->base += func+1+LJ_FR2;
  J->baseslot += func+1+LJ_FR2;
  if (J->baseslot + J->maxslot >= LJ_MAX_JSLOTS)
    lj_trace_err(J, LJ_TRERR_STACKOV);
}

/* Record tail call. */
void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
{
  rec_call_setup(J, func, nargs);
  if (frame_isvarg(J->L->base - 1)) {
    BCReg cbase = (BCReg)frame_delta(J->L->base - 1);
    if (--J->framedepth < 0)
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    J->baseslot -= (BCReg)cbase;
    J->base -= cbase;
    func += cbase;
  }
  /* Move func + args down. */
  if (LJ_FR2 && J->baseslot == 2)
    J->base[func+1] = TREF_FRAME;
  memmove(&J->base[-1-LJ_FR2], &J->base[func], sizeof(TRef)*(J->maxslot+1+LJ_FR2));
  /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
  /* Tailcalls can form a loop, so count towards the loop unroll limit. */
  if (++J->tailcalled > J->loopunroll)
    lj_trace_err(J, LJ_TRERR_LUNROLL);
}

/* Check unroll limits for down-recursion. */
static int check_downrec_unroll(jit_State *J, GCproto *pt)
{
  IRRef ptref;
  for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev)
    if (ir_kgc(IR(ptref)) == obj2gco(pt)) {
      int count = 0;
      IRRef ref;
      for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev)
        if (IR(ref)->op1 == ptref)
          count++;
      if (count) {
        if (J->pc == J->startpc) {
          if (count + J->tailcalled > J->param[JIT_P_recunroll])
            return 1;
        } else {
          lj_trace_err(J, LJ_TRERR_DOWNREC);
        }
      }
    }
  return 0;
}
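
/* Down-recursion is detected by counting RETF instructions that guard on
** the same prototype constant: each one corresponds to one return into a
** frame of this function that has already been recorded on the trace.
*/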

static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot);

/* Record return. */
void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
{
  TValue *frame = J->L->base - 1;
  ptrdiff_t i;
  for (i = 0; i < gotresults; i++)
    (void)getslot(J, rbase+i);  /* Ensure all results have a reference. */
  while (frame_ispcall(frame)) {  /* Immediately resolve pcall() returns. */
    BCReg cbase = (BCReg)frame_delta(frame);
    if (--J->framedepth <= 0)
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return");
    gotresults++;
    rbase += cbase;
    J->baseslot -= (BCReg)cbase;
    J->base -= cbase;
    J->base[--rbase] = TREF_TRUE;  /* Prepend true to results. */
    frame = frame_prevd(frame);
  }
  /* Return to lower frame via interpreter for unhandled cases. */
  if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
      (!frame_islua(frame) ||
       (J->parent == 0 && J->exitno == 0 &&
        !bc_isret(bc_op(J->cur.startins))))) {
    /* NYI: specialize to frame type and return directly, not via RET*. */
    for (i = 0; i < (ptrdiff_t)rbase; i++)
      J->base[i] = 0;  /* Purge dead slots. */
    J->maxslot = rbase + (BCReg)gotresults;
    lj_record_stop(J, LJ_TRLINK_RETURN, 0);  /* Return to interpreter. */
    return;
  }
  if (frame_isvarg(frame)) {
    BCReg cbase = (BCReg)frame_delta(frame);
    if (--J->framedepth < 0)  /* NYI: return of vararg func to lower frame. */
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    lj_assertJ(J->baseslot > 1+LJ_FR2, "bad baseslot for return");
    rbase += cbase;
    J->baseslot -= (BCReg)cbase;
    J->base -= cbase;
    frame = frame_prevd(frame);
  }
  if (frame_islua(frame)) {  /* Return to Lua frame. */
    BCIns callins = *(frame_pc(frame)-1);
    ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 : gotresults;
    BCReg cbase = bc_a(callins);
    GCproto *pt = funcproto(frame_func(frame - (cbase+1+LJ_FR2)));
    if ((pt->flags & PROTO_NOJIT))
      lj_trace_err(J, LJ_TRERR_CJITOFF);
    if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
      if (check_downrec_unroll(J, pt)) {
        J->maxslot = (BCReg)(rbase + gotresults);
        lj_snap_purge(J);
        lj_record_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno);  /* Down-rec. */
        return;
      }
      lj_snap_add(J);
    }
    for (i = 0; i < nresults; i++)  /* Adjust results. */
      J->base[i-1-LJ_FR2] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
    J->maxslot = cbase+(BCReg)nresults;
    if (J->framedepth > 0) {  /* Return to a frame that is part of the trace. */
      J->framedepth--;
      lj_assertJ(J->baseslot > cbase+1+LJ_FR2, "bad baseslot for return");
      J->baseslot -= cbase+1+LJ_FR2;
      J->base -= cbase+1+LJ_FR2;
    } else if (J->parent == 0 && J->exitno == 0 &&
               !bc_isret(bc_op(J->cur.startins))) {
      /* Return to lower frame would leave the loop in a root trace. */
      lj_trace_err(J, LJ_TRERR_LLEAVE);
    } else if (J->needsnap) {  /* Tailcalled to ff with side-effects. */
      lj_trace_err(J, LJ_TRERR_NYIRETL);  /* No way to insert snapshot here. */
    } else {  /* Return to lower frame. Guard for the target we return to. */
      TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
      TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
      emitir(IRTG(IR_RETF, IRT_PGC), trpt, trpc);
      J->retdepth++;
      J->needsnap = 1;
      lj_assertJ(J->baseslot == 1+LJ_FR2, "bad baseslot for return");
      /* Shift result slots up and clear the slots of the new frame below. */
      memmove(J->base + cbase, J->base-1-LJ_FR2, sizeof(TRef)*nresults);
      memset(J->base-1-LJ_FR2, 0, sizeof(TRef)*(cbase+1+LJ_FR2));
    }
  } else if (frame_iscont(frame)) {  /* Return to continuation frame. */
    ASMFunction cont = frame_contf(frame);
    BCReg cbase = (BCReg)frame_delta(frame);
    if ((J->framedepth -= 2) < 0)
      lj_trace_err(J, LJ_TRERR_NYIRETL);
    J->baseslot -= (BCReg)cbase;
    J->base -= cbase;
    J->maxslot = cbase-(2<<LJ_FR2);
    if (cont == lj_cont_ra) {
      /* Copy result to destination slot. */
      BCReg dst = bc_a(*(frame_contpc(frame)-1));
      J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
      if (dst >= J->maxslot) {
        J->maxslot = dst+1;
      }
    } else if (cont == lj_cont_nop) {
      /* Nothing to do here. */
    } else if (cont == lj_cont_cat) {
      BCReg bslot = bc_b(*(frame_contpc(frame)-1));
      TRef tr = gotresults ? J->base[cbase+rbase] : TREF_NIL;
      if (bslot != J->maxslot) {  /* Concatenate the remainder. */
        TValue *b = J->L->base, save;  /* Simulate lower frame and result. */
        J->base[J->maxslot] = tr;
        copyTV(J->L, &save, b-(2<<LJ_FR2));
        if (gotresults)
          copyTV(J->L, b-(2<<LJ_FR2), b+rbase);
        else
          setnilV(b-(2<<LJ_FR2));
        J->L->base = b - cbase;
        tr = rec_cat(J, bslot, cbase-(2<<LJ_FR2));
        b = J->L->base + cbase;  /* Undo. */
        J->L->base = b;
        copyTV(J->L, b-(2<<LJ_FR2), &save);
      }
      if (tr) {  /* Store final result. */
        BCReg dst = bc_a(*(frame_contpc(frame)-1));
        J->base[dst] = tr;
        if (dst >= J->maxslot) {
          J->maxslot = dst+1;
        }
      }  /* Otherwise continue with another __concat call. */
    } else {
      /* Result type already specialized. */
      lj_assertJ(cont == lj_cont_condf || cont == lj_cont_condt,
                 "bad continuation type");
    }
  } else {
    lj_trace_err(J, LJ_TRERR_NYIRETL);  /* NYI: handle return to C frame. */
  }
  lj_assertJ(J->baseslot >= 1+LJ_FR2, "bad baseslot for return");
}

/* -- Metamethod handling ------------------------------------------------- */

/* Prepare to record call to metamethod. */
static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
{
  BCReg s, top = cont == lj_cont_cat ? J->maxslot : curr_proto(J->L)->framesize;
#if LJ_FR2
  J->base[top] = lj_ir_k64(J, IR_KNUM, u64ptr(contptr(cont)));
  J->base[top+1] = TREF_CONT;
#else
  J->base[top] = lj_ir_kptr(J, contptr(cont)) | TREF_CONT;
#endif
  J->framedepth++;
  for (s = J->maxslot; s < top; s++)
    J->base[s] = 0;  /* Clear frame gap to avoid resurrecting previous refs. */
  return top+1+LJ_FR2;
}
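
/* The continuation frame set up above makes the metamethod invocation look
** like an ordinary call to the recorder. When the metamethod returns,
** lj_record_ret() dispatches on the continuation (lj_cont_ra, lj_cont_cat,
** lj_cont_condt/condf, ...) to post-process the result.
*/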

/* Record metamethod lookup. */
int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
{
  RecordIndex mix;
  GCtab *mt;
  if (tref_istab(ix->tab)) {
    mt = tabref(tabV(&ix->tabv)->metatable);
    mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
  } else if (tref_isudata(ix->tab)) {
    int udtype = udataV(&ix->tabv)->udtype;
    mt = tabref(udataV(&ix->tabv)->metatable);
    /* The metatables of special userdata objects are treated as immutable. */
    if (udtype != UDTYPE_USERDATA) {
      cTValue *mo;
      if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
        /* Specialize to the C library namespace object. */
        emitir(IRTG(IR_EQ, IRT_PGC), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
      } else {
        /* Specialize to the type of userdata. */
        TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
        emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype));
      }
immutable_mt:
      mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm));
      if (!mo || tvisnil(mo))
        return 0;  /* No metamethod. */
      /* Treat metamethod or index table as immutable, too. */
      if (!(tvisfunc(mo) || tvistab(mo)))
        lj_trace_err(J, LJ_TRERR_BADTYPE);
      copyTV(J->L, &ix->mobjv, mo);
      ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB);
      ix->mtv = mt;
      ix->mt = TREF_NIL;  /* Dummy value for comparison semantics. */
      return 1;  /* Got metamethod or index table. */
    }
    mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META);
  } else {
    /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */
    mt = tabref(basemt_obj(J2G(J), &ix->tabv));
    if (mt == NULL) {
      ix->mt = TREF_NIL;
      return 0;  /* No metamethod. */
    }
    /* The cdata metatable is treated as immutable. */
    if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
    ix->mt = mix.tab = lj_ir_ggfload(J, IRT_TAB,
      GG_OFS(g.gcroot[GCROOT_BASEMT+itypemap(&ix->tabv)]));
    goto nocheck;
  }
  ix->mt = mt ? mix.tab : TREF_NIL;
  emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB));
nocheck:
  if (mt) {
    GCstr *mmstr = mmname_str(J2G(J), mm);
    cTValue *mo = lj_tab_getstr(mt, mmstr);
    if (mo && !tvisnil(mo))
      copyTV(J->L, &ix->mobjv, mo);
    ix->mtv = mt;
    settabV(J->L, &mix.tabv, mt);
    setstrV(J->L, &mix.keyv, mmstr);
    mix.key = lj_ir_kstr(J, mmstr);
    mix.val = 0;
    mix.idxchain = 0;
    ix->mobj = lj_record_idx(J, &mix);
    return !tref_isnil(ix->mobj);  /* 1 if metamethod found, 0 if not. */
  }
  return 0;  /* No metamethod. */
}

/* Record call to arithmetic metamethod. */
static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
{
  /* Set up metamethod call first to save ix->tab and ix->tabv. */
  BCReg func = rec_mm_prep(J, mm == MM_concat ? lj_cont_cat : lj_cont_ra);
  TRef *base = J->base + func;
  TValue *basev = J->L->base + func;
  base[1+LJ_FR2] = ix->tab; base[2+LJ_FR2] = ix->key;
  copyTV(J->L, basev+1+LJ_FR2, &ix->tabv);
  copyTV(J->L, basev+2+LJ_FR2, &ix->keyv);
  if (!lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
    if (mm != MM_unm) {
      ix->tab = ix->key;
      copyTV(J->L, &ix->tabv, &ix->keyv);
      if (lj_record_mm_lookup(J, ix, mm))  /* Lookup mm on 2nd operand. */
        goto ok;
    }
    lj_trace_err(J, LJ_TRERR_NOMM);
  }
ok:
  base[0] = ix->mobj;
#if LJ_FR2
  base[1] = 0;
#endif
  copyTV(J->L, basev+0, &ix->mobjv);
  lj_record_call(J, func, 2);
  return 0;  /* No result yet. */
}

/* Record call to __len metamethod. */
static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
{
  RecordIndex ix;
  ix.tab = tr;
  copyTV(J->L, &ix.tabv, tv);
  if (lj_record_mm_lookup(J, &ix, MM_len)) {
    BCReg func = rec_mm_prep(J, lj_cont_ra);
    TRef *base = J->base + func;
    TValue *basev = J->L->base + func;
    base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
    base += LJ_FR2;
    basev += LJ_FR2;
    base[1] = tr; copyTV(J->L, basev+1, tv);
#if LJ_52
    base[2] = tr; copyTV(J->L, basev+2, tv);
#else
    base[2] = TREF_NIL; setnilV(basev+2);
#endif
    lj_record_call(J, func, 2);
  } else {
    if (LJ_52 && tref_istab(tr))
      return emitir(IRTI(IR_ALEN), tr, TREF_NIL);
    lj_trace_err(J, LJ_TRERR_NOMM);
  }
  return 0;  /* No result yet. */
}

/* Call a comparison metamethod. */
static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
{
  BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
  TRef *base = J->base + func + LJ_FR2;
  TValue *tv = J->L->base + func + LJ_FR2;
  base[-LJ_FR2] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
  copyTV(J->L, tv-LJ_FR2, &ix->mobjv);
  copyTV(J->L, tv+1, &ix->valv);
  copyTV(J->L, tv+2, &ix->keyv);
  lj_record_call(J, func, 2);
}

/* Record call to equality comparison metamethod (for tab and udata only). */
static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op)
{
  ix->tab = ix->val;
  copyTV(J->L, &ix->tabv, &ix->valv);
  if (lj_record_mm_lookup(J, ix, MM_eq)) {  /* Lookup mm on 1st operand. */
    cTValue *bv;
    TRef mo1 = ix->mobj;
    TValue mo1v;
    copyTV(J->L, &mo1v, &ix->mobjv);
    /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
    bv = &ix->keyv;
    if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
      TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
      emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
    } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
      TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
      emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
    } else {  /* Lookup metamethod on 2nd operand and compare both. */
      ix->tab = ix->key;
      copyTV(J->L, &ix->tabv, bv);
      if (!lj_record_mm_lookup(J, ix, MM_eq) ||
          lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
        return;
    }
    rec_mm_callcomp(J, ix, op);
  }
}

/* Record call to ordered comparison metamethods (for arbitrary objects). */
static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op)
{
  ix->tab = ix->val;
  copyTV(J->L, &ix->tabv, &ix->valv);
  while (1) {
    MMS mm = (op & 2) ? MM_le : MM_lt;  /* Try __le + __lt or only __lt. */
#if LJ_52
    if (!lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
      ix->tab = ix->key;
      copyTV(J->L, &ix->tabv, &ix->keyv);
      if (!lj_record_mm_lookup(J, ix, mm))  /* Lookup mm on 2nd operand. */
        goto nomatch;
    }
    rec_mm_callcomp(J, ix, op);
    return;
#else
    if (lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
      cTValue *bv;
      TRef mo1 = ix->mobj;
      TValue mo1v;
      copyTV(J->L, &mo1v, &ix->mobjv);
      /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
      bv = &ix->keyv;
      if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
        TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
        emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
      } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
        TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
        emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
      } else {  /* Lookup metamethod on 2nd operand and compare both. */
        ix->tab = ix->key;
        copyTV(J->L, &ix->tabv, bv);
        if (!lj_record_mm_lookup(J, ix, mm) ||
            lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
          goto nomatch;
      }
      rec_mm_callcomp(J, ix, op);
      return;
    }
#endif
  nomatch:
    /* Lookup failed. Retry with __lt and swapped operands. */
    if (!(op & 2)) break;  /* Already at __lt. Interpreter will throw. */
    ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab;
    copyTV(J->L, &ix->tabv, &ix->keyv);
    copyTV(J->L, &ix->keyv, &ix->valv);
    copyTV(J->L, &ix->valv, &ix->tabv);
    op ^= 3;
  }
}
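
/* The swap above implements the Lua 5.1 fallback for a missing __le:
** 'a <= b' is evaluated as 'not (b < a)' via __lt, so the operands are
** exchanged and op ^= 3 flips both the le-bit and the negation bit.
*/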

#if LJ_HASFFI
/* Setup call to cdata comparison metamethod. */
static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
{
  lj_snap_add(J);
  if (tref_iscdata(ix->val)) {
    ix->tab = ix->val;
    copyTV(J->L, &ix->tabv, &ix->valv);
  } else {
    lj_assertJ(tref_iscdata(ix->key), "cdata expected");
    ix->tab = ix->key;
    copyTV(J->L, &ix->tabv, &ix->keyv);
  }
  lj_record_mm_lookup(J, ix, mm);
  rec_mm_callcomp(J, ix, op);
}
#endif

/* -- Indexed access ------------------------------------------------------ */

#ifdef LUAJIT_ENABLE_TABLE_BUMP
/* Bump table allocations in bytecode when they grow during recording. */
static void rec_idx_bump(jit_State *J, RecordIndex *ix)
{
  RBCHashEntry *rbc = &J->rbchash[(ix->tab & (RBCHASH_SLOTS-1))];
  if (tref_ref(ix->tab) == rbc->ref) {
    const BCIns *pc = mref(rbc->pc, const BCIns);
    GCtab *tb = tabV(&ix->tabv);
    uint32_t nhbits;
    IRIns *ir;
    if (!tvisnil(&ix->keyv))
      (void)lj_tab_set(J->L, tb, &ix->keyv);  /* Grow table right now. */
    nhbits = tb->hmask > 0 ? lj_fls(tb->hmask)+1 : 0;
    ir = IR(tref_ref(ix->tab));
    if (ir->o == IR_TNEW) {
      uint32_t ah = bc_d(*pc);
      uint32_t asize = ah & 0x7ff, hbits = ah >> 11;
      if (nhbits > hbits) hbits = nhbits;
      if (tb->asize > asize) {
        asize = tb->asize <= 0x7ff ? tb->asize : 0x7ff;
      }
      if ((asize | (hbits<<11)) != ah) {  /* Has the size changed? */
        /* Patch bytecode, but continue recording (for more patching). */
        setbc_d(pc, (asize | (hbits<<11)));
        /* Patching TNEW operands is only safe if the trace is aborted. */
        ir->op1 = asize; ir->op2 = hbits;
        J->retryrec = 1;  /* Abort the trace at the end of recording. */
      }
    } else if (ir->o == IR_TDUP) {
      GCtab *tpl = gco2tab(proto_kgc(&gcref(rbc->pt)->pt, ~(ptrdiff_t)bc_d(*pc)));
      /* Grow template table, but preserve keys with nil values. */
      if ((tb->asize > tpl->asize && (1u << nhbits)-1 == tpl->hmask) ||
          (tb->asize == tpl->asize && (1u << nhbits)-1 > tpl->hmask)) {
        Node *node = noderef(tpl->node);
        uint32_t i, hmask = tpl->hmask, asize;
        TValue *array;
        for (i = 0; i <= hmask; i++) {
          if (!tvisnil(&node[i].key) && tvisnil(&node[i].val))
            settabV(J->L, &node[i].val, tpl);
        }
        if (!tvisnil(&ix->keyv) && tref_isk(ix->key)) {
          TValue *o = lj_tab_set(J->L, tpl, &ix->keyv);
          if (tvisnil(o)) settabV(J->L, o, tpl);
        }
        lj_tab_resize(J->L, tpl, tb->asize, nhbits);
        node = noderef(tpl->node);
        hmask = tpl->hmask;
        for (i = 0; i <= hmask; i++) {
          /* This is safe, since template tables only hold immutable values. */
          if (tvistab(&node[i].val))
            setnilV(&node[i].val);
        }
        /* The shape of the table may have changed. Clean up array part, too. */
        asize = tpl->asize;
        array = tvref(tpl->array);
        for (i = 0; i < asize; i++) {
          if (tvistab(&array[i]))
            setnilV(&array[i]);
        }
        J->retryrec = 1;  /* Abort the trace at the end of recording. */
      }
    }
  }
}
#endif

/* Record bounds-check. */
static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
{
  /* Try to emit invariant bounds checks. */
  if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) ==
      (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) {
    IRRef ref = tref_ref(ikey);
    IRIns *ir = IR(ref);
    int32_t ofs = 0;
    IRRef ofsref = 0;
    /* Handle constant offsets. */
    if (ir->o == IR_ADD && irref_isk(ir->op2)) {
      ofsref = ir->op2;
      ofs = IR(ofsref)->i;
      ref = ir->op1;
      ir = IR(ref);
    }
    /* Got scalar evolution analysis results for this reference? */
    if (ref == J->scev.idx) {
      int32_t stop;
      lj_assertJ(irt_isint(J->scev.t) && ir->o == IR_SLOAD,
                 "only int SCEV supported");
      stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
      /* Runtime value for stop of loop is within bounds? */
      if ((uint64_t)stop + ofs < (uint64_t)asize) {
        /* Emit invariant bounds check for stop. */
        emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop :
               emitir(IRTI(IR_ADD), J->scev.stop, ofsref));
        /* Emit invariant bounds check for start, if not const or negative. */
        if (!(J->scev.dir && J->scev.start &&
              (int64_t)IR(J->scev.start)->i + ofs >= 0))
          emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey);
        return;
      }
    }
  }
  emitir(IRTGI(IR_ABC), asizeref, ikey);  /* Emit regular bounds check. */
}
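
/* Illustrative effect: for 'for i=1,n do s=s+t[i] end' the bounds check is
** emitted once against the loop stop value (and can be hoisted out of the
** loop), instead of checking i against the array size on every iteration.
*/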

/* Record indexed key lookup. */
static TRef rec_idx_key(jit_State *J, RecordIndex *ix, IRRef *rbref,
                        IRType1 *rbguard)
{
  TRef key;
  GCtab *t = tabV(&ix->tabv);
  ix->oldv = lj_tab_get(J->L, t, &ix->keyv);  /* Lookup previous value. */
  *rbref = 0;
  rbguard->irt = 0;

  /* Integer keys are looked up in the array part first. */
  key = ix->key;
  if (tref_isnumber(key)) {
    int32_t k = numberVint(&ix->keyv);
    if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k)
      k = LJ_MAX_ASIZE;
    if ((MSize)k < LJ_MAX_ASIZE) {  /* Potential array key? */
      TRef ikey = lj_opt_narrow_index(J, key);
      TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
      if ((MSize)k < t->asize) {  /* Currently an array key? */
        TRef arrayref;
        rec_idx_abc(J, asizeref, ikey, t->asize);
        arrayref = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_ARRAY);
        return emitir(IRT(IR_AREF, IRT_PGC), arrayref, ikey);
      } else {  /* Currently not in array (may be an array extension)? */
        emitir(IRTGI(IR_ULE), asizeref, ikey);  /* Inv. bounds check. */
        if (k == 0 && tref_isk(key))
          key = lj_ir_knum_zero(J);  /* Canonicalize 0 or +-0.0 to +0.0. */
        /* And continue with the hash lookup. */
      }
    } else if (!tref_isk(key)) {
      /* We can rule out const numbers which failed the integerness test
      ** above. But all other numbers are potential array keys.
      */
      if (t->asize == 0) {  /* True sparse tables have an empty array part. */
        /* Guard that the array part stays empty. */
        TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
        emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
      } else {
        lj_trace_err(J, LJ_TRERR_NYITMIX);
      }
    }
  }

  /* Otherwise the key is located in the hash part. */
  if (t->hmask == 0) {  /* Shortcut for empty hash part. */
    /* Guard that the hash part stays empty. */
    TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
    emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
    return lj_ir_kkptr(J, niltvg(J2G(J)));
  }
  if (tref_isinteger(key))  /* Hash keys are based on numbers, not ints. */
    key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
  if (tref_isk(key)) {
    /* Optimize lookup of constant hash keys. */
    MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val);
    if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) &&
        hslot <= 65535*(MSize)sizeof(Node)) {
      TRef node, kslot, hm;
      *rbref = J->cur.nins;  /* Mark possible rollback point. */
      *rbguard = J->guardemit;
      hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
      emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
      node = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_NODE);
      kslot = lj_ir_kslot(J, key, hslot / sizeof(Node));
      return emitir(IRTG(IR_HREFK, IRT_PGC), node, kslot);
    }
  }
  /* Fall back to a regular hash lookup. */
  return emitir(IRT(IR_HREF, IRT_PGC), ix->tab, key);
}
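
/* HREFK specializes a constant-key hash lookup to the node slot observed
** at recording time. The preceding guard on hmask ensures the table has
** not been resized since, so the recorded slot index remains valid and the
** lookup becomes a single indexed load.
*/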

/* Determine whether a key is NOT one of the fast metamethod names. */
static int nommstr(jit_State *J, TRef key)
{
  if (tref_isstr(key)) {
    if (tref_isk(key)) {
      GCstr *str = ir_kstr(IR(tref_ref(key)));
      uint32_t mm;
      for (mm = 0; mm <= MM_FAST; mm++)
        if (mmname_str(J2G(J), mm) == str)
          return 0;  /* MUST be one of the fast metamethod names. */
    } else {
      return 0;  /* Variable string key MAY be a metamethod name. */
    }
  }
  return 1;  /* CANNOT be a metamethod name. */
}
1402
1403 /* Record indexed load/store. */
lj_record_idx(jit_State * J,RecordIndex * ix)1404 TRef lj_record_idx(jit_State *J, RecordIndex *ix)
1405 {
1406 TRef xref;
1407 IROp xrefop, loadop;
1408 IRRef rbref;
1409 IRType1 rbguard;
1410 cTValue *oldv;
1411
1412 while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */
1413 /* Never call raw lj_record_idx() on non-table. */
1414 lj_assertJ(ix->idxchain != 0, "bad usage");
1415 if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
1416 lj_trace_err(J, LJ_TRERR_NOMM);
1417 handlemm:
1418 if (tref_isfunc(ix->mobj)) { /* Handle metamethod call. */
1419 BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
1420 TRef *base = J->base + func + LJ_FR2;
1421 TValue *tv = J->L->base + func + LJ_FR2;
1422 base[-LJ_FR2] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
1423 setfuncV(J->L, tv-LJ_FR2, funcV(&ix->mobjv));
1424 copyTV(J->L, tv+1, &ix->tabv);
1425 copyTV(J->L, tv+2, &ix->keyv);
1426 if (ix->val) {
1427 base[3] = ix->val;
1428 copyTV(J->L, tv+3, &ix->valv);
1429 lj_record_call(J, func, 3); /* mobj(tab, key, val) */
1430 return 0;
1431 } else {
1432 lj_record_call(J, func, 2); /* res = mobj(tab, key) */
        return 0;  /* No result yet. */
      }
    }
    /* Otherwise retry lookup with metaobject. */
    ix->tab = ix->mobj;
    copyTV(J->L, &ix->tabv, &ix->mobjv);
    if (--ix->idxchain == 0)
      lj_trace_err(J, LJ_TRERR_IDXLOOP);
  }

  /* First catch nil and NaN keys for tables. */
  if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) {
    if (ix->val)  /* Better fail early. */
      lj_trace_err(J, LJ_TRERR_STORENN);
    if (tref_isk(ix->key)) {
      if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
        goto handlemm;
      return TREF_NIL;
    }
  }

  /* Record the key lookup. */
  xref = rec_idx_key(J, ix, &rbref, &rbguard);
  xrefop = IR(tref_ref(xref))->o;
  loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
  /* The lj_meta_tset() inconsistency is gone, but better play safe. */
  oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv;

  if (ix->val == 0) {  /* Indexed load */
    IRType t = itype2irt(oldv);
    TRef res;
    if (oldv == niltvg(J2G(J))) {
      emitir(IRTG(IR_EQ, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
      res = TREF_NIL;
    } else {
      res = emitir(IRTG(loadop, t), xref, 0);
    }
    if (tref_ref(res) < rbref) {  /* HREFK + load forwarded? */
      lj_ir_rollback(J, rbref);  /* Rollback to eliminate hmask guard. */
      J->guardemit = rbguard;
    }
    if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
      goto handlemm;
    if (irtype_ispri(t)) res = TREF_PRI(t);  /* Canonicalize primitives. */
    return res;
  } else {  /* Indexed store. */
    GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
    int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
    if (tref_ref(xref) < rbref) {  /* HREFK forwarded? */
      lj_ir_rollback(J, rbref);  /* Rollback to eliminate hmask guard. */
      J->guardemit = rbguard;
    }
    if (tvisnil(oldv)) {  /* Previous value was nil? */
      /* Need to duplicate the hasmm check for the early guards. */
      int hasmm = 0;
      if (ix->idxchain && mt) {
        cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex));
        hasmm = mo && !tvisnil(mo);
      }
      if (hasmm)
        emitir(IRTG(loadop, IRT_NIL), xref, 0);  /* Guard for nil value. */
      else if (xrefop == IR_HREF)
        emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_PGC),
               xref, lj_ir_kkptr(J, niltvg(J2G(J))));
      if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
        lj_assertJ(hasmm, "inconsistent metamethod handling");
        goto handlemm;
      }
      lj_assertJ(!hasmm, "inconsistent metamethod handling");
      if (oldv == niltvg(J2G(J))) {  /* Need to insert a new key. */
        TRef key = ix->key;
        if (tref_isinteger(key))  /* NEWREF needs a TValue as a key. */
          key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
        xref = emitir(IRT(IR_NEWREF, IRT_PGC), ix->tab, key);
        keybarrier = 0;  /* NEWREF already takes care of the key barrier. */
#ifdef LUAJIT_ENABLE_TABLE_BUMP
        if ((J->flags & JIT_F_OPT_SINK))  /* Avoid a separate flag. */
          rec_idx_bump(J, ix);
#endif
      }
    } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
      /* Cannot derive that the previous value was non-nil, must do checks. */
      if (xrefop == IR_HREF)  /* Guard against store to niltv. */
        emitir(IRTG(IR_NE, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
      if (ix->idxchain) {  /* Metamethod lookup required? */
        /* A check for NULL metatable is cheaper (hoistable) than a load. */
        if (!mt) {
          TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
          emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
        } else {
          IRType t = itype2irt(oldv);
          emitir(IRTG(loadop, t), xref, 0);  /* Guard for non-nil value. */
        }
      }
    } else {
      keybarrier = 0;  /* Previous non-nil value kept the key alive. */
    }
    /* Convert int to number before storing. */
    if (!LJ_DUALNUM && tref_isinteger(ix->val))
      ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT);
    emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val);
    if (keybarrier || tref_isgcv(ix->val))
      emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
    /* Invalidate neg. metamethod cache for stores with certain string keys. */
    if (!nommstr(J, ix->key)) {
      TRef fref = emitir(IRT(IR_FREF, IRT_PGC), ix->tab, IRFL_TAB_NOMM);
      emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));
    }
    J->needsnap = 1;
    return 0;
  }
}

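/* Example for the indexed store path above (illustrative): recording
** `t.x = 1` where t has no metatable and no entry for "x" emits a guard
** that the lookup hit the nil value sentinel, an IR_NEWREF to create the
** key (NEWREF also covers the key write barrier), and the HSTORE of the
** value. The negative metamethod cache is only invalidated if the key
** could name a fast metamethod; J->needsnap then forces a snapshot.
*/
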
static void rec_tsetm(jit_State *J, BCReg ra, BCReg rn, int32_t i)
{
  RecordIndex ix;
  cTValue *basev = J->L->base;
  GCtab *t = tabV(&basev[ra-1]);
  settabV(J->L, &ix.tabv, t);
  ix.tab = getslot(J, ra-1);
  ix.idxchain = 0;
#ifdef LUAJIT_ENABLE_TABLE_BUMP
  if ((J->flags & JIT_F_OPT_SINK)) {
    if (t->asize < i+rn-ra)
      lj_tab_reasize(J->L, t, i+rn-ra);
    setnilV(&ix.keyv);
    rec_idx_bump(J, &ix);
  }
#endif
  for (; ra < rn; i++, ra++) {
    setintV(&ix.keyv, i);
    ix.key = lj_ir_kint(J, i);
    copyTV(J->L, &ix.valv, &basev[ra]);
    ix.val = getslot(J, ra);
    lj_record_idx(J, &ix);
  }
}

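/* Illustrative example for rec_tsetm() above: `local t = {f()}` compiles
** to BC_TSETM, and the loop records one raw indexed store per returned
** value with consecutive integer keys, bypassing the metamethod chain
** (ix.idxchain = 0).
*/
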
/* -- Upvalue access ------------------------------------------------------ */

/* Check whether upvalue is immutable and ok to constify. */
static int rec_upvalue_constify(jit_State *J, GCupval *uvp)
{
  if (uvp->immutable) {
    cTValue *o = uvval(uvp);
    /* Don't constify objects that may retain large amounts of memory. */
#if LJ_HASFFI
    if (tviscdata(o)) {
      GCcdata *cd = cdataV(o);
      if (!cdataisv(cd) && !(cd->marked & LJ_GC_CDATA_FIN)) {
        CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid);
        if (!ctype_hassize(ct->info) || ct->size <= 16)
          return 1;
      }
      return 0;
    }
#else
    UNUSED(J);
#endif
    if (!(tvistab(o) || tvisudata(o) || tvisthread(o)))
      return 1;
  }
  return 0;
}

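/* Illustrative example: in
**   local pi = 3.14159
**   local function area(r) return pi*r*r end
** the upvalue pi is immutable and numeric, so recording area() embeds the
** number as an IR constant instead of emitting a UREF/ULOAD pair. Tables,
** userdata, threads and large cdata are excluded by the check above.
*/
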
/* Record upvalue load/store. */
static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
{
  GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv;
  TRef fn = getcurrf(J);
  IRRef uref;
  int needbarrier = 0;
  if (rec_upvalue_constify(J, uvp)) {  /* Try to constify immutable upvalue. */
    TRef tr, kfunc;
    lj_assertJ(val == 0, "bad usage");
    if (!tref_isk(fn)) {  /* Late specialization of current function. */
      if (J->pt->flags >= PROTO_CLC_POLY)
        goto noconstify;
      kfunc = lj_ir_kfunc(J, J->fn);
      emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc);
#if LJ_FR2
      J->base[-2] = kfunc;
#else
      J->base[-1] = kfunc | TREF_FRAME;
#endif
      fn = kfunc;
    }
    tr = lj_record_constify(J, uvval(uvp));
    if (tr)
      return tr;
  }
noconstify:
  /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
  uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
  if (!uvp->closed) {
    uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_PGC), fn, uv));
    /* In current stack? */
    if (uvval(uvp) >= tvref(J->L->stack) &&
        uvval(uvp) < tvref(J->L->maxstack)) {
      int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
      if (slot >= 0) {  /* Aliases an SSA slot? */
        emitir(IRTG(IR_EQ, IRT_PGC),
               REF_BASE,
               emitir(IRT(IR_ADD, IRT_PGC), uref,
                      lj_ir_kint(J, (slot - 1 - LJ_FR2) * -8)));
        slot -= (int32_t)J->baseslot;  /* Note: slot number may be negative! */
        if (val == 0) {
          return getslot(J, slot);
        } else {
          J->base[slot] = val;
          if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1);
          return 0;
        }
      }
    }
    emitir(IRTG(IR_UGT, IRT_PGC),
           emitir(IRT(IR_SUB, IRT_PGC), uref, REF_BASE),
           lj_ir_kint(J, (J->baseslot + J->maxslot) * 8));
  } else {
    needbarrier = 1;
    uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_PGC), fn, uv));
  }
  if (val == 0) {  /* Upvalue load */
    IRType t = itype2irt(uvval(uvp));
    TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0);
    if (irtype_ispri(t)) res = TREF_PRI(t);  /* Canonicalize primitive refs. */
    return res;
  } else {  /* Upvalue store. */
    /* Convert int to number before storing. */
    if (!LJ_DUALNUM && tref_isinteger(val))
      val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
    emitir(IRT(IR_USTORE, tref_type(val)), uref, val);
    if (needbarrier && tref_isgcv(val))
      emitir(IRT(IR_OBAR, IRT_NIL), uref, val);
    J->needsnap = 1;
    return 0;
  }
}

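/* Note on the open-upvalue path above (illustrative): if the upvalue still
** lives on the current Lua stack and aliases a slot of the frame being
** recorded, the access is redirected to the tracked SSA slot, with a guard
** on REF_BASE so the trace exits if a later activation finds the upvalue
** at a different stack position. Closed upvalues need UREFC plus a write
** barrier (IR_OBAR) on stores.
*/
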
/* -- Record calls to Lua functions --------------------------------------- */

/* Check unroll limits for calls. */
static void check_call_unroll(jit_State *J, TraceNo lnk)
{
  cTValue *frame = J->L->base - 1;
  void *pc = mref(frame_func(frame)->l.pc, void);
  int32_t depth = J->framedepth;
  int32_t count = 0;
  if ((J->pt->flags & PROTO_VARARG)) depth--;  /* Vararg frame still missing. */
  for (; depth > 0; depth--) {  /* Count frames with same prototype. */
    if (frame_iscont(frame)) depth--;
    frame = frame_prev(frame);
    if (mref(frame_func(frame)->l.pc, void) == pc)
      count++;
  }
  if (J->pc == J->startpc) {
    if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
      J->pc++;
      if (J->framedepth + J->retdepth == 0)
        lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno);  /* Tail-rec. */
      else
        lj_record_stop(J, LJ_TRLINK_UPREC, J->cur.traceno);  /* Up-recursion. */
    }
  } else {
    if (count > J->param[JIT_P_callunroll]) {
      if (lnk) {  /* Possible tail- or up-recursion. */
        lj_trace_flush(J, lnk);  /* Flush trace that only returns. */
        /* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
        hotcount_set(J2GG(J), J->pc+1, lj_prng_u64(&J2G(J)->prng) & 15u);
      }
      lj_trace_err(J, LJ_TRERR_CUNROLL);
    }
  }
}

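/* Illustrative example: recording a self-recursive function such as
**   local function fac(n) if n < 2 then return 1 end return n*fac(n-1) end
** re-enters the same prototype on every call. Once the unroll budget for
** the start PC is spent, the trace is closed as tail- or up-recursion;
** for other call sites recording aborts with LJ_TRERR_CUNROLL instead.
*/
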
/* Record Lua function setup. */
static void rec_func_setup(jit_State *J)
{
  GCproto *pt = J->pt;
  BCReg s, numparams = pt->numparams;
  if ((pt->flags & PROTO_NOJIT))
    lj_trace_err(J, LJ_TRERR_CJITOFF);
  if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS)
    lj_trace_err(J, LJ_TRERR_STACKOV);
  /* Fill up missing parameters with nil. */
  for (s = J->maxslot; s < numparams; s++)
    J->base[s] = TREF_NIL;
  /* The remaining slots should never be read before they are written. */
  J->maxslot = numparams;
}

/* Record Lua vararg function setup. */
static void rec_func_vararg(jit_State *J)
{
  GCproto *pt = J->pt;
  BCReg s, fixargs, vframe = J->maxslot+1+LJ_FR2;
  lj_assertJ((pt->flags & PROTO_VARARG), "FUNCV in non-vararg function");
  if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
    lj_trace_err(J, LJ_TRERR_STACKOV);
  J->base[vframe-1-LJ_FR2] = J->base[-1-LJ_FR2];  /* Copy function up. */
#if LJ_FR2
  J->base[vframe-1] = TREF_FRAME;
#endif
  /* Copy fixarg slots up and set their original slots to nil. */
  fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
  for (s = 0; s < fixargs; s++) {
    J->base[vframe+s] = J->base[s];
    J->base[s] = TREF_NIL;
  }
  J->maxslot = fixargs;
  J->framedepth++;
  J->base += vframe;
  J->baseslot += vframe;
}

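/* Illustrative stack layout after rec_func_vararg() above for
**   local function f(a, ...) end  called as f(1, 2, 3):
** the extra arguments stay below the new vararg frame, the fixarg a is
** copied above it, and J->base/J->baseslot are shifted so the function
** body is recorded against the new frame.
*/
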
/* Record entry to a Lua function. */
static void rec_func_lua(jit_State *J)
{
  rec_func_setup(J);
  check_call_unroll(J, 0);
}

/* Record entry to an already compiled function. */
static void rec_func_jit(jit_State *J, TraceNo lnk)
{
  GCtrace *T;
  rec_func_setup(J);
  T = traceref(J, lnk);
  if (T->linktype == LJ_TRLINK_RETURN) {  /* Trace returns to interpreter? */
    check_call_unroll(J, lnk);
    /* Temporarily unpatch JFUNC* to continue recording across function. */
    J->patchins = *J->pc;
    J->patchpc = (BCIns *)J->pc;
    *J->patchpc = T->startins;
    return;
  }
  J->instunroll = 0;  /* Cannot continue across a compiled function. */
  if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
    lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno);  /* Extra tail-rec. */
  else
    lj_record_stop(J, LJ_TRLINK_ROOT, lnk);  /* Link to the function. */
}

/* -- Vararg handling ----------------------------------------------------- */

/* Detect y = select(x, ...) idiom. */
static int select_detect(jit_State *J)
{
  BCIns ins = J->pc[1];
  if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) {
    cTValue *func = &J->L->base[bc_a(ins)];
    if (tvisfunc(func) && funcV(func)->c.ffid == FF_select) {
      TRef kfunc = lj_ir_kfunc(J, funcV(func));
      emitir(IRTG(IR_EQ, IRT_FUNC), getslot(J, bc_a(ins)), kfunc);
      return 1;
    }
  }
  return 0;
}

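/* Illustrative example: `local y = select(x, ...)` compiles to BC_VARG
** followed by a BC_CALLM with one fixed argument, which is exactly what
** the check above matches. The emitted guard keeps the trace valid only
** while that stack slot really holds the select() fast function.
*/
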
/* Record vararg instruction. */
static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
{
  int32_t numparams = J->pt->numparams;
  ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1 - LJ_FR2;
  lj_assertJ(frame_isvarg(J->L->base-1), "VARG in non-vararg frame");
  if (LJ_FR2 && dst > J->maxslot)
    J->base[dst-1] = 0;  /* Prevent resurrection of unrelated slot. */
  if (J->framedepth > 0) {  /* Simple case: varargs defined on-trace. */
    ptrdiff_t i;
    if (nvararg < 0) nvararg = 0;
    if (nresults == -1) {
      nresults = nvararg;
      J->maxslot = dst + (BCReg)nvararg;
    } else if (dst + nresults > J->maxslot) {
      J->maxslot = dst + (BCReg)nresults;
    }
    for (i = 0; i < nresults; i++)
      J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1 - LJ_FR2) : TREF_NIL;
  } else {  /* Unknown number of varargs passed to trace. */
    TRef fr = emitir(IRTI(IR_SLOAD), LJ_FR2, IRSLOAD_READONLY|IRSLOAD_FRAME);
    int32_t frofs = 8*(1+LJ_FR2+numparams)+FRAME_VARG;
    if (nresults >= 0) {  /* Known fixed number of results. */
      ptrdiff_t i;
      if (nvararg > 0) {
        ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg;
        TRef vbase;
        if (nvararg >= nresults)
          emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
        else
          emitir(IRTGI(IR_EQ), fr,
                 lj_ir_kint(J, (int32_t)frame_ftsz(J->L->base-1)));
        vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
        vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase, lj_ir_kint(J, frofs-8));
        for (i = 0; i < nload; i++) {
          IRType t = itype2irt(&J->L->base[i-1-LJ_FR2-nvararg]);
          TRef aref = emitir(IRT(IR_AREF, IRT_PGC),
                             vbase, lj_ir_kint(J, (int32_t)i));
          TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
          if (irtype_ispri(t)) tr = TREF_PRI(t);  /* Canonicalize primitives. */
          J->base[dst+i] = tr;
        }
      } else {
        emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs));
        nvararg = 0;
      }
      for (i = nvararg; i < nresults; i++)
        J->base[dst+i] = TREF_NIL;
      if (dst + (BCReg)nresults > J->maxslot)
        J->maxslot = dst + (BCReg)nresults;
    } else if (select_detect(J)) {  /* y = select(x, ...) */
      TRef tridx = J->base[dst-1];
      TRef tr = TREF_NIL;
      ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]);
      if (idx < 0) goto nyivarg;
      if (idx != 0 && !tref_isinteger(tridx))
        tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX);
      if (idx != 0 && tref_isk(tridx)) {
        emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT),
               fr, lj_ir_kint(J, frofs+8*(int32_t)idx));
        frofs -= 8;  /* Bias for 1-based index. */
      } else if (idx <= nvararg) {  /* Compute size. */
        TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs));
        if (numparams)
          emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0));
        tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3));
        if (idx != 0) {
          tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1));
          rec_idx_abc(J, tr, tridx, (uint32_t)nvararg);
        }
      } else {
        TRef tmp = lj_ir_kint(J, frofs);
        if (idx != 0) {
          TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3));
          tmp = emitir(IRTI(IR_ADD), tmp2, tmp);
        } else {
          tr = lj_ir_kint(J, 0);
        }
        emitir(IRTGI(IR_LT), fr, tmp);
      }
      if (idx != 0 && idx <= nvararg) {
        IRType t;
        TRef aref, vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
        vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase,
                       lj_ir_kint(J, frofs-(8<<LJ_FR2)));
        t = itype2irt(&J->L->base[idx-2-LJ_FR2-nvararg]);
        aref = emitir(IRT(IR_AREF, IRT_PGC), vbase, tridx);
        tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
        if (irtype_ispri(t)) tr = TREF_PRI(t);  /* Canonicalize primitives. */
      }
      J->base[dst-2-LJ_FR2] = tr;
      J->maxslot = dst-1-LJ_FR2;
      J->bcskip = 2;  /* Skip CALLM + select. */
    } else {
    nyivarg:
      setintV(&J->errinfo, BC_VARG);
      lj_trace_err_info(J, LJ_TRERR_NYIBC);
    }
  }
  if (J->baseslot + J->maxslot >= LJ_MAX_JSLOTS)
    lj_trace_err(J, LJ_TRERR_STACKOV);
}

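/* Illustrative example for the off-trace vararg case above: recording
**   local a, b = ...
** (nresults = 2) in a frame entered from the interpreter guards on the
** vararg frame size, rebases the vararg area off REF_BASE, and loads each
** value with a guarded IR_VLOAD; missing varargs become TREF_NIL.
*/
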
/* -- Record allocations -------------------------------------------------- */

static TRef rec_tnew(jit_State *J, uint32_t ah)
{
  uint32_t asize = ah & 0x7ff;
  uint32_t hbits = ah >> 11;
  TRef tr;
  if (asize == 0x7ff) asize = 0x801;
  tr = emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
#ifdef LUAJIT_ENABLE_TABLE_BUMP
  J->rbchash[(tr & (RBCHASH_SLOTS-1))].ref = tref_ref(tr);
  setmref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pc, J->pc);
  setgcref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
#endif
  return tr;
}

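/* Illustrative note: the TNEW operand handled above packs the array size
** hint into the low 11 bits and log2 of the hash size into the upper bits.
** The saturated value 0x7ff means the real size did not fit the bytecode
** encoding, so it is bumped to 0x801 as a conservative allocation hint.
*/
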
/* -- Concatenation ------------------------------------------------------- */

static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot)
{
  TRef *top = &J->base[topslot];
  TValue savetv[5+LJ_FR2];
  BCReg s;
  RecordIndex ix;
  lj_assertJ(baseslot < topslot, "bad CAT arg");
  for (s = baseslot; s <= topslot; s++)
    (void)getslot(J, s);  /* Ensure all arguments have a reference. */
  if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) {
    TRef tr, hdr, *trp, *xbase, *base = &J->base[baseslot];
    /* First convert numbers to strings. */
    for (trp = top; trp >= base; trp--) {
      if (tref_isnumber(*trp))
        *trp = emitir(IRT(IR_TOSTR, IRT_STR), *trp,
                      tref_isnum(*trp) ? IRTOSTR_NUM : IRTOSTR_INT);
      else if (!tref_isstr(*trp))
        break;
    }
    xbase = ++trp;
    tr = hdr = emitir(IRT(IR_BUFHDR, IRT_PGC),
                      lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET);
    do {
      tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr, *trp++);
    } while (trp <= top);
    tr = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
    J->maxslot = (BCReg)(xbase - J->base);
    if (xbase == base) return tr;  /* Return simple concatenation result. */
    /* Pass partial result. */
    topslot = J->maxslot--;
    *xbase = tr;
    top = xbase;
    setstrV(J->L, &ix.keyv, &J2G(J)->strempty);  /* Simulate string result. */
  } else {
    J->maxslot = topslot-1;
    copyTV(J->L, &ix.keyv, &J->L->base[topslot]);
  }
  copyTV(J->L, &ix.tabv, &J->L->base[topslot-1]);
  ix.tab = top[-1];
  ix.key = top[0];
  memcpy(savetv, &J->L->base[topslot-1], sizeof(savetv));  /* Save slots. */
  rec_mm_arith(J, &ix, MM_concat);  /* Call __concat metamethod. */
  memcpy(&J->L->base[topslot-1], savetv, sizeof(savetv));  /* Restore slots. */
  return 0;  /* No result yet. */
}

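/* Illustrative example: recording `a .. b .. c` with string or number
** operands folds the whole chain into one temporary buffer: IR_BUFHDR
** resets the global buffer, one IR_BUFPUT per operand appends to it, and
** IR_BUFSTR materializes the final string. A non-concatenable operand
** leaves a partial result and falls back to the __concat metamethod.
*/
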
/* -- Record bytecode ops ------------------------------------------------- */

/* Prepare for comparison. */
static void rec_comp_prep(jit_State *J)
{
  /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */
  if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins)
    emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
  lj_snap_add(J);
}

/* Fixup comparison. */
static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
{
  BCIns jmpins = pc[1];
  const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
  SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
  /* Set PC to opposite target to avoid re-recording the comp. in side trace. */
#if LJ_FR2
  SnapEntry *flink = &J->cur.snapmap[snap->mapofs + snap->nent];
  uint64_t pcbase;
  memcpy(&pcbase, flink, sizeof(uint64_t));
  pcbase = (pcbase & 0xff) | (u64ptr(npc) << 8);
  memcpy(flink, &pcbase, sizeof(uint64_t));
#else
  J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
#endif
  J->needsnap = 1;
  if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
  lj_snap_shrink(J);  /* Shrink last snapshot if possible. */
}

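/* Illustrative note: the snapshot taken around a comparison stores the PC
** of the *opposite* branch target. If the guard fails in a side exit, the
** interpreter resumes directly on the untaken path, so a side trace never
** needs to re-record the comparison itself.
*/
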
/* Record the next bytecode instruction (_before_ it's executed). */
void lj_record_ins(jit_State *J)
{
  cTValue *lbase;
  RecordIndex ix;
  const BCIns *pc;
  BCIns ins;
  BCOp op;
  TRef ra, rb, rc;

  /* Perform post-processing action before recording the next instruction. */
  if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
    switch (J->postproc) {
    case LJ_POST_FIXCOMP:  /* Fixup comparison. */
      pc = (const BCIns *)(uintptr_t)J2G(J)->tmptv.u64;
      rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
      /* fallthrough */
    case LJ_POST_FIXGUARD:  /* Fixup and emit pending guard. */
    case LJ_POST_FIXGUARDSNAP:  /* Fixup and emit pending guard and snapshot. */
      if (!tvistruecond(&J2G(J)->tmptv2)) {
        J->fold.ins.o ^= 1;  /* Flip guard to opposite. */
        if (J->postproc == LJ_POST_FIXGUARDSNAP) {
          SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
          J->cur.snapmap[snap->mapofs+snap->nent-1]--;  /* False -> true. */
        }
      }
      lj_opt_fold(J);  /* Emit pending guard. */
      /* fallthrough */
    case LJ_POST_FIXBOOL:
      if (!tvistruecond(&J2G(J)->tmptv2)) {
        BCReg s;
        TValue *tv = J->L->base;
        for (s = 0; s < J->maxslot; s++)  /* Fixup stack slot (if any). */
          if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) {
            J->base[s] = TREF_FALSE;
            break;
          }
      }
      break;
    case LJ_POST_FIXCONST:
      {
        BCReg s;
        TValue *tv = J->L->base;
        for (s = 0; s < J->maxslot; s++)  /* Constify stack slots (if any). */
          if (J->base[s] == TREF_NIL && !tvisnil(&tv[s]))
            J->base[s] = lj_record_constify(J, &tv[s]);
      }
      break;
    case LJ_POST_FFRETRY:  /* Suppress recording of retried fast function. */
      if (bc_op(*J->pc) >= BC__MAX)
        return;
      break;
    default: lj_assertJ(0, "bad post-processing mode"); break;
    }
    J->postproc = LJ_POST_NONE;
  }
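
  /* Illustrative note on the post-processing above: some recorded ops
  ** (e.g. metamethod-based comparisons) emit a speculative guard before
  ** the VM has actually executed the instruction. Once the runtime result
  ** is known (in tmptv2), the pending guard, snapshot entry or boolean
  ** result slot is patched to match the observed outcome.
  */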

  /* Need snapshot before recording next bytecode (e.g. after a store). */
  if (J->needsnap) {
    J->needsnap = 0;
    lj_snap_purge(J);
    lj_snap_add(J);
    J->mergesnap = 1;
  }

  /* Skip some bytecodes. */
  if (LJ_UNLIKELY(J->bcskip > 0)) {
    J->bcskip--;
    return;
  }

  /* Record only closed loops for root traces. */
  pc = J->pc;
  if (J->framedepth == 0 &&
      (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent)
    lj_trace_err(J, LJ_TRERR_LLEAVE);

#ifdef LUA_USE_ASSERT
  rec_check_slots(J);
  rec_check_ir(J);
#endif

#if LJ_HASPROFILE
  rec_profile_ins(J, pc);
#endif

  /* Keep a copy of the runtime values of var/num/str operands. */
#define rav (&ix.valv)
#define rbv (&ix.tabv)
#define rcv (&ix.keyv)

  lbase = J->L->base;
  ins = *pc;
  op = bc_op(ins);
  ra = bc_a(ins);
  ix.val = 0;
  switch (bcmode_a(op)) {
  case BCMvar:
    copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break;
  default: break;  /* Handled later. */
  }
  rb = bc_b(ins);
  rc = bc_c(ins);
  switch (bcmode_b(op)) {
  case BCMnone: rb = 0; rc = bc_d(ins); break;  /* Upgrade rc to 'rd'. */
  case BCMvar:
    copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break;
  default: break;  /* Handled later. */
  }
  switch (bcmode_c(op)) {
  case BCMvar:
    copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
  case BCMpri: setpriV(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
  case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
    copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
    lj_ir_knumint(J, numV(tv)); } break;
  case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc));
    setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break;
  default: break;  /* Handled later. */
  }

  switch (op) {

  /* -- Comparison ops ---------------------------------------------------- */

  case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
#if LJ_HASFFI
    if (tref_iscdata(ra) || tref_iscdata(rc)) {
      rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt);
      break;
    }
#endif
    /* Emit nothing for two numeric or string consts. */
    if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) {
      IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra);
      IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc);
      int irop;
      if (ta != tc) {
        /* Widen mixed number/int comparisons to number/number comparison. */
        if (ta == IRT_INT && tc == IRT_NUM) {
          ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT);
          ta = IRT_NUM;
        } else if (ta == IRT_NUM && tc == IRT_INT) {
          rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
        } else if (LJ_52) {
          ta = IRT_NIL;  /* Force metamethod for different types. */
        } else if (!((ta == IRT_FALSE || ta == IRT_TRUE) &&
                     (tc == IRT_FALSE || tc == IRT_TRUE))) {
          break;  /* Interpreter will throw for two different types. */
        }
      }
      rec_comp_prep(J);
      irop = (int)op - (int)BC_ISLT + (int)IR_LT;
      if (ta == IRT_NUM) {
        if ((irop & 1)) irop ^= 4;  /* ISGE/ISGT are unordered. */
        if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
          irop ^= 5;
      } else if (ta == IRT_INT) {
        if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
          irop ^= 1;
      } else if (ta == IRT_STR) {
        if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1;
        ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc);
        rc = lj_ir_kint(J, 0);
        ta = IRT_INT;
      } else {
        rec_mm_comp(J, &ix, (int)op);
        break;
      }
      emitir(IRTG(irop, ta), ra, rc);
      rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1);
    }
    break;

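  /* Illustrative example: recording `if a < b then ... end` where a is an
  ** integer and b is a number first widens a via IR_CONV, then emits a
  ** guarded comparison whose sense is flipped, if necessary, to match the
  ** outcome the interpreter is about to take for the current operands.
  */
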
  case BC_ISEQV: case BC_ISNEV:
  case BC_ISEQS: case BC_ISNES:
  case BC_ISEQN: case BC_ISNEN:
  case BC_ISEQP: case BC_ISNEP:
#if LJ_HASFFI
    if (tref_iscdata(ra) || tref_iscdata(rc)) {
      rec_mm_comp_cdata(J, &ix, op, MM_eq);
      break;
    }
#endif
    /* Emit nothing for two non-table, non-udata consts. */
    if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) {
      int diff;
      rec_comp_prep(J);
      diff = lj_record_objcmp(J, ra, rc, rav, rcv);
      if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra)))
        rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff);
      else if (diff == 1)  /* Only check __eq if different, but same type. */
        rec_mm_equal(J, &ix, (int)op);
    }
    break;

  /* -- Unary test and copy ops ------------------------------------------- */

  case BC_ISTC: case BC_ISFC:
    if ((op & 1) == tref_istruecond(rc))
      rc = 0;  /* Don't store if condition is not true. */
    /* fallthrough */
  case BC_IST: case BC_ISF:  /* Type specialization suffices. */
    if (bc_a(pc[1]) < J->maxslot)
      J->maxslot = bc_a(pc[1]);  /* Shrink used slots. */
    break;

  case BC_ISTYPE: case BC_ISNUM:
    /* These coercions need to correspond with lj_meta_istype(). */
    if (LJ_DUALNUM && rc == ~LJ_TNUMX+1)
      ra = lj_opt_narrow_toint(J, ra);
    else if (rc == ~LJ_TNUMX+2)
      ra = lj_ir_tonum(J, ra);
    else if (rc == ~LJ_TSTR+1)
      ra = lj_ir_tostr(J, ra);
    /* else: type specialization suffices. */
    J->base[bc_a(ins)] = ra;
    break;

  /* -- Unary ops --------------------------------------------------------- */

  case BC_NOT:
    /* Type specialization already forces const result. */
    rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE;
    break;

  case BC_LEN:
    if (tref_isstr(rc))
      rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
    else if (!LJ_52 && tref_istab(rc))
      rc = emitir(IRTI(IR_ALEN), rc, TREF_NIL);
    else
      rc = rec_mm_len(J, rc, rcv);
    break;

  /* -- Arithmetic ops ---------------------------------------------------- */

  case BC_UNM:
    if (tref_isnumber_str(rc)) {
      rc = lj_opt_narrow_unm(J, rc, rcv);
    } else {
      ix.tab = rc;
      copyTV(J->L, &ix.tabv, rcv);
      rc = rec_mm_arith(J, &ix, MM_unm);
    }
    break;

  case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV:
    /* Swap rb/rc and rbv/rcv. rav is temp. */
    ix.tab = rc; ix.key = rc = rb; rb = ix.tab;
    copyTV(J->L, rav, rbv);
    copyTV(J->L, rbv, rcv);
    copyTV(J->L, rcv, rav);
    if (op == BC_MODNV)
      goto recmod;
    /* fallthrough */
  case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN:
  case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: {
    MMS mm = bcmode_mm(op);
    if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
      rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv,
                               (int)mm - (int)MM_add + (int)IR_ADD);
    else
      rc = rec_mm_arith(J, &ix, mm);
    break;
  }

  case BC_MODVN: case BC_MODVV:
  recmod:
    if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
      rc = lj_opt_narrow_mod(J, rb, rc, rbv, rcv);
    else
      rc = rec_mm_arith(J, &ix, MM_mod);
    break;

  case BC_POW:
    if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
      rc = lj_opt_narrow_pow(J, rb, rc, rbv, rcv);
    else
      rc = rec_mm_arith(J, &ix, MM_pow);
    break;

  /* -- Miscellaneous ops ------------------------------------------------- */

  case BC_CAT:
    rc = rec_cat(J, rb, rc);
    break;

  /* -- Constant and move ops --------------------------------------------- */

  case BC_MOV:
    /* Clear gap of method call to avoid resurrecting previous refs. */
    if (ra > J->maxslot) {
#if LJ_FR2
      memset(J->base + J->maxslot, 0, (ra - J->maxslot) * sizeof(TRef));
#else
      J->base[ra-1] = 0;
#endif
    }
    break;
  case BC_KSTR: case BC_KNUM: case BC_KPRI:
    break;
  case BC_KSHORT:
    rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
    break;
  case BC_KNIL:
    if (LJ_FR2 && ra > J->maxslot)
      J->base[ra-1] = 0;
    while (ra <= rc)
      J->base[ra++] = TREF_NIL;
    if (rc >= J->maxslot) J->maxslot = rc+1;
    break;
#if LJ_HASFFI
  case BC_KCDATA:
    rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA);
    break;
#endif

  /* -- Upvalue and function ops ------------------------------------------ */

  case BC_UGET:
    rc = rec_upvalue(J, rc, 0);
    break;
  case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP:
    rec_upvalue(J, ra, rc);
    break;

  /* -- Table ops --------------------------------------------------------- */

  case BC_GGET: case BC_GSET:
    settabV(J->L, &ix.tabv, tabref(J->fn->l.env));
    ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV);
    ix.idxchain = LJ_MAX_IDXCHAIN;
    rc = lj_record_idx(J, &ix);
    break;

  case BC_TGETB: case BC_TSETB:
    setintV(&ix.keyv, (int32_t)rc);
    ix.key = lj_ir_kint(J, (int32_t)rc);
    /* fallthrough */
  case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS:
    ix.idxchain = LJ_MAX_IDXCHAIN;
    rc = lj_record_idx(J, &ix);
    break;
  case BC_TGETR: case BC_TSETR:
    ix.idxchain = 0;
    rc = lj_record_idx(J, &ix);
    break;

  case BC_TSETM:
    rec_tsetm(J, ra, (BCReg)(J->L->top - J->L->base), (int32_t)rcv->u32.lo);
    break;

  case BC_TNEW:
    rc = rec_tnew(J, rc);
    break;
  case BC_TDUP:
    rc = emitir(IRTG(IR_TDUP, IRT_TAB),
                lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
#ifdef LUAJIT_ENABLE_TABLE_BUMP
    J->rbchash[(rc & (RBCHASH_SLOTS-1))].ref = tref_ref(rc);
    setmref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pc, pc);
    setgcref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
#endif
    break;

  /* -- Calls and vararg handling ----------------------------------------- */

  case BC_ITERC:
    J->base[ra] = getslot(J, ra-3);
    J->base[ra+1+LJ_FR2] = getslot(J, ra-2);
    J->base[ra+2+LJ_FR2] = getslot(J, ra-1);
    {  /* Do the actual copy now because lj_record_call needs the values. */
      TValue *b = &J->L->base[ra];
      copyTV(J->L, b, b-3);
      copyTV(J->L, b+1+LJ_FR2, b-2);
      copyTV(J->L, b+2+LJ_FR2, b-1);
    }
    lj_record_call(J, ra, (ptrdiff_t)rc-1);
    break;

  /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
  case BC_CALLM:
    rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
    /* fallthrough */
  case BC_CALL:
    lj_record_call(J, ra, (ptrdiff_t)rc-1);
    break;

  case BC_CALLMT:
    rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
    /* fallthrough */
  case BC_CALLT:
    lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
    break;

  case BC_VARG:
    rec_varg(J, ra, (ptrdiff_t)rb-1);
    break;

  /* -- Returns ----------------------------------------------------------- */

  case BC_RETM:
    /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */
    rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
    /* fallthrough */
  case BC_RET: case BC_RET0: case BC_RET1:
#if LJ_HASPROFILE
    rec_profile_ret(J);
#endif
    lj_record_ret(J, ra, (ptrdiff_t)rc-1);
    break;

  /* -- Loops and branches ------------------------------------------------ */

  case BC_FORI:
    if (rec_for(J, pc, 0) != LOOPEV_LEAVE)
      J->loopref = J->cur.nins;
    break;
  case BC_JFORI:
    lj_assertJ(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL,
               "JFORI does not point to JFORL");
    if (rec_for(J, pc, 0) != LOOPEV_LEAVE)  /* Link to existing loop. */
      lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
    /* Continue tracing if the loop is not entered. */
    break;

  case BC_FORL:
    rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1));
    break;
  case BC_ITERL:
    rec_loop_interp(J, pc, rec_iterl(J, *pc));
    break;
  case BC_LOOP:
    rec_loop_interp(J, pc, rec_loop(J, ra, 1));
    break;

  case BC_JFORL:
    rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1));
    break;
  case BC_JITERL:
    rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins));
    break;
  case BC_JLOOP:
    rec_loop_jit(J, rc, rec_loop(J, ra,
                                 !bc_isret(bc_op(traceref(J, rc)->startins))));
    break;

  case BC_IFORL:
  case BC_IITERL:
  case BC_ILOOP:
  case BC_IFUNCF:
  case BC_IFUNCV:
    lj_trace_err(J, LJ_TRERR_BLACKL);
    break;

  case BC_JMP:
    if (ra < J->maxslot)
      J->maxslot = ra;  /* Shrink used slots. */
    break;

  /* -- Function headers -------------------------------------------------- */

  case BC_FUNCF:
    rec_func_lua(J);
    break;
  case BC_JFUNCF:
    rec_func_jit(J, rc);
    break;

  case BC_FUNCV:
    rec_func_vararg(J);
    rec_func_lua(J);
    break;
  case BC_JFUNCV:
    /* Cannot happen. No hotcall counting for vararg funcs. */
    lj_assertJ(0, "unsupported vararg hotcall");
    break;

  case BC_FUNCC:
  case BC_FUNCCW:
    lj_ffrecord_func(J);
    break;

  default:
    if (op >= BC__MAX) {
      lj_ffrecord_func(J);
      break;
    }
    /* fallthrough */
  case BC_ITERN:
  case BC_ISNEXT:
  case BC_UCLO:
  case BC_FNEW:
    setintV(&J->errinfo, (int32_t)op);
    lj_trace_err_info(J, LJ_TRERR_NYIBC);
    break;
  }

  /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
  if (bcmode_a(op) == BCMdst && rc) {
    J->base[ra] = rc;
    if (ra >= J->maxslot) {
#if LJ_FR2
      if (ra > J->maxslot) J->base[ra-1] = 0;
#endif
      J->maxslot = ra+1;
    }
  }

#undef rav
#undef rbv
#undef rcv

  /* Limit the number of recorded IR instructions and constants. */
  if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord] ||
      J->cur.nk < REF_BIAS-(IRRef)J->param[JIT_P_maxirconst])
    lj_trace_err(J, LJ_TRERR_TRACEOV);
}

/* -- Recording setup ----------------------------------------------------- */

/* Setup recording for a root trace started by a hot loop. */
static const BCIns *rec_setup_root(jit_State *J)
{
  /* Determine the next PC and the bytecode range for the loop. */
  const BCIns *pcj, *pc = J->pc;
  BCIns ins = *pc;
  BCReg ra = bc_a(ins);
  switch (bc_op(ins)) {
  case BC_FORL:
    J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
    pc += 1+bc_j(ins);
    J->bc_min = pc;
    break;
  case BC_ITERL:
    lj_assertJ(bc_op(pc[-1]) == BC_ITERC, "no ITERC before ITERL");
    J->maxslot = ra + bc_b(pc[-1]) - 1;
    J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
    pc += 1+bc_j(ins);
    lj_assertJ(bc_op(pc[-1]) == BC_JMP, "ITERL does not point to JMP+1");
    J->bc_min = pc;
    break;
  case BC_LOOP:
    /* Only check BC range for real loops, but not for "repeat until true". */
    pcj = pc + bc_j(ins);
    ins = *pcj;
    if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) {
      J->bc_min = pcj+1 + bc_j(ins);
      J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
    }
    J->maxslot = ra;
    pc++;
    break;
  case BC_RET:
  case BC_RET0:
  case BC_RET1:
    /* No bytecode range check for down-recursive root traces. */
    J->maxslot = ra + bc_d(ins) - 1;
    break;
  case BC_FUNCF:
    /* No bytecode range check for root traces started by a hot call. */
    J->maxslot = J->pt->numparams;
    pc++;
    break;
  case BC_CALLM:
  case BC_CALL:
  case BC_ITERC:
    /* No bytecode range check for stitched traces. */
    pc++;
    break;
  default:
    lj_assertJ(0, "bad root trace start bytecode %d", bc_op(ins));
    break;
  }
  return pc;
}

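/* Illustrative example: when a hot `for i=1,n do ... end` triggers on its
** BC_FORL, the case above rewinds the PC to the first instruction of the
** loop body and restricts recording to the bytecode range of the loop, so
** leaving that range aborts the root trace (LJ_TRERR_LLEAVE).
*/
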
/* Setup for recording a new trace. */
void lj_record_setup(jit_State *J)
{
  uint32_t i;

  /* Initialize state related to current trace. */
  memset(J->slot, 0, sizeof(J->slot));
  memset(J->chain, 0, sizeof(J->chain));
#ifdef LUAJIT_ENABLE_TABLE_BUMP
  memset(J->rbchash, 0, sizeof(J->rbchash));
#endif
  memset(J->bpropcache, 0, sizeof(J->bpropcache));
  J->scev.idx = REF_NIL;
  setmref(J->scev.pc, NULL);

  J->baseslot = 1+LJ_FR2;  /* Invoking function is at base[-1-LJ_FR2]. */
  J->base = J->slot + J->baseslot;
  J->maxslot = 0;
  J->framedepth = 0;
  J->retdepth = 0;

  J->instunroll = J->param[JIT_P_instunroll];
  J->loopunroll = J->param[JIT_P_loopunroll];
  J->tailcalled = 0;
  J->loopref = 0;

  J->bc_min = NULL;  /* Means no limit. */
  J->bc_extent = ~(MSize)0;

  /* Emit instructions for fixed references. Also triggers initial IR alloc. */
  emitir_raw(IRT(IR_BASE, IRT_PGC), J->parent, J->exitno);
  for (i = 0; i <= 2; i++) {
    IRIns *ir = IR(REF_NIL-i);
    ir->i = 0;
    ir->t.irt = (uint8_t)(IRT_NIL+i);
    ir->o = IR_KPRI;
    ir->prev = 0;
  }
  J->cur.nk = REF_TRUE;

  J->startpc = J->pc;
  setmref(J->cur.startpc, J->pc);
  if (J->parent) {  /* Side trace. */
    GCtrace *T = traceref(J, J->parent);
    TraceNo root = T->root ? T->root : J->parent;
    J->cur.root = (uint16_t)root;
    J->cur.startins = BCINS_AD(BC_JMP, 0, 0);
    /* Check whether we could at least potentially form an extra loop. */
    if (J->exitno == 0 && T->snap[0].nent == 0) {
      /* We can narrow a FORL for some side traces, too. */
      if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI &&
          bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) {
        lj_snap_add(J);
        rec_for_loop(J, J->pc-1, &J->scev, 1);
        goto sidecheck;
      }
    } else {
      J->startpc = NULL;  /* Prevent forming an extra loop. */
    }
    lj_snap_replay(J, T);
  sidecheck:
    if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
        T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
                                    J->param[JIT_P_tryside]) {
      lj_record_stop(J, LJ_TRLINK_INTERP, 0);
    }
  } else {  /* Root trace. */
    J->cur.root = 0;
    J->cur.startins = *J->pc;
    J->pc = rec_setup_root(J);
    /* Note: the loop instruction itself is recorded at the end and not
    ** at the start! So snapshot #0 needs to point to the *next* instruction.
    */
    lj_snap_add(J);
    if (bc_op(J->cur.startins) == BC_FORL)
      rec_for_loop(J, J->pc-1, &J->scev, 1);
    else if (bc_op(J->cur.startins) == BC_ITERC)
      J->startpc = NULL;
    if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
      lj_trace_err(J, LJ_TRERR_STACKOV);
  }
#if LJ_HASPROFILE
  J->prev_pt = NULL;
  J->prev_line = -1;
#endif
#ifdef LUAJIT_ENABLE_CHECKHOOK
  /* Regularly check for instruction/line hooks from compiled code and
  ** exit to the interpreter if the hooks are set.
  **
  ** This is a compile-time option and disabled by default, since the
  ** hook checks may be quite expensive in tight loops.
  **
  ** Note this is only useful if hooks are *not* set most of the time.
  ** Use this only if you want to *asynchronously* interrupt the execution.
  **
  ** You can set the instruction hook via lua_sethook() with a count of 1
  ** from a signal handler or another native thread. Please have a look
  ** at the first few functions in luajit.c for an example (Ctrl-C handler).
  */
  {
    TRef tr = emitir(IRT(IR_XLOAD, IRT_U8),
                     lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE);
    tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT)));
    emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
  }
#endif
}

#undef IR
#undef emitir_raw
#undef emitir

#endif