1 /*
2 ** SSA IR (Intermediate Representation) emitter.
3 ** Copyright (C) 2005-2016 Mike Pall. See Copyright Notice in luajit.h
4 */
5
6 #define lj_ir_c
7 #define LUA_CORE
8
9 /* For pointers to libc/libm functions. */
10 #include <stdio.h>
11 #include <math.h>
12
13 #include "lj_obj.h"
14
15 #if LJ_HASJIT
16
17 #include "lj_gc.h"
18 #include "lj_buf.h"
19 #include "lj_str.h"
20 #include "lj_tab.h"
21 #include "lj_ir.h"
22 #include "lj_jit.h"
23 #include "lj_ircall.h"
24 #include "lj_iropt.h"
25 #include "lj_trace.h"
26 #if LJ_HASFFI
27 #include "lj_ctype.h"
28 #include "lj_cdata.h"
29 #include "lj_carith.h"
30 #endif
31 #include "lj_vm.h"
32 #include "lj_strscan.h"
33 #include "lj_strfmt.h"
34 #include "lj_lib.h"
35
36 /* Some local macros to save typing. Undef'd at the end. */
37 #define IR(ref) (&J->cur.ir[(ref)])
38 #define fins (&J->fold.ins)
39
40 /* Pass IR on to next optimization in chain (FOLD). */
41 #define emitir(ot, a, b) (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
42
43 /* -- IR tables ----------------------------------------------------------- */
44
/* IR instruction modes, one byte per opcode (indexed by IROp).
** Expanded from the IRDEF/IRMODE X-macros in lj_ir.h.
** The trailing 0 is a sentinel for the IR__MAX slot.
*/
LJ_DATADEF const uint8_t lj_ir_mode[IR__MAX+1] = {
IRDEF(IRMODE)
  0
};
50
/* IR type sizes in bytes, indexed by IRType.
** Expanded from the IRTDEF X-macro; the trailing 0 is a sentinel
** for the IRT__MAX slot.
*/
LJ_DATADEF const uint8_t lj_ir_type_size[IRT__MAX+1] = {
#define IRTSIZE(name, size)	size,
IRTDEF(IRTSIZE)
#undef IRTSIZE
  0
};
58
/* C call info for CALL* instructions, indexed by IRCallID.
** Each entry packs the C function pointer plus the argument count,
** call kind, result IRType and flags into the CCallInfo fields.
** Terminated by a { NULL, 0 } sentinel entry.
*/
LJ_DATADEF const CCallInfo lj_ir_callinfo[] = {
#define IRCALLCI(cond, name, nargs, kind, type, flags) \
  { (ASMFunction)IRCALLCOND_##cond(name), \
    (nargs)|(CCI_CALL_##kind)|(IRT_##type<<CCI_OTSHIFT)|(flags) },
IRCALLDEF(IRCALLCI)
#undef IRCALLCI
  { NULL, 0 }
};
68
69 /* -- IR emitter ---------------------------------------------------------- */
70
/* Grow IR buffer at the top: double the existing buffer or allocate the
** initial one. May move the buffer, invalidating all IRIns *!
*/
void LJ_FASTCALL lj_ir_growtop(jit_State *J)
{
  IRIns *baseir = J->irbuf + J->irbotlim;  /* Actual allocation start. */
  MSize szins = J->irtoplim - J->irbotlim;  /* Current capacity in IRIns. */
  if (szins) {
    /* Double the existing buffer. */
    baseir = (IRIns *)lj_mem_realloc(J->L, baseir, szins*sizeof(IRIns),
				     2*szins*sizeof(IRIns));
    J->irtoplim = J->irbotlim + 2*szins;
  } else {
    /* First allocation: reserve a quarter of it below REF_BASE, where the
    ** constants grow downwards (see the interning section below).
    */
    baseir = (IRIns *)lj_mem_realloc(J->L, NULL, 0, LJ_MIN_IRSZ*sizeof(IRIns));
    J->irbotlim = REF_BASE - LJ_MIN_IRSZ/4;
    J->irtoplim = J->irbotlim + LJ_MIN_IRSZ;
  }
  /* Bias the buffer pointer so it can be indexed directly with an IRRef. */
  J->cur.ir = J->irbuf = baseir - J->irbotlim;
}
87
/* Grow IR buffer at the bottom or shift it up.
** Constants grow downwards; when they hit J->irbotlim, either shift the
** instruction area up within the existing buffer (if more than half of it
** is free on top) or reallocate a doubled buffer.
** Either path may move the buffer contents, invalidating all IRIns *!
*/
static void lj_ir_growbot(jit_State *J)
{
  IRIns *baseir = J->irbuf + J->irbotlim;  /* Actual allocation start. */
  MSize szins = J->irtoplim - J->irbotlim;  /* Current capacity in IRIns. */
  lua_assert(szins != 0);
  lua_assert(J->cur.nk == J->irbotlim);  /* Constants must be at the bottom. */
  if (J->cur.nins + (szins >> 1) < J->irtoplim) {
    /* More than half of the buffer is free on top: shift up by a quarter. */
    MSize ofs = szins >> 2;
    /* memmove: source and destination overlap within the same buffer. */
    memmove(baseir + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
    J->irbotlim -= ofs;  /* Shifting up lowers both limits by the offset. */
    J->irtoplim -= ofs;
    J->cur.ir = J->irbuf = baseir - J->irbotlim;  /* Re-bias buffer pointer. */
  } else {
    /* Double the buffer size, but split the growth amongst top/bottom. */
    IRIns *newbase = lj_mem_newt(J->L, 2*szins*sizeof(IRIns), IRIns);
    MSize ofs = szins >= 256 ? 128 : (szins >> 1);  /* Limit bottom growth. */
    memcpy(newbase + ofs, baseir, (J->cur.nins - J->irbotlim)*sizeof(IRIns));
    lj_mem_free(G(J->L), baseir, szins*sizeof(IRIns));
    J->irbotlim -= ofs;
    J->irtoplim = J->irbotlim + 2*szins;
    J->cur.ir = J->irbuf = newbase - J->irbotlim;  /* Re-bias buffer pointer. */
  }
}
113
/* Emit IR without any optimizations.
** Appends the pending instruction from J->fold.ins at the top of the
** buffer, links it into its per-opcode chain and returns a typed ref.
*/
TRef LJ_FASTCALL lj_ir_emit(jit_State *J)
{
  IRRef ref = lj_ir_nextins(J);  /* May grow/move the IR buffer ... */
  IRIns *ir = IR(ref);  /* ... so derive the instruction pointer after it. */
  IROp op = fins->o;
  ir->prev = J->chain[op];  /* Link into the per-opcode chain. */
  J->chain[op] = (IRRef1)ref;
  ir->o = op;
  ir->op1 = fins->op1;
  ir->op2 = fins->op2;
  /* Accumulate the emitted type bits; the field name suggests this tracks
  ** guard emission -- NOTE(review): confirm against users of guardemit.
  */
  J->guardemit.irt |= fins->t.irt;
  return TREF(ref, irt_t((ir->t = fins->t)));
}
128
/* Emit call to a C function.
** The variadic arguments are IR references for the call arguments.
** They are folded pairwise into a chain of CARG instructions; the final
** CALL* instruction carries the last chain link and the IRCallID.
*/
TRef lj_ir_call(jit_State *J, IRCallID id, ...)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  uint32_t n = CCI_NARGS(ci);
  TRef tr = TREF_NIL;
  va_list argp;
  va_start(argp, id);
  /* CCI_L: presumably an implicit lua_State * argument that is not passed
  ** as an IR ref -- TODO(review): confirm against lj_ircall.h.
  */
  if ((ci->flags & CCI_L)) n--;
  if (n > 0)
    tr = va_arg(argp, IRRef);
  while (n-- > 1)  /* Chain the remaining args with CARG instructions. */
    tr = emitir(IRT(IR_CARG, IRT_NIL), tr, va_arg(argp, IRRef));
  va_end(argp);
  if (CCI_OP(ci) == IR_CALLS)
    J->needsnap = 1;  /* Need snapshot after call with side effect. */
  return emitir(CCI_OPTYPE(ci), tr, id);
}
147
148 /* -- Interning of constants ---------------------------------------------- */
149
/*
** IR instructions for constants are kept in the range
** J->cur.nk <= ref < REF_BIAS. They are chained like all other
** instructions, but grow downwards. They are interned (like strings
** in the VM) to facilitate reference comparisons. The same constant
** must always get the same reference.
*/
156
157 /* Get ref of next IR constant and optionally grow IR.
158 ** Note: this may invalidate all IRIns *!
159 */
ir_nextk(jit_State * J)160 static LJ_AINLINE IRRef ir_nextk(jit_State *J)
161 {
162 IRRef ref = J->cur.nk;
163 if (LJ_UNLIKELY(ref <= J->irbotlim)) lj_ir_growbot(J);
164 J->cur.nk = --ref;
165 return ref;
166 }
167
168 /* Intern int32_t constant. */
lj_ir_kint(jit_State * J,int32_t k)169 TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k)
170 {
171 IRIns *ir, *cir = J->cur.ir;
172 IRRef ref;
173 for (ref = J->chain[IR_KINT]; ref; ref = cir[ref].prev)
174 if (cir[ref].i == k)
175 goto found;
176 ref = ir_nextk(J);
177 ir = IR(ref);
178 ir->i = k;
179 ir->t.irt = IRT_INT;
180 ir->o = IR_KINT;
181 ir->prev = J->chain[IR_KINT];
182 J->chain[IR_KINT] = (IRRef1)ref;
183 found:
184 return TREF(ref, IRT_INT);
185 }
186
187 /* The MRef inside the KNUM/KINT64 IR instructions holds the address of the
188 ** 64 bit constant. The constants themselves are stored in a chained array
189 ** and shared across traces.
190 **
191 ** Rationale for choosing this data structure:
192 ** - The address of the constants is embedded in the generated machine code
193 ** and must never move. A resizable array or hash table wouldn't work.
194 ** - Most apps need very few non-32 bit integer constants (less than a dozen).
195 ** - Linear search is hard to beat in terms of speed and low complexity.
196 */
/* One link of the chained array of 64 bit constants (see rationale above).
** Arrays are never resized or freed individually, so slot addresses stay
** stable for the lifetime of the JIT state.
*/
typedef struct K64Array {
  MRef next;			/* Pointer to next list. */
  MSize numk;			/* Number of used elements in this array. */
  TValue k[LJ_MIN_K64SZ];	/* Array of constants. */
} K64Array;
202
203 /* Free all chained arrays. */
lj_ir_k64_freeall(jit_State * J)204 void lj_ir_k64_freeall(jit_State *J)
205 {
206 K64Array *k;
207 for (k = mref(J->k64, K64Array); k; ) {
208 K64Array *next = mref(k->next, K64Array);
209 lj_mem_free(J2G(J), k, sizeof(K64Array));
210 k = next;
211 }
212 setmref(J->k64, NULL);
213 }
214
/* Get new 64 bit constant slot.
** kp is the last array in the chain, or NULL if none exists yet.
** A fresh K64Array is appended when the current one is full, so slot
** addresses never move (they are embedded in generated machine code).
** Returns a pointer to the new slot holding u64.
*/
static TValue *ir_k64_add(jit_State *J, K64Array *kp, uint64_t u64)
{
  TValue *ntv;
  if (!(kp && kp->numk < LJ_MIN_K64SZ)) {  /* Allocate a new array. */
    K64Array *kn = lj_mem_newt(J->L, sizeof(K64Array), K64Array);
    setmref(kn->next, NULL);
    kn->numk = 0;
    if (kp)
      setmref(kp->next, kn);  /* Chain to the end of the list. */
    else
      setmref(J->k64, kn);  /* Link first array. */
    kp = kn;
  }
  ntv = &kp->k[kp->numk++];  /* Add to current array. */
  ntv->u64 = u64;
  return ntv;
}
233
234 /* Find 64 bit constant in chained array or add it. */
lj_ir_k64_find(jit_State * J,uint64_t u64)235 cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64)
236 {
237 K64Array *k, *kp = NULL;
238 MSize idx;
239 /* Search for the constant in the whole chain of arrays. */
240 for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) {
241 kp = k; /* Remember previous element in list. */
242 for (idx = 0; idx < k->numk; idx++) { /* Search one array. */
243 TValue *tv = &k->k[idx];
244 if (tv->u64 == u64) /* Needed for +-0/NaN/absmask. */
245 return tv;
246 }
247 }
248 /* Otherwise add a new constant. */
249 return ir_k64_add(J, kp, u64);
250 }
251
/* Reserve a 64 bit constant slot whose final value is set later.
** A dummy 0 is interned first so that a future lj_ir_k64_find(J, 0)
** terminates on the dummy and never hands out the reserved slot (whose
** temporary value is also 0).
*/
TValue *lj_ir_k64_reserve(jit_State *J)
{
  K64Array *k, *kp = NULL;
  lj_ir_k64_find(J, 0);  /* Intern dummy 0 to protect the reserved slot. */
  /* Find last K64Array, if any. */
  for (k = mref(J->k64, K64Array); k; k = mref(k->next, K64Array)) kp = k;
  return ir_k64_add(J, kp, 0);  /* Set to 0. Final value is set later. */
}
260
/* Intern 64 bit constant, given by its address.
** op is IR_KNUM or IR_KINT64. Interning compares slot addresses only,
** which suffices because lj_ir_k64_find() canonicalizes by bit pattern.
*/
TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv)
{
  IRIns *ir, *cir = J->cur.ir;
  IRRef ref;
  IRType t = op == IR_KNUM ? IRT_NUM : IRT_I64;
  for (ref = J->chain[op]; ref; ref = cir[ref].prev)
    if (ir_k64(&cir[ref]) == tv)  /* Address compare, not value compare. */
      goto found;
  ref = ir_nextk(J);  /* May move the buffer; use IR() afterwards. */
  ir = IR(ref);
  /* Presumably asserts the address is encodable in the MRef/ptr field --
  ** NOTE(review): confirm checkptrGC() semantics.
  */
  lua_assert(checkptrGC(tv));
  setmref(ir->ptr, tv);
  ir->t.irt = t;
  ir->o = op;
  ir->prev = J->chain[op];
  J->chain[op] = (IRRef1)ref;
found:
  return TREF(ref, t);
}
281
282 /* Intern FP constant, given by its 64 bit pattern. */
lj_ir_knum_u64(jit_State * J,uint64_t u64)283 TRef lj_ir_knum_u64(jit_State *J, uint64_t u64)
284 {
285 return lj_ir_k64(J, IR_KNUM, lj_ir_k64_find(J, u64));
286 }
287
288 /* Intern 64 bit integer constant. */
lj_ir_kint64(jit_State * J,uint64_t u64)289 TRef lj_ir_kint64(jit_State *J, uint64_t u64)
290 {
291 return lj_ir_k64(J, IR_KINT64, lj_ir_k64_find(J, u64));
292 }
293
294 /* Check whether a number is int and return it. -0 is NOT considered an int. */
numistrueint(lua_Number n,int32_t * kp)295 static int numistrueint(lua_Number n, int32_t *kp)
296 {
297 int32_t k = lj_num2int(n);
298 if (n == (lua_Number)k) {
299 if (kp) *kp = k;
300 if (k == 0) { /* Special check for -0. */
301 TValue tv;
302 setnumV(&tv, n);
303 if (tv.u32.hi != 0)
304 return 0;
305 }
306 return 1;
307 }
308 return 0;
309 }
310
311 /* Intern number as int32_t constant if possible, otherwise as FP constant. */
lj_ir_knumint(jit_State * J,lua_Number n)312 TRef lj_ir_knumint(jit_State *J, lua_Number n)
313 {
314 int32_t k;
315 if (numistrueint(n, &k))
316 return lj_ir_kint(J, k);
317 else
318 return lj_ir_knum(J, n);
319 }
320
/* Intern GC object "constant".
** Interning compares object pointers; the object must be alive, since the
** trace holding the reference acts as a GC root for it.
*/
TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t)
{
  IRIns *ir, *cir = J->cur.ir;
  IRRef ref;
  lua_assert(!LJ_GC64);  /* TODO_GC64: major changes required. */
  lua_assert(!isdead(J2G(J), o));  /* Must not intern a dead object. */
  for (ref = J->chain[IR_KGC]; ref; ref = cir[ref].prev)
    if (ir_kgc(&cir[ref]) == o)  /* Pointer identity is sufficient. */
      goto found;
  ref = ir_nextk(J);  /* May move the buffer; use IR() afterwards. */
  ir = IR(ref);
  /* NOBARRIER: Current trace is a GC root. */
  setgcref(ir->gcr, o);
  ir->t.irt = (uint8_t)t;
  ir->o = IR_KGC;
  ir->prev = J->chain[IR_KGC];
  J->chain[IR_KGC] = (IRRef1)ref;
found:
  return TREF(ref, t);
}
342
/* Intern 32 bit pointer constant (op is IR_KPTR or IR_KKPTR).
** The pointer must round-trip through a 32 bit value (asserted below),
** consistent with the IRT_P32 result type.
*/
TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr)
{
  IRIns *ir, *cir = J->cur.ir;
  IRRef ref;
  lua_assert((void *)(intptr_t)i32ptr(ptr) == ptr);  /* Fits in 32 bits. */
  for (ref = J->chain[op]; ref; ref = cir[ref].prev)
    if (mref(cir[ref].ptr, void) == ptr)  /* Pointer identity compare. */
      goto found;
  ref = ir_nextk(J);  /* May move the buffer; use IR() afterwards. */
  ir = IR(ref);
  setmref(ir->ptr, ptr);
  ir->t.irt = IRT_P32;
  ir->o = op;
  ir->prev = J->chain[op];
  J->chain[op] = (IRRef1)ref;
found:
  return TREF(ref, IRT_P32);
}
362
363 /* Intern typed NULL constant. */
lj_ir_knull(jit_State * J,IRType t)364 TRef lj_ir_knull(jit_State *J, IRType t)
365 {
366 IRIns *ir, *cir = J->cur.ir;
367 IRRef ref;
368 for (ref = J->chain[IR_KNULL]; ref; ref = cir[ref].prev)
369 if (irt_t(cir[ref].t) == t)
370 goto found;
371 ref = ir_nextk(J);
372 ir = IR(ref);
373 ir->i = 0;
374 ir->t.irt = (uint8_t)t;
375 ir->o = IR_KNULL;
376 ir->prev = J->chain[IR_KNULL];
377 J->chain[IR_KNULL] = (IRRef1)ref;
378 found:
379 return TREF(ref, t);
380 }
381
/* Intern key slot: pairs a constant key ref with a slot number in the
** two operand fields of a single KSLOT constant.
*/
TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot)
{
  IRIns *ir, *cir = J->cur.ir;
  IRRef2 op12 = IRREF2((IRRef1)key, (IRRef1)slot);  /* Pack both operands. */
  IRRef ref;
  /* Const part is not touched by CSE/DCE, so 0-65535 is ok for IRMlit here. */
  lua_assert(tref_isk(key) && slot == (IRRef)(IRRef1)slot);
  for (ref = J->chain[IR_KSLOT]; ref; ref = cir[ref].prev)
    if (cir[ref].op12 == op12)  /* Both key and slot must match. */
      goto found;
  ref = ir_nextk(J);  /* May move the buffer; use IR() afterwards. */
  ir = IR(ref);
  ir->op12 = op12;
  ir->t.irt = IRT_P32;
  ir->o = IR_KSLOT;
  ir->prev = J->chain[IR_KSLOT];
  J->chain[IR_KSLOT] = (IRRef1)ref;
found:
  return TREF(ref, IRT_P32);
}
403
404 /* -- Access to IR constants ---------------------------------------------- */
405
/* Copy the value of an IR constant into a TValue.
** ir must be one of the K* constants handled below; KSLOT is explicitly
** rejected since callers should extract its key operand instead.
*/
void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir)
{
  UNUSED(L);
  lua_assert(ir->o != IR_KSLOT);  /* Common mistake. */
  switch (ir->o) {
  case IR_KPRI: setpriV(tv, irt_toitype(ir->t)); break;  /* nil/false/true. */
  case IR_KINT: setintV(tv, ir->i); break;
  case IR_KGC: setgcV(L, tv, ir_kgc(ir), irt_toitype(ir->t)); break;
  case IR_KPTR: case IR_KKPTR: case IR_KNULL:
    /* Pointer constants are exposed as light userdata values. */
    setlightudV(tv, mref(ir->ptr, void));
    break;
  case IR_KNUM: setnumV(tv, ir_knum(ir)->n); break;
#if LJ_HASFFI
  case IR_KINT64: {
    /* Box the 64 bit integer in a freshly allocated int64 cdata. */
    GCcdata *cd = lj_cdata_new_(L, CTID_INT64, 8);
    *(uint64_t *)cdataptr(cd) = ir_kint64(ir)->u64;
    setcdataV(L, tv, cd);
    break;
  }
#endif
  default: lua_assert(0); break;
  }
}
430
431 /* -- Convert IR operand types -------------------------------------------- */
432
433 /* Convert from string to number. */
lj_ir_tonumber(jit_State * J,TRef tr)434 TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr)
435 {
436 if (!tref_isnumber(tr)) {
437 if (tref_isstr(tr))
438 tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
439 else
440 lj_trace_err(J, LJ_TRERR_BADTYPE);
441 }
442 return tr;
443 }
444
445 /* Convert from integer or string to number. */
lj_ir_tonum(jit_State * J,TRef tr)446 TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr)
447 {
448 if (!tref_isnum(tr)) {
449 if (tref_isinteger(tr))
450 tr = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
451 else if (tref_isstr(tr))
452 tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
453 else
454 lj_trace_err(J, LJ_TRERR_BADTYPE);
455 }
456 return tr;
457 }
458
459 /* Convert from integer or number to string. */
lj_ir_tostr(jit_State * J,TRef tr)460 TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr)
461 {
462 if (!tref_isstr(tr)) {
463 if (!tref_isnumber(tr))
464 lj_trace_err(J, LJ_TRERR_BADTYPE);
465 tr = emitir(IRT(IR_TOSTR, IRT_STR), tr,
466 tref_isnum(tr) ? IRTOSTR_NUM : IRTOSTR_INT);
467 }
468 return tr;
469 }
470
471 /* -- Miscellaneous IR ops ------------------------------------------------ */
472
/* Evaluate numeric comparison at compile time.
** The ordered ops (LT/GE/LE/GT/EQ) are plain C FP comparisons and thus
** false when either operand is NaN (except NE, which is true).
** The unordered ops (ULT/UGE/ULE/UGT) are written as negations of the
** opposite ordered comparison, so they evaluate to TRUE when either
** operand is NaN. Do not "simplify" them to the direct comparison.
*/
int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op)
{
  switch (op) {
  case IR_EQ: return (a == b);
  case IR_NE: return (a != b);
  case IR_LT: return (a < b);
  case IR_GE: return (a >= b);
  case IR_LE: return (a <= b);
  case IR_GT: return (a > b);
  case IR_ULT: return !(a >= b);  /* a < b, or unordered (NaN). */
  case IR_UGE: return !(a < b);
  case IR_ULE: return !(a > b);
  case IR_UGT: return !(a <= b);
  default: lua_assert(0); return 0;
  }
}
490
491 /* Evaluate string comparison. */
lj_ir_strcmp(GCstr * a,GCstr * b,IROp op)492 int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op)
493 {
494 int res = lj_str_cmp(a, b);
495 switch (op) {
496 case IR_LT: return (res < 0);
497 case IR_GE: return (res >= 0);
498 case IR_LE: return (res <= 0);
499 case IR_GT: return (res > 0);
500 default: lua_assert(0); return 0;
501 }
502 }
503
504 /* Rollback IR to previous state. */
lj_ir_rollback(jit_State * J,IRRef ref)505 void lj_ir_rollback(jit_State *J, IRRef ref)
506 {
507 IRRef nins = J->cur.nins;
508 while (nins > ref) {
509 IRIns *ir;
510 nins--;
511 ir = IR(nins);
512 J->chain[ir->o] = ir->prev;
513 }
514 J->cur.nins = nins;
515 }
516
517 #undef IR
518 #undef fins
519 #undef emitir
520
521 #endif
522