/*
** x86/x64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
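/* Roughly, the emitted group looks like this (one 4 byte stub per exit;
** a sketch, the exact encoding is given by the XI_* opcodes below):
**
**   push groupofs+0; jmp >end   <- exit stub 0
**   push groupofs+1; jmp >end   <- exit stub 1
**   ...
**   push groupofs+N-1           <- last stub falls through
** end:
**   push high byte of exitno
**   mov [esp+8], DISPATCH       (only for !LJ_GC64)
**   jmp lj_vm_exit_handler
*/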
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
  MCode *mxp = as->mcbot;
  MCode *mxpstart = mxp;
  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
    asm_mclimit(as);
  /* Push low byte of exitno for each exit stub. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
  }
  /* Push the high byte of the exitno for each exit stub group. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
#if !LJ_GC64
  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
  *mxp++ = XI_MOVmi;
  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  *mxp++ = 2*sizeof(void *);
  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
#endif
  /* Jump to exit handler which fills in the ExitState. */
  *mxp++ = XI_JMP; mxp += 4;
  *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
  /* Commit the code for this group (even if assembly fails later on). */
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxpstart;
}

/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard.
** It's important to emit this *after* all registers have been allocated,
** because rematerializations may invalidate the flags.
*/
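/* Loop inversion note: if the guard coincides with as->invmcp (the JMP at
** the end of an inverted loop), that JMP is retargeted to the exit stub
** and the guard becomes a branch with inverted condition to the JMP, so
** the fall-through path stays inside the loop.
*/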
static void asm_guardcc(ASMState *as, int cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *(int32_t *)(p+1) = jmprel(p+5, target);
    target = p;
    cc ^= 1;
    if (as->realign) {
      if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
        as->mrm.ofs += 2;  /* Fixup RIP offset for pending fused load. */
      emit_sjcc(as, cc, target);
      return;
    }
  }
  if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
    as->mrm.ofs += 6;  /* Fixup RIP offset for pending fused load. */
  emit_jcc(as, cc, target);
}

/* -- Memory operand fusion ----------------------------------------------- */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM 31

/* Check if a reference is a signed 32 bit constant. */
static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
#if LJ_GC64
    if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_k64(ir)->u64)) {
      *k = (int32_t)ir_k64(ir)->u64;
      return 1;
    }
#else
    if (ir->o != IR_KINT64) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
      *k = (int32_t)ir_kint64(ir)->u64;
      return 1;
    }
#endif
  }
  return 0;
}

/* Check if there's no conflicting instruction between curins and ref.
** Also avoid fusing loads if there are multiple references.
*/
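/* Example: an ALOAD must not be fused across a later ASTORE that might
** alias the loaded slot. Callers pass the matching store opcode as
** 'conflict' (e.g. ir->o + IRDELTA_L2S maps IR_ALOAD to IR_ASTORE in
** asm_fuseload below).
*/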
static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref) {
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
      return 0;
  }
  return 1;  /* Ok, no conflict. */
}

/* Fuse array base into memory operand. */
static IRRef asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *irb = IR(ref);
  as->mrm.ofs = 0;
  if (irb->o == IR_FLOAD) {
    IRIns *ira = IR(irb->op1);
    lua_assert(irb->op2 == IRFL_TAB_ARRAY);
    /* We can avoid the FLOAD of t->array for colocated arrays. */
    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
        !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
      return irb->op1;  /* Table obj. */
    }
  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
    /* Fuse base offset (vararg load). */
    as->mrm.ofs = IR(irb->op2)->i;
    return irb->op1;
  }
  return ref;  /* Otherwise use the given array base. */
}
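/* Example (a sketch): for a table created on-trace with a small constant
** array part, e.g. 'local t = {1,2,3}', the array storage is colocated
** right behind the GCtab itself, so the load of t->array can be elided
** and table+sizeof(GCtab) used as the base instead.
*/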

/* Fuse array reference into memory operand. */
static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irx;
  lua_assert(ir->o == IR_AREF);
  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
  irx = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += 8*irx->i;
    as->mrm.idx = RID_NONE;
  } else {
    rset_clear(allow, as->mrm.base);
    as->mrm.scale = XM_SCALE8;
    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
    ** Doesn't help much without ABCelim, but reduces register pressure.
    */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
        irx->o == IR_ADD && irref_isk(irx->op2)) {
      as->mrm.ofs += 8*IR(irx->op2)->i;
      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
    } else {
      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
    }
  }
}

/* Fuse array/hash/upvalue reference into memory operand.
** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
** pass the final allow mask, excluding any GPRs used for other inputs.
** In particular: 2-operand GPR instructions need to call ra_dest() first!
*/
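/* Example (a sketch): for a 2-operand instruction like ADD r, [base+idx*8]
** the caller must allocate the destination register r via ra_dest() first
** and remove it from 'allow', or the base/idx allocation below could pick
** the same GPR as r.
*/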
static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    switch ((IROp)ir->o) {
    case IR_AREF:
      if (mayfuse(as, ref)) {
        asm_fusearef(as, ir, allow);
        return;
      }
      break;
    case IR_HREFK:
      if (mayfuse(as, ref)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
        as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        as->mrm.idx = RID_NONE;
        return;
      }
      break;
    case IR_UREFC:
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
#if LJ_GC64
        int64_t ofs = dispofs(as, &uv->tv);
        if (checki32(ofs) && checki32(ofs+4)) {
          as->mrm.ofs = (int32_t)ofs;
          as->mrm.base = RID_DISPATCH;
          as->mrm.idx = RID_NONE;
          return;
        }
#else
        as->mrm.ofs = ptr2addr(&uv->tv);
        as->mrm.base = as->mrm.idx = RID_NONE;
        return;
#endif
      }
      break;
    default:
      lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
                 ir->o == IR_KKPTR);
      break;
    }
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  as->mrm.ofs = 0;
  as->mrm.idx = RID_NONE;
}

/* Fuse FLOAD/FREF reference into memory operand. */
static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
{
  lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
  as->mrm.idx = RID_NONE;
  if (ir->op1 == REF_NIL) {
#if LJ_GC64
    as->mrm.ofs = (int32_t)(ir->op2 << 2) - GG_OFS(dispatch);
    as->mrm.base = RID_DISPATCH;
#else
    as->mrm.ofs = (int32_t)(ir->op2 << 2) + ptr2addr(J2GG(as->J));
    as->mrm.base = RID_NONE;
#endif
    return;
  }
  as->mrm.ofs = field_ofs[ir->op2];
  if (irref_isk(ir->op1)) {
    IRIns *op1 = IR(ir->op1);
#if LJ_GC64
    if (ir->op1 == REF_NIL) {
      as->mrm.ofs -= GG_OFS(dispatch);
      as->mrm.base = RID_DISPATCH;
      return;
    } else if (op1->o == IR_KPTR || op1->o == IR_KKPTR) {
      intptr_t ofs = dispofs(as, ir_kptr(op1));
      if (checki32(as->mrm.ofs + ofs)) {
        as->mrm.ofs += (int32_t)ofs;
        as->mrm.base = RID_DISPATCH;
        return;
      }
    }
#else
    as->mrm.ofs += op1->i;
    as->mrm.base = RID_NONE;
    return;
#endif
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
}

/* Fuse string reference into memory operand. */
static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irr;
  lua_assert(ir->o == IR_STRREF);
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = sizeof(GCstr);
  if (!LJ_GC64 && irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
  } else {
    Reg r = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, r);
    as->mrm.base = (uint8_t)r;
  }
  irr = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += irr->i;
  } else {
    Reg r;
    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
      as->mrm.ofs += IR(irr->op2)->i;
      r = ra_alloc1(as, irr->op1, allow);
    } else {
      r = ra_alloc1(as, ir->op2, allow);
    }
    if (as->mrm.base == RID_NONE)
      as->mrm.base = (uint8_t)r;
    else
      as->mrm.idx = (uint8_t)r;
  }
}

static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  as->mrm.idx = RID_NONE;
  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
#if LJ_GC64
    intptr_t ofs = dispofs(as, ir_kptr(ir));
    if (checki32(ofs)) {
      as->mrm.ofs = (int32_t)ofs;
      as->mrm.base = RID_DISPATCH;
      return;
    }
  } if (0) {
#else
    as->mrm.ofs = ir->i;
    as->mrm.base = RID_NONE;
  } else if (ir->o == IR_STRREF) {
    asm_fusestrref(as, ir, allow);
#endif
  } else {
    as->mrm.ofs = 0;
    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
      IRIns *irx;
      IRRef idx;
      Reg r;
      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
        ref = ir->op1;
        ir = IR(ref);
        if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
          goto noadd;
      }
      as->mrm.scale = XM_SCALE1;
      idx = ir->op1;
      ref = ir->op2;
      irx = IR(idx);
      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
        idx = ir->op2;
        ref = ir->op1;
        irx = IR(idx);
      }
      if (canfuse(as, irx) && ra_noreg(irx->r)) {
        if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
          idx = irx->op1;
          as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
        } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
          /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
          idx = irx->op1;
          as->mrm.scale = XM_SCALE2;
        }
      }
      r = ra_alloc1(as, idx, allow);
      rset_clear(allow, r);
      as->mrm.idx = (uint8_t)r;
    }
  noadd:
    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  }
}
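/* Examples of the shapes recognized above (a sketch):
**   ADD(ADD(base, BSHL(idx, 3)), k)  -> mov ..., [base+idx*8+k]
**   ADD(base, ADD(idx, idx))         -> mov ..., [base+idx*2]
** The second form appears because FOLD turns idx*2 into idx+idx.
*/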

/* Fuse load of 64 bit IR constant into memory operand. */
static Reg asm_fuseloadk64(ASMState *as, IRIns *ir)
{
  const uint64_t *k = &ir_k64(ir)->u64;
  if (!LJ_GC64 || checki32((intptr_t)k)) {
    as->mrm.ofs = ptr2addr(k);
    as->mrm.base = RID_NONE;
#if LJ_GC64
  } else if (checki32(dispofs(as, k))) {
    as->mrm.ofs = (int32_t)dispofs(as, k);
    as->mrm.base = RID_DISPATCH;
  } else if (checki32(mcpofs(as, k)) && checki32(mcpofs(as, k+1)) &&
             checki32(mctopofs(as, k)) && checki32(mctopofs(as, k+1))) {
    as->mrm.ofs = (int32_t)mcpofs(as, k);
    as->mrm.base = RID_RIP;
  } else {
    if (ir->i) {
      lua_assert(*k == *(uint64_t*)(as->mctop - ir->i));
    } else {
      while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3;
      *(uint64_t*)as->mcbot = *k;
      ir->i = (int32_t)(as->mctop - as->mcbot);
      as->mcbot += 8;
      as->mclim = as->mcbot + MCLIM_REDZONE;
    }
    as->mrm.ofs = (int32_t)mcpofs(as, as->mctop - ir->i);
    as->mrm.base = RID_RIP;
#endif
  }
  as->mrm.idx = RID_NONE;
  return RID_MRM;
}

/* Fuse load into memory operand.
**
** Important caveat: this may emit RIP-relative loads! So don't place any
** code emitters between this function and the use of its result.
** The only permitted exception is asm_guardcc().
*/
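/* Example: a fused RIP-relative load from asm_fuseloadk64() computes its
** offset against the current as->mcp; any instruction emitted in between
** would shift mcp and break the offset. asm_guardcc() is the lone
** exception, since it fixes up as->mrm.ofs by the size of the branch it
** emits (see the RID_RIP fixups in asm_guardcc above).
*/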
static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    if (allow != RSET_EMPTY) {  /* Fast path. */
      ra_noweak(as, ir->r);
      return ir->r;
    }
  fusespill:
    /* Force a spill if only memory operands are allowed (asm_x87load). */
    as->mrm.base = RID_ESP;
    as->mrm.ofs = ra_spill(as, ir);
    as->mrm.idx = RID_NONE;
    return RID_MRM;
  }
  if (ir->o == IR_KNUM) {
    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
    lua_assert(allow != RSET_EMPTY);
    if (!(avail & (avail-1)))  /* Fuse if less than two regs available. */
      return asm_fuseloadk64(as, ir);
  } else if (ref == REF_BASE || ir->o == IR_KINT64) {
    RegSet avail = as->freeset & ~as->modset & RSET_GPR;
    lua_assert(allow != RSET_EMPTY);
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      if (ref == REF_BASE) {
#if LJ_GC64
        as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->jit_base);
        as->mrm.base = RID_DISPATCH;
#else
        as->mrm.ofs = ptr2addr(&J2G(as->J)->jit_base);
        as->mrm.base = RID_NONE;
#endif
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      } else {
        return asm_fuseloadk64(as, ir);
      }
    }
  } else if (mayfuse(as, ref)) {
    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
          noconflict(as, ref, IR_RETF, 0) &&
          !(LJ_GC64 && irt_isaddr(ir->t))) {
        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
        as->mrm.ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
                      (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      }
    } else if (ir->o == IR_FLOAD) {
      /* Generic fusion is only ok for 32 bit operands (but see asm_comp). */
      if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
          noconflict(as, ref, IR_FSTORE, 0)) {
        asm_fusefref(as, ir, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0) &&
          !(LJ_GC64 && irt_isaddr(ir->t))) {
        asm_fuseahuref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_XLOAD) {
      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
      */
      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
          noconflict(as, ref, IR_XSTORE, 0)) {
        asm_fusexref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_VLOAD && !(LJ_GC64 && irt_isaddr(ir->t))) {
      asm_fuseahuref(as, ir->op1, xallow);
      return RID_MRM;
    }
  }
  if (ir->o == IR_FLOAD && ir->op1 == REF_NIL) {
    asm_fusefref(as, ir, RSET_EMPTY);
    return RID_MRM;
  }
  if (!(as->freeset & allow) && !emit_canremat(ref) &&
      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
    goto fusespill;
  return ra_allocref(as, ref, allow);
}

#if LJ_64
/* Don't fuse a 32 bit load into a 64 bit operation. */
static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
{
  if (is64 && !irt_is64(IR(ref)->t))
    return ra_alloc1(as, ref, allow);
  return asm_fuseload(as, ref, allow);
}
#else
#define asm_fuseloadm(as, ref, allow, is64) asm_fuseload(as, (ref), (allow))
#endif

/* -- Calls --------------------------------------------------------------- */

/* Count the required number of stack slots for a call. */
static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0;
#if LJ_64
  if (LJ_ABI_WIN) {
    nslots = (int)(nargs*2);  /* Only matters for more than four args. */
  } else {
    int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
    for (i = 0; i < nargs; i++)
      if (args[i] && irt_isfp(IR(args[i])->t)) {
        if (nfpr > 0) nfpr--; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots += 2;
      }
  }
#else
  int ngpr = 0;
  if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
    ngpr = 2;
  else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
    ngpr = 1;
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
#endif
  return nslots;
}
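/* Example (x86 fastcall, a sketch): for int f(int a, int b, int c) the
** first two GPR args travel in ECX/EDX (ngpr == 2), so only c occupies a
** stack slot. The resulting slot count feeds the callee-cleanup stack
** adjustment in asm_callx() below.
*/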

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = STACKARG_OFS;
#if LJ_64
  uint32_t gprs = REGARG_GPRS;
  Reg fpr = REGARG_FIRSTFPR;
#if !LJ_ABI_WIN
  MCode *patchnfpr = NULL;
#endif
#else
  uint32_t gprs = 0;
  if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
    if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
      gprs = (REGARG_GPRS & 31);
    else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
      gprs = REGARG_GPRS;
  }
#endif
  if ((void *)ci->func)
    emit_call(as, ci->func);
#if LJ_64
  if ((ci->flags & CCI_VARARG)) {  /* Special handling for vararg calls. */
#if LJ_ABI_WIN
    for (n = 0; n < 4 && n < nargs; n++) {
      IRIns *ir = IR(args[n]);
      if (irt_isfp(ir->t))  /* Duplicate FPRs in GPRs. */
        emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
                ((gprs >> (n*5)) & 31));  /* Either MOVD or MOVQ. */
    }
#else
    patchnfpr = --as->mcp;  /* Indicate number of used FPRs in register al. */
    *--as->mcp = XI_MOVrib | RID_EAX;
#endif
  }
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    Reg r;
#if LJ_64 && LJ_ABI_WIN
    /* Windows/x64 argument registers are strictly positional. */
    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
    fpr++; gprs >>= 5;
#elif LJ_64
    /* POSIX/x64 argument registers are used in order of appearance. */
    if (irt_isfp(ir->t)) {
      r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
    } else {
      r = gprs & 31; gprs >>= 5;
    }
#else
    if (ref && irt_isfp(ir->t)) {
      r = 0;
    } else {
      r = gprs & 31; gprs >>= 5;
      if (!ref) continue;
    }
#endif
    if (r) {  /* Argument is in a register. */
      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
#if LJ_64
        if (LJ_GC64 ? !(ir->o == IR_KINT || ir->o == IR_KNULL) : ir->o == IR_KINT64)
          emit_loadu64(as, r, ir_k64(ir)->u64);
        else
#endif
          emit_loadi(as, r, ir->i);
      } else {
        lua_assert(rset_test(as->freeset, r));  /* Must have been evicted. */
        if (ra_hasreg(ir->r)) {
          ra_noweak(as, ir->r);
          emit_movrr(as, ir, r, ir->r);
        } else {
          ra_allocref(as, ref, RID2RSET(r));
        }
      }
    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
      lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref)));  /* No float k. */
      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
        /* Split stores for unaligned FP consts. */
        emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
        emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
      } else {
        r = ra_alloc1(as, ref, RSET_FPR);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
                  r, RID_ESP, ofs);
      }
      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
    } else {  /* Non-FP argument is on stack. */
      if (LJ_32 && ref < ASMREF_TMP1) {
        emit_movmroi(as, RID_ESP, ofs, ir->i);
      } else {
        r = ra_alloc1(as, ref, RSET_GPR);
        emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
      }
      ofs += sizeof(intptr_t);
    }
    checkmclim(as);
  }
#if LJ_64 && !LJ_ABI_WIN
  if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
#endif
}
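/* Note on the vararg handling above: the POSIX x86-64 ABI expects the
** number of vector registers used by a vararg call in AL. A MOV AL, imm8
** is placed right before the CALL and its immediate byte (patchnfpr) is
** patched to fpr - REGARG_FIRSTFPR once all arguments have been assigned.
*/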

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = (LJ_32 && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    if (irt_isfp(ir->t)) {
      int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
#if LJ_64
      if ((ci->flags & CCI_CASTU64)) {
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
          emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
        }
        if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
#else
      /* Number result is in x87 st0 for x86 calling convention. */
      Reg dest = ir->r;
      if (ra_hasreg(dest)) {
        ra_free(as, dest);
        ra_modified(as, dest);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS,
                  dest, RID_ESP, ofs);
      }
      if ((ci->flags & CCI_CASTU64)) {
        emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
        emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
      } else {
        emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
                  irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
      }
#endif
#if LJ_32
    } else if (hiop) {
      ra_destpair(as, ir);
#endif
    } else {
      lua_assert(!irt_ispri(ir->t));
      ra_destreg(as, ir, RID_RET);
    }
  } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
  }
}

/* Return a constant function pointer or NULL for indirect calls. */
static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
{
#if LJ_32
  UNUSED(as);
  if (irref_isk(func))
    return (void *)irf->i;
#else
  if (irref_isk(func)) {
    MCode *p;
    if (irf->o == IR_KINT64)
      p = (MCode *)(void *)ir_k64(irf)->u64;
    else
      p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
    if (p - as->mcp == (int32_t)(p - as->mcp))
      return p;  /* Call target is still in +-2GB range. */
    /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
  }
#endif
  return NULL;
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  int32_t spadj = 0;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
#if LJ_32
  /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
  if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
    spadj = 4 * asm_count_call_slots(as, &ci, args);
#endif
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  ci.func = (ASMFunction)asm_callx_func(as, irf, func);
  if (!(void *)ci.func) {
    /* Use a (hoistable) non-scratch register for indirect calls. */
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r = ra_alloc1(as, func, allow);
    if (LJ_32) emit_spsub(as, spadj);  /* Above code may cause restores! */
    emit_rr(as, XO_GROUP5, XOg_CALL, r);
  } else if (LJ_32) {
    emit_spsub(as, spadj);
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
#if LJ_FR2
  Reg rpc = ra_scratch(as, rset_exclude(RSET_GPR, base));
#endif
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
#if LJ_FR2
  emit_rmro(as, XO_CMP, rpc|REX_GC64, base, -8);
  emit_loadu64(as, rpc, u64ptr(pc));
#else
  emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
#endif
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_P);
  asm_guardcc(as, CC_NE);
  emit_rr(as, XO_UCOMISD, left, tmp);
  emit_rr(as, XO_CVTSI2SD, tmp, dest);
  emit_rr(as, XO_XORPS, tmp, tmp);  /* Avoid partial register stall. */
  emit_rr(as, XO_CVTTSD2SI, dest, left);
  /* Can't fuse since left is needed twice. */
}
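/* In execution order (code is emitted backwards) the sequence above is:
**   cvttsd2si dest, left    ; truncate to integer
**   xorps tmp, tmp          ; break dependency on tmp
**   cvtsi2sd tmp, dest      ; convert back
**   ucomisd left, tmp       ; compare with the original
**   jne ->exit; jp ->exit   ; guard fails if inexact or NaN
*/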

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp = ra_noreg(IR(ir->op1)->r) ?
              ra_alloc1(as, ir->op1, RSET_FPR) :
              ra_scratch(as, RSET_FPR);
  Reg right;
  emit_rr(as, XO_MOVDto, tmp, dest);
  right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
  emit_mrm(as, XO_ADDSD, tmp, right);
  ra_left(as, tmp, ir->op1);
}
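/* A sketch of the TOBIT trick: ir->op2 references the constant 2^52+2^51.
** Adding it shifts the operand's integer part into the low mantissa bits,
** so the MOVD of the low 32 bits yields the truncated integer that the
** BitOp semantics require (see the U32-to-FP case in asm_conv below).
*/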

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lua_assert(irt_type(ir->t) != st);
  lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64)));  /* Handled by SPLIT. */
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      Reg left = asm_fuseload(as, lref, RSET_FPR);
      emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
      if (left == dest) return;  /* Avoid the XO_XORPS. */
    } else if (LJ_32 && st == IRT_U32) {  /* U32 to FP conversion on x86. */
      /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
      cTValue *k = &as->J->k64[LJ_K64_TOBIT];
      Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_rr(as, XO_CVTSD2SS, dest, dest);
      emit_rr(as, XO_SUBSD, dest, bias);  /* Subtract 2^52+2^51 bias. */
      emit_rr(as, XO_XORPS, dest, bias);  /* Merge bias and integer. */
      emit_rma(as, XO_MOVSD, bias, k);
      emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
      return;
    } else {  /* Integer to FP conversion. */
      Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
                 ra_alloc1(as, lref, RSET_GPR) :
                 asm_fuseloadm(as, lref, RSET_GPR, st64);
      if (LJ_64 && st == IRT_U64) {
        MCLabel l_end = emit_label(as);
        cTValue *k = &as->J->k64[LJ_K64_2P64];
        emit_rma(as, XO_ADDSD, dest, k);  /* Add 2^64 to compensate. */
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, left|REX_64, left);  /* Check if u64 >= 2^63. */
      }
      emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
               dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
    }
    emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      x86Op op = st == IRT_NUM ? XO_CVTTSD2SI : XO_CVTTSS2SI;
      if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
        /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
        /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
        Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
                                          ra_scratch(as, RSET_FPR);
        MCLabel l_end = emit_label(as);
        if (LJ_32)
          emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
        emit_rr(as, op, dest|REX_64, tmp);
        if (st == IRT_NUM)
          emit_rma(as, XO_ADDSD, tmp, &as->J->k64[LJ_K64_M2P64_31]);
        else
          emit_rma(as, XO_ADDSS, tmp, &as->J->k32[LJ_K32_M2P64_31]);
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, dest|REX_64, dest);  /* Check if dest negative. */
        emit_rr(as, op, dest|REX_64, tmp);
        ra_left(as, tmp, lref);
      } else {
        if (LJ_64 && irt_isu32(ir->t))
          emit_rr(as, XO_MOV, dest, dest);  /* Zero hiword. */
        emit_mrm(as, op,
                 dest|((LJ_64 &&
                        (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
                 asm_fuseload(as, lref, RSET_FPR));
      }
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    RegSet allow = RSET_GPR;
    x86Op op;
    lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
    if (st == IRT_I8) {
      op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_U8) {
      op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_I16) {
      op = XO_MOVSXw;
    } else {
      op = XO_MOVZXw;
    }
    left = asm_fuseload(as, lref, allow);
    /* Add extra MOV if source is already in wrong register. */
    if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
      Reg tmp = ra_scratch(as, allow);
      emit_rr(as, op, dest, tmp);
      emit_rr(as, XO_MOV, tmp, left);
    } else {
      emit_mrm(as, op, dest, left);
    }
  } else {  /* 32/64 bit integer conversions. */
    if (LJ_32) {  /* Only need to handle 32/32 bit no-op (cast) on x86. */
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
    } else if (irt_is64(ir->t)) {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
      }
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64) {
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        emit_mrm(as, XO_MOV, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}

#if LJ_32 && LJ_HASFFI
/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */

/* 64 bit integer to FP conversion in 32 bit mode. */
static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
{
  Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
    emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, dest, RID_ESP, ofs);
  }
  emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
            irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
  if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
    MCLabel l_end = emit_label(as);
    emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_2P64]);
    emit_sjcc(as, CC_NS, l_end);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if u64 >= 2^63. */
  } else {
    lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
  }
  emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
  /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
  emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
  emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
}

/* FP to 64 bit integer conversion in 32 bit mode. */
static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  Reg lo, hi;
  lua_assert(st == IRT_NUM || st == IRT_FLOAT);
  lua_assert(dt == IRT_I64 || dt == IRT_U64);
  hi = ra_dest(as, ir, RSET_GPR);
  lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
  if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
  /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
  if (!(as->flags & JIT_F_SSE3)) {  /* Set FPU rounding mode to default. */
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
    emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
    emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
  }
  if (dt == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
    MCLabel l_pop, l_end = emit_label(as);
    emit_x87op(as, XI_FPOP);
    l_pop = emit_label(as);
    emit_sjmp(as, l_end);
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3))
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
    else
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_M2P64]);
    emit_sjcc(as, CC_NS, l_pop);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if out-of-range (2^63). */
  }
  emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
  if ((as->flags & JIT_F_SSE3)) {  /* Truncation is easy with SSE3. */
    emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
  } else {  /* Otherwise set FPU rounding mode to truncate before the store. */
    emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
    emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
    emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
    emit_loadi(as, lo, 0xc00);
    emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
  }
  if (dt == IRT_U64)
    emit_x87op(as, XI_FDUP);
  emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
           st == IRT_NUM ? XOg_FLDq : XOg_FLDd,
           asm_fuseload(as, ir->op1, RSET_EMPTY));
}

static void asm_conv64(ASMState *as, IRIns *ir)
{
  if (irt_isfp(ir->t))
    asm_conv_fp_int64(as, ir);
  else
    asm_conv_int64_fp(as, ir);
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  /* Force a spill slot for the destination register (if any). */
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
    rset_set(drop, ir->r);  /* WIN64 doesn't spill all FPRs. */
  ra_evictset(as, drop);
  asm_guardcc(as, CC_E);
  emit_rr(as, XO_TEST, RID_RET, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
            RID_ESP, sps_scale(ir->s));
}

/* -- Memory references --------------------------------------------------- */

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (irt_isnum(ir->t)) {
    /* For numbers use the constant itself or a spill slot as a TValue. */
    if (irref_isk(ref))
      emit_loada(as, dest, ir_knum(ir));
    else
      emit_rmro(as, XO_LEA, dest|REX_64, RID_ESP, ra_spill(as, ir));
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
#if LJ_GC64
    if (irref_isk(ref)) {
      TValue k;
      lj_ir_kvalue(as->J->L, &k, ir);
      emit_movmroi(as, dest, 4, k.u32.hi);
      emit_movmroi(as, dest, 0, k.u32.lo);
    } else {
      /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
      Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
      if (irt_is64(ir->t)) {
        emit_u32(as, irt_toitype(ir->t) << 15);
        emit_rmro(as, XO_ARITHi, XOg_OR, dest, 4);
      } else {
        /* Currently, no caller passes integers that might end up here. */
        emit_movmroi(as, dest, 4, (irt_toitype(ir->t) << 15));
      }
      emit_movtomro(as, REX_64IR(ir, src), dest, 0);
    }
#else
    if (!irref_isk(ref)) {
      Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
      emit_movtomro(as, REX_64IR(ir, src), dest, 0);
    } else if (!irt_ispri(ir->t)) {
      emit_movmroi(as, dest, 0, ir->i);
    }
    if (!(LJ_64 && irt_islightud(ir->t)))
      emit_movmroi(as, dest, 4, irt_toitype(ir->t));
#endif
    emit_loada(as, dest, &J2G(as->J)->tmptv);
  }
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusearef(as, ir, RSET_GPR);
  if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
    emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
  else if (as->mrm.base != dest)
    emit_rr(as, XO_MOV, dest|REX_GC64, as->mrm.base);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, tmp = RID_NONE;
  IRIns *irkey = IR(ir->op2);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  if (!isk) {
    rset_clear(allow, tab);
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    if (LJ_GC64 || !irt_isstr(kt))
      tmp = ra_scratch(as, rset_exclude(allow, key));
  }

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  if (merge == IR_NE)
    asm_guardcc(as, CC_E);  /* XI_JMP is not found by lj_asm_patchexit. */
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = emit_sjcc_label(as, CC_NZ);
  emit_rr(as, XO_TEST, dest|REX_GC64, dest);
  emit_rmro(as, XO_MOV, dest|REX_GC64, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_E);
  else
    emit_sjcc(as, CC_E, l_end);
  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 (int32_t)ir_knum(irkey)->u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 (int32_t)ir_knum(irkey)->u32.hi);
    } else {
      emit_sjcc(as, CC_P, l_next);
      emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
      emit_sjcc(as, CC_AE, l_next);
      /* The type check avoids NaN penalties and complaints from Valgrind. */
#if LJ_64 && !LJ_GC64
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
      emit_i8(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
    }
#if LJ_64 && !LJ_GC64
  } else if (irt_islightud(kt)) {
    emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
#endif
#if LJ_GC64
  } else if (irt_isaddr(kt)) {
    if (isk) {
      TValue k;
      k.u64 = ((uint64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 k.u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 k.u32.hi);
    } else {
      emit_rmro(as, XO_CMP, tmp|REX_64, dest, offsetof(Node, key.u64));
    }
  } else {
    lua_assert(irt_ispri(kt) && !irt_isnil(kt));
    emit_u32(as, (irt_toitype(kt)<<15)|0x7fff);
    emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
  } else {
    if (!irt_ispri(kt)) {
      lua_assert(irt_isaddr(kt));
      if (isk)
        emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
                   ptr2addr(ir_kgc(irkey)));
      else
        emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
      emit_sjcc(as, CC_NE, l_next);
    }
    lua_assert(!irt_isnil(kt));
    emit_i8(as, irt_toitype(kt));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
  }
  emit_sfixup(as, l_loop);
  checkmclim(as);
#if LJ_GC64
  if (!isk && irt_isaddr(kt)) {
    emit_rr(as, XO_OR, tmp|REX_64, key);
    emit_loadu64(as, tmp, (uint64_t)irt_toitype(kt) << 47);
  }
#endif

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_rmro(as, XO_MOV, dest|REX_GC64, tab, offsetof(GCtab, node));
  } else {
    emit_rmro(as, XO_ARITH(XOg_ADD), dest|REX_GC64, tab, offsetof(GCtab,node));
    if ((as->flags & JIT_F_PREFER_IMUL)) {
      emit_i8(as, sizeof(Node));
      emit_rr(as, XO_IMULi8, dest, dest);
    } else {
      emit_shifti(as, XOg_SHL, dest, 3);
      emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
    }
    if (isk) {
      emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hashrot() in lj_tab.c. */
      emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
      emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
      emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
      emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
      emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
      emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
      if (irt_isnum(kt)) {
        emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
#if LJ_64
        emit_shifti(as, XOg_SHR|REX_64, dest, 32);
        emit_rr(as, XO_MOV, tmp, dest);
        emit_rr(as, XO_MOVDto, key|REX_64, dest);
#else
        emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
        emit_rr(as, XO_MOVDto, key, tmp);
#endif
      } else {
        emit_rr(as, XO_MOV, tmp, key);
#if LJ_GC64
        checkmclim(as);
        emit_gri(as, XG_ARITHi(XOg_XOR), dest, irt_toitype(kt) << 15);
        if ((as->flags & JIT_F_BMI2)) {
          emit_i8(as, 32);
          emit_mrm(as, XV_RORX|VEX_64, dest, key);
        } else {
          emit_shifti(as, XOg_SHR|REX_64, dest, 32);
          emit_rr(as, XO_MOV, dest|REX_64, key|REX_64);
        }
#else
        emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
#endif
      }
    }
  }
}
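/* For reference, the non-constant key path above mirrors hashrot() from
** lj_tab.c (a sketch; dest holds hi, tmp holds lo, and emission runs
** backwards relative to execution):
**   lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
**   lo -= hi; hi = lj_rol(hi, HASH_ROT2);
**   hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
**   mainpos = hi & t->hmask;
*/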

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
#if !LJ_64
  MCLabel l_exit;
#endif
  lua_assert(ofs % sizeof(Node) == 0);
  if (ra_hasreg(dest)) {
    if (ofs != 0) {
      if (dest == node && !(as->flags & JIT_F_LEA_AGU))
        emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, ofs);
      else
        emit_rmro(as, XO_LEA, dest|REX_GC64, node, ofs);
    } else if (dest != node) {
      emit_rr(as, XO_MOV, dest|REX_GC64, node);
    }
  }
  asm_guardcc(as, CC_NE);
#if LJ_64
  if (!irt_ispri(irkey->t)) {
    Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
    emit_rmro(as, XO_CMP, key|REX_64, node,
              ofs + (int32_t)offsetof(Node, key.u64));
    lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
#if LJ_GC64
                          ((uint64_t)irt_toitype(irkey->t) << 47) |
                          (uint64_t)ir_kgc(irkey));
#else
                          ((uint64_t)irt_toitype(irkey->t) << 32) |
                          (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
#endif
  } else {
    lua_assert(!irt_isnil(irkey->t));
#if LJ_GC64
    emit_i32(as, (irt_toitype(irkey->t)<<15)|0x7fff);
    emit_rmro(as, XO_ARITHi, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
#else
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
#endif
  }
#else
  l_exit = emit_label(as);
  if (irt_isnum(irkey->t)) {
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.lo),
               (int32_t)ir_knum(irkey)->u32.lo);
    emit_sjcc(as, CC_NE, l_exit);
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.hi),
               (int32_t)ir_knum(irkey)->u32.hi);
  } else {
    if (!irt_ispri(irkey->t)) {
      lua_assert(irt_isgcv(irkey->t));
      emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
                 ofs + (int32_t)offsetof(Node, key.gcr),
                 ptr2addr(ir_kgc(irkey)));
      emit_sjcc(as, CC_NE, l_exit);
    }
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#endif
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_rma(as, XO_MOV, dest|REX_GC64, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      emit_rmro(as, XO_LEA, dest|REX_GC64, uv, offsetof(GCupval, tv));
      asm_guardcc(as, CC_NE);
      emit_i8(as, 1);
      emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
    } else {
      emit_rmro(as, XO_MOV, dest|REX_GC64, uv, offsetof(GCupval, v));
    }
    emit_rmro(as, XO_MOV, uv|REX_GC64, func,
              (int32_t)offsetof(GCfuncL, uvptr) +
              (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusefref(as, ir, RSET_GPR);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusestrref(as, ir, RSET_GPR);
  if (as->mrm.base == RID_NONE)
    emit_loadi(as, dest, as->mrm.ofs);
  else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
    emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, as->mrm.ofs);
  else
    emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
}

/* -- Loads and stores ---------------------------------------------------- */

static void asm_fxload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  x86Op xo;
  if (ir->o == IR_FLOAD)
    asm_fusefref(as, ir, RSET_GPR);
  else
    asm_fusexref(as, ir->op1, RSET_GPR);
  /* ir->op2 is ignored -- unaligned loads are ok on x86. */
  switch (irt_type(ir->t)) {
  case IRT_I8: xo = XO_MOVSXb; break;
  case IRT_U8: xo = XO_MOVZXb; break;
  case IRT_I16: xo = XO_MOVSXw; break;
  case IRT_U16: xo = XO_MOVZXw; break;
  case IRT_NUM: xo = XO_MOVSD; break;
  case IRT_FLOAT: xo = XO_MOVSS; break;
  default:
    if (LJ_64 && irt_is64(ir->t))
      dest |= REX_64;
    else
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
    xo = XO_MOV;
    break;
  }
  emit_mrm(as, xo, dest, RID_MRM);
}

#define asm_fload(as, ir) asm_fxload(as, ir)
#define asm_xload(as, ir) asm_fxload(as, ir)

static void asm_fxstore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg src = RID_NONE, osrc = RID_NONE;
  int32_t k = 0;
  if (ir->r == RID_SINK)
    return;
  /* The IRT_I16/IRT_U16 stores should never be simplified for constant
  ** values since mov word [mem], imm16 has a length-changing prefix.
  */
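  /* Example: mov word ptr [mem], 0x1234 encodes with a 66h operand-size
  ** prefix in front of an opcode carrying a 16 bit immediate. Such
  ** length-changing prefixes stall the instruction pre-decoders on many
  ** x86 cores, so the constant is forced into a register instead.
  */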
1429 if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
1430 !asm_isk32(as, ir->op2, &k)) {
1431 RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
1432 (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
1433 src = osrc = ra_alloc1(as, ir->op2, allow8);
1434 if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */
1435 rset_clear(allow, osrc);
1436 src = ra_scratch(as, allow8);
1437 }
1438 rset_clear(allow, src);
1439 }
1440 if (ir->o == IR_FSTORE) {
1441 asm_fusefref(as, IR(ir->op1), allow);
1442 } else {
1443 asm_fusexref(as, ir->op1, allow);
1444 if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
1445 }
1446 if (ra_hasreg(src)) {
1447 x86Op xo;
1448 switch (irt_type(ir->t)) {
1449 case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
1450 case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
1451 case IRT_NUM: xo = XO_MOVSDto; break;
1452 case IRT_FLOAT: xo = XO_MOVSSto; break;
1453 #if LJ_64 && !LJ_GC64
1454 case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */
1455 #endif
1456 default:
1457 if (LJ_64 && irt_is64(ir->t))
1458 src |= REX_64;
1459 else
1460 lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
1461 xo = XO_MOVto;
1462 break;
1463 }
1464 emit_mrm(as, xo, src, RID_MRM);
1465 if (!LJ_64 && src != osrc) {
1466 ra_noweak(as, osrc);
1467 emit_rr(as, XO_MOV, src, osrc);
1468 }
1469 } else {
1470 if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
1471 emit_i8(as, k);
1472 emit_mrm(as, XO_MOVmib, 0, RID_MRM);
1473 } else {
1474 lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
1475 irt_isaddr(ir->t));
1476 emit_i32(as, k);
1477 emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
1478 }
1479 }
1480 }
1481
1482 #define asm_fstore(as, ir) asm_fxstore(as, ir)
1483 #define asm_xstore(as, ir) asm_fxstore(as, ir)
1484
1485 #if LJ_64 && !LJ_GC64
asm_load_lightud64(ASMState * as,IRIns * ir,int typecheck)1486 static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
1487 {
1488 if (ra_used(ir) || typecheck) {
1489 Reg dest = ra_dest(as, ir, RSET_GPR);
1490 if (typecheck) {
1491 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
1492 asm_guardcc(as, CC_NE);
1493 emit_i8(as, -2);
1494 emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
1495 emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
1496 emit_rr(as, XO_MOV, tmp|REX_64, dest);
1497 }
1498 return dest;
1499 } else {
1500 return RID_NONE;
1501 }
1502 }
1503 #endif
1504
asm_ahuvload(ASMState * as,IRIns * ir)1505 static void asm_ahuvload(ASMState *as, IRIns *ir)
1506 {
1507 #if LJ_GC64
1508 Reg tmp = RID_NONE;
1509 #endif
1510 lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
1511 (LJ_DUALNUM && irt_isint(ir->t)));
1512 #if LJ_64 && !LJ_GC64
1513 if (irt_islightud(ir->t)) {
1514 Reg dest = asm_load_lightud64(as, ir, 1);
1515 if (ra_hasreg(dest)) {
1516 asm_fuseahuref(as, ir->op1, RSET_GPR);
1517 emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
1518 }
1519 return;
1520 } else
1521 #endif
1522 if (ra_used(ir)) {
1523 RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
1524 Reg dest = ra_dest(as, ir, allow);
1525 asm_fuseahuref(as, ir->op1, RSET_GPR);
1526 #if LJ_GC64
1527 if (irt_isaddr(ir->t)) {
1528 emit_shifti(as, XOg_SHR|REX_64, dest, 17);
1529 asm_guardcc(as, CC_NE);
1530 emit_i8(as, irt_toitype(ir->t));
1531 emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
1532 emit_i8(as, XI_O16);
1533 if ((as->flags & JIT_F_BMI2)) {
1534 emit_i8(as, 47);
1535 emit_mrm(as, XV_RORX|VEX_64, dest, RID_MRM);
1536 } else {
1537 emit_shifti(as, XOg_ROR|REX_64, dest, 47);
1538 emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
1539 }
1540 return;
1541 } else
1542 #endif
1543 emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XO_MOVSD, dest, RID_MRM);
1544 } else {
1545 RegSet gpr = RSET_GPR;
1546 #if LJ_GC64
1547 if (irt_isaddr(ir->t)) {
1548 tmp = ra_scratch(as, RSET_GPR);
1549 gpr = rset_exclude(gpr, tmp);
1550 }
1551 #endif
1552 asm_fuseahuref(as, ir->op1, gpr);
1553 }
1554 /* Always do the type check, even if the load result is unused. */
1555 as->mrm.ofs += 4;
1556 asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
1557 if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
1558 lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
1559 #if LJ_GC64
1560 emit_u32(as, LJ_TISNUM << 15);
1561 #else
1562 emit_u32(as, LJ_TISNUM);
1563 #endif
1564 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
1565 #if LJ_GC64
1566 } else if (irt_isaddr(ir->t)) {
1567 as->mrm.ofs -= 4;
1568 emit_i8(as, irt_toitype(ir->t));
1569 emit_mrm(as, XO_ARITHi8, XOg_CMP, tmp);
1570 emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
1571 emit_mrm(as, XO_MOV, tmp|REX_64, RID_MRM);
1572 } else if (irt_isnil(ir->t)) {
1573 as->mrm.ofs -= 4;
1574 emit_i8(as, -1);
1575 emit_mrm(as, XO_ARITHi8, XOg_CMP|REX_64, RID_MRM);
1576 } else {
1577 emit_u32(as, (irt_toitype(ir->t) << 15) | 0x7fff);
1578 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
1579 #else
1580 } else {
1581 emit_i8(as, irt_toitype(ir->t));
1582 emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
1583 #endif
1584 }
1585 }
1586
static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r == RID_SINK)
    return;
  if (irt_isnum(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    emit_mrm(as, XO_MOVSDto, src, RID_MRM);
#if LJ_64 && !LJ_GC64
  } else if (irt_islightud(ir->t)) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
    emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
#endif
#if LJ_GC64
  } else if (irref_isk(ir->op2)) {
    TValue k;
    lj_ir_kvalue(as->J->L, &k, IR(ir->op2));
    asm_fuseahuref(as, ir->op1, RSET_GPR);
    if (tvisnil(&k)) {
      emit_i32(as, -1);
      emit_mrm(as, XO_MOVmi, REX_64, RID_MRM);
    } else {
      emit_u32(as, k.u32.lo);
      emit_mrm(as, XO_MOVmi, 0, RID_MRM);
      as->mrm.ofs += 4;
      emit_u32(as, k.u32.hi);
      emit_mrm(as, XO_MOVmi, 0, RID_MRM);
    }
#endif
  } else {
    IRIns *irr = IR(ir->op2);
    RegSet allow = RSET_GPR;
    Reg src = RID_NONE;
    if (!irref_isk(ir->op2)) {
      src = ra_alloc1(as, ir->op2, allow);
      rset_clear(allow, src);
    }
    asm_fuseahuref(as, ir->op1, allow);
    if (ra_hasreg(src)) {
#if LJ_GC64
      if (!(LJ_DUALNUM && irt_isinteger(ir->t))) {
        /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
        as->mrm.ofs += 4;
        emit_u32(as, irt_toitype(ir->t) << 15);
        emit_mrm(as, XO_ARITHi, XOg_OR, RID_MRM);
        as->mrm.ofs -= 4;
        emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
        return;
      }
#endif
      emit_mrm(as, XO_MOVto, src, RID_MRM);
    } else if (!irt_ispri(irr->t)) {
      lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
      emit_i32(as, irr->i);
      emit_mrm(as, XO_MOVmi, 0, RID_MRM);
    }
    as->mrm.ofs += 4;
#if LJ_GC64
    lua_assert(LJ_DUALNUM && irt_isinteger(ir->t));
    emit_i32(as, LJ_TNUMX << 15);
#else
    emit_i32(as, (int32_t)irt_toitype(ir->t));
#endif
    emit_mrm(as, XO_MOVmi, 0, RID_MRM);
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
                (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  IRType1 t = ir->t;
  Reg base;
  lua_assert(!(ir->op2 & IRSLOAD_PARENT));  /* Handled by asm_head_side(). */
  lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
  lua_assert(LJ_DUALNUM ||
             !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    Reg left = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, left);  /* Frees dest reg. Do this before base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    emit_rmro(as, XO_MOVSD, left, base, ofs);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
#if LJ_64 && !LJ_GC64
  } else if (irt_islightud(t)) {
    Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
    if (ra_hasreg(dest)) {
      base = ra_alloc1(as, REF_BASE, RSET_GPR);
      emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
    }
    return;
#endif
  } else if (ra_used(ir)) {
    RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
    Reg dest = ra_dest(as, ir, allow);
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
    lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      t.irt = irt_isint(t) ? IRT_NUM : IRT_INT;  /* Check for original type. */
      emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs);
    } else {
#if LJ_GC64
      if (irt_isaddr(t)) {
        /* LJ_GC64 type check + tag removal without BMI2 and with BMI2:
        **
        **   mov r64, [addr]    rorx r64, [addr], 47
        **   ror r64, 47
        **   cmp r16, itype     cmp r16, itype
        **   jne ->exit         jne ->exit
        **   shr r64, 17        shr r64, 17
        */
        emit_shifti(as, XOg_SHR|REX_64, dest, 17);
        if ((ir->op2 & IRSLOAD_TYPECHECK)) {
          asm_guardcc(as, CC_NE);
          emit_i8(as, irt_toitype(t));
          emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
          emit_i8(as, XI_O16);
        }
        if ((as->flags & JIT_F_BMI2)) {
          emit_i8(as, 47);
          emit_rmro(as, XV_RORX|VEX_64, dest, base, ofs);
        } else {
          if ((ir->op2 & IRSLOAD_TYPECHECK))
            emit_shifti(as, XOg_ROR|REX_64, dest, 47);
          else
            emit_shifti(as, XOg_SHL|REX_64, dest, 17);
          emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
        }
        return;
      } else
#endif
      emit_rmro(as, irt_isnum(t) ? XO_MOVSD : XO_MOV, dest, base, ofs);
    }
  } else {
    if (!(ir->op2 & IRSLOAD_TYPECHECK))
      return;  /* No type check: avoid base alloc. */
    base = ra_alloc1(as, REF_BASE, RSET_GPR);
  }
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    /* Need type check, even if the load result is unused. */
    asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
    if (LJ_64 && irt_type(t) >= IRT_NUM) {
      lua_assert(irt_isinteger(t) || irt_isnum(t));
#if LJ_GC64
      emit_u32(as, LJ_TISNUM << 15);
#else
      emit_u32(as, LJ_TISNUM);
#endif
      emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
#if LJ_GC64
    } else if (irt_isnil(t)) {
      /* LJ_GC64 type check for nil:
      **
      **   cmp qword [addr], -1
      **   jne ->exit
      */
      emit_i8(as, -1);
      emit_rmro(as, XO_ARITHi8, XOg_CMP|REX_64, base, ofs);
    } else if (irt_ispri(t)) {
      emit_u32(as, (irt_toitype(t) << 15) | 0x7fff);
      emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
    } else {
      /* LJ_GC64 type check only:
      **
      **   mov r64, [addr]
      **   sar r64, 47
      **   cmp r32, itype
      **   jne ->exit
      */
      Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, base));
      emit_i8(as, irt_toitype(t));
      emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
      emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
      emit_rmro(as, XO_MOV, tmp|REX_64, base, ofs+4);
#else
    } else {
      emit_i8(as, irt_toitype(t));
      emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
#endif
    }
  }
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL));

  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
#if LJ_64
    Reg r64 = sz == 8 ? REX_64 : 0;
    if (irref_isk(ir->op2)) {
      IRIns *irk = IR(ir->op2);
      uint64_t k = (irk->o == IR_KINT64 ||
                    (LJ_GC64 && (irk->o == IR_KPTR || irk->o == IR_KKPTR))) ?
                   ir_k64(irk)->u64 : (uint64_t)(uint32_t)irk->i;
      if (sz == 4 || checki32((int64_t)k)) {
        emit_i32(as, (int32_t)k);
        emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
      } else {
        emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
        emit_loadu64(as, RID_ECX, k);
      }
    } else {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
    }
#else
    int32_t ofs = sizeof(GCcdata);
    if (sz == 8) {
      ofs += 4; ir++;
      lua_assert(ir->o == IR_HIOP);
    }
    do {
      if (irref_isk(ir->op2)) {
        emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
      } else {
        Reg r = ra_alloc1(as, ir->op2, allow);
        emit_movtomro(as, r, RID_RET, ofs);
        rset_clear(allow, r);
      }
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    } while (1);
#endif
    lua_assert(sz == 4 || sz == 8);
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Combine initialization of marked, gct and ctypeid. */
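  /* Forward-order sketch of the four emits below (the assembler works
  ** backwards), with the constants used in this function:
  **
  **   movzx ecx, byte [g->gc.currentwhite]
  **   and ecx, LJ_GC_WHITES
  **   or ecx, (~LJ_TCDATA<<8)+(id<<16)
  **   mov dword [ret+offsetof(GCcdata, marked)], ecx
  */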
  emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
  emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
           (int32_t)((~LJ_TCDATA<<8)+(id<<16)));
  emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
  emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);

  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
}
#else
#define asm_cnew(as, ir)	((void)0)
#endif

/* -- Write barriers ------------------------------------------------------ */

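/* Sketch of the barrier emitted by asm_tbar below, in forward order:
**
**   test byte [tab+marked], LJ_GC_BLACK
**   jz around
**   and byte [tab+marked], ~LJ_GC_BLACK
**   mov tmp, [g->gc.grayagain]
**   mov [g->gc.grayagain], tab
**   mov [tab+gclist], tmp
** around:
*/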
static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  MCLabel l_end = emit_label(as);
  emit_movtomro(as, tmp|REX_GC64, tab, offsetof(GCtab, gclist));
  emit_setgl(as, tab, gc.grayagain);
  emit_getgl(as, tmp, gc.grayagain);
  emit_i8(as, ~LJ_GC_BLACK);
  emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj;
  /* No need for other object barriers (yet). */
  lua_assert(IR(ir->op1)->o == IR_UREFC);
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv      */
  asm_gencall(as, ci, args);
  emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
  obj = IR(ir->op1)->r;
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_WHITES);
  if (irref_isk(ir->op2)) {
    GCobj *vp = ir_kgc(IR(ir->op2));
    emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
  } else {
    Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
    emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
  }
  emit_sjcc(as, CC_Z, l_end);
  emit_i8(as, LJ_GC_BLACK);
  emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
            (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
}

/* -- FP/int arithmetic and logic operations ------------------------------ */

/* Load reference onto x87 stack. Force a spill to memory if needed. */
static void asm_x87load(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_KNUM) {
    cTValue *tv = ir_knum(ir);
    if (tvispzero(tv))  /* Use fldz only for +0. */
      emit_x87op(as, XI_FLDZ);
    else if (tvispone(tv))
      emit_x87op(as, XI_FLD1);
    else
      emit_rma(as, XO_FLDq, XOg_FLDq, tv);
  } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
             !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
    IRIns *iri = IR(ir->op1);
    emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
  } else {
    emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
  }
}

static void asm_fpmath(ASMState *as, IRIns *ir)
{
  IRFPMathOp fpm = (IRFPMathOp)ir->op2;
  if (fpm == IRFPM_SQRT) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
    emit_mrm(as, XO_SQRTSD, dest, left);
  } else if (fpm <= IRFPM_TRUNC) {
    if (as->flags & JIT_F_SSE4_1) {  /* SSE4.1 has a rounding instruction. */
      Reg dest = ra_dest(as, ir, RSET_FPR);
      Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
      /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
      ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
      ** This is atrocious, but the alternatives are much worse.
      */
      /* Round down/up/trunc == 1001/1010/1011. */
      emit_i8(as, 0x09 + fpm);
      emit_mrm(as, XO_ROUNDSD, dest, left);
      if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
        as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f;  /* Swap 0F and REX. */
      }
      *--as->mcp = 0x66;  /* 1st byte of ROUNDSD opcode. */
    } else {  /* Call helper functions for SSE2 variant. */
      /* The modified regs must match with the *.dasc implementation. */
      RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
      if (ra_hasreg(ir->r))
        rset_clear(drop, ir->r);  /* Dest reg handled below. */
      ra_evictset(as, drop);
      ra_destreg(as, ir, RID_XMM0);
      emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
                    fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
      ra_left(as, RID_XMM0, ir->op1);
    }
  } else if (fpm == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) {
    /* Rejoined to pow(). */
  } else {
    asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
  }
}

#define asm_atan2(as, ir)	asm_callid(as, ir, IRCALL_atan2)

static void asm_ldexp(ASMState *as, IRIns *ir)
{
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
    emit_rmro(as, XO_MOVSD, dest, RID_ESP, ofs);
  }
  emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
  emit_x87op(as, XI_FPOP1);
  emit_x87op(as, XI_FSCALE);
  asm_x87load(as, ir->op1);
  asm_x87load(as, ir->op2);
}

static void asm_fppowi(ASMState *as, IRIns *ir)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  ra_destreg(as, ir, RID_XMM0);
  emit_call(as, lj_vm_powi_sse);
  ra_left(as, RID_XMM0, ir->op1);
  ra_left(as, RID_EAX, ir->op2);
}

static void asm_pow(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isnum(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
                                          IRCALL_lj_carith_powu64);
  else
#endif
    asm_fppowi(as, ir);
}

static int asm_swapops(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  lua_assert(ra_noreg(irr->r));
  if (!irm_iscomm(lj_ir_mode[ir->o]))
    return 0;  /* Can't swap non-commutative operations. */
  if (irref_isk(ir->op2))
    return 0;  /* Don't swap constants to the left. */
  if (ra_hasreg(irl->r))
    return 1;  /* Swap if left already has a register. */
  if (ra_samehint(ir->r, irr->r))
    return 1;  /* Swap if dest and right have matching hints. */
  if (as->curins > as->loopref) {  /* In variant part? */
    if (ir->op2 < as->loopref && !irt_isphi(irr->t))
      return 0;  /* Keep invariants on the right. */
    if (ir->op1 < as->loopref && !irt_isphi(irl->t))
      return 1;  /* Swap invariants to the right. */
  }
  if (opisfusableload(irl->o))
    return 1;  /* Swap fusable loads to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
{
  IRRef lref = ir->op1;
  IRRef rref = ir->op2;
  RegSet allow = RSET_FPR;
  Reg dest;
  Reg right = IR(rref)->r;
  if (ra_hasreg(right)) {
    rset_clear(allow, right);
    ra_noweak(as, right);
  }
  dest = ra_dest(as, ir, allow);
  if (lref == rref) {
    right = dest;
  } else if (ra_noreg(right)) {
    if (asm_swapops(as, ir)) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    right = asm_fuseload(as, rref, rset_clear(allow, dest));
  }
  emit_mrm(as, xo, dest, right);
  ra_left(as, dest, lref);
}

static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
{
  IRRef lref = ir->op1;
  IRRef rref = ir->op2;
  RegSet allow = RSET_GPR;
  Reg dest, right;
  int32_t k = 0;
  if (as->flagmcp == as->mcp) {  /* Drop test r,r instruction. */
    MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2);
    if ((p[1] & 15) < 14) {
      if ((p[1] & 15) >= 12) p[1] -= 4;  /* L <-> S, NL <-> NS */
      as->flagmcp = NULL;
      as->mcp = p;
    }  /* else: cannot transform LE/NLE to cc without use of OF. */
  }
  right = IR(rref)->r;
  if (ra_hasreg(right)) {
    rset_clear(allow, right);
    ra_noweak(as, right);
  }
  dest = ra_dest(as, ir, allow);
  if (lref == rref) {
    right = dest;
  } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
    if (asm_swapops(as, ir)) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
  }
  if (irt_isguard(ir->t))  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_O);
  if (xa != XOg_X_IMUL) {
    if (ra_hasreg(right))
      emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
    else
      emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
  } else if (ra_hasreg(right)) {  /* IMUL r, mrm. */
    emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
  } else {  /* IMUL r, r, k. */
    /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
    Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
    x86Op xo;
    if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
    } else { emit_i32(as, k); xo = XO_IMULi; }
    emit_mrm(as, xo, REX_64IR(ir, dest), left);
    return;
  }
  ra_left(as, dest, lref);
}

/* LEA is really a 4-operand ADD with an independent destination register,
** up to two source registers and an immediate. One register can be scaled
** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
** instructions.
**
** Currently only a few common cases are supported:
** - 3-operand ADD: y = a+b; y = a+k   with a and b already allocated
** - Left ADD fusion:  y = (a+b)+k; y = (a+k)+b
** - Right ADD fusion: y = a+(b+k)
** The omitted variants have already been reduced by FOLD.
**
** There are more fusion opportunities, like gathering shifts or joining
** common references. But these are probably not worth the trouble, since
** array indexing is not decomposed and already makes use of all fields
** of the ModRM operand.
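**
** For example (illustrative only), left ADD fusion of y = (a+b)+k with
** a and b already in registers collapses into a single instruction:
**
**   lea y, [a+b*1+k]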
*/
static int asm_lea(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  RegSet allow = RSET_GPR;
  Reg dest;
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = 0;
  if (ra_hasreg(irl->r)) {
    rset_clear(allow, irl->r);
    ra_noweak(as, irl->r);
    as->mrm.base = irl->r;
    if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
      /* The PHI renaming logic does a better job in some cases. */
      if (ra_hasreg(ir->r) &&
          ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
           (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
        return 0;
      if (irref_isk(ir->op2)) {
        as->mrm.ofs = irr->i;
      } else {
        rset_clear(allow, irr->r);
        ra_noweak(as, irr->r);
        as->mrm.idx = irr->r;
      }
    } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
               irref_isk(irr->op2)) {
      Reg idx = ra_alloc1(as, irr->op1, allow);
      rset_clear(allow, idx);
      as->mrm.idx = (uint8_t)idx;
      as->mrm.ofs = IR(irr->op2)->i;
    } else {
      return 0;
    }
  } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
             (irref_isk(ir->op2) || irref_isk(irl->op2))) {
    Reg idx, base = ra_alloc1(as, irl->op1, allow);
    rset_clear(allow, base);
    as->mrm.base = (uint8_t)base;
    if (irref_isk(ir->op2)) {
      as->mrm.ofs = irr->i;
      idx = ra_alloc1(as, irl->op2, allow);
    } else {
      as->mrm.ofs = IR(irl->op2)->i;
      idx = ra_alloc1(as, ir->op2, allow);
    }
    rset_clear(allow, idx);
    as->mrm.idx = (uint8_t)idx;
  } else {
    return 0;
  }
  dest = ra_dest(as, ir, allow);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
  return 1;  /* Success. */
}

static void asm_add(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_ADDSD);
  else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
           irt_is64(ir->t) || !asm_lea(as, ir))
    asm_intarith(as, ir, XOg_ADD);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_SUBSD);
  else  /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
    asm_intarith(as, ir, XOg_SUB);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_MULSD);
  else
    asm_intarith(as, ir, XOg_X_IMUL);
}

static void asm_div(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isnum(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
                                          IRCALL_lj_carith_divu64);
  else
#endif
    asm_fparith(as, ir, XO_DIVSD);
}

static void asm_mod(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isint(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
                                          IRCALL_lj_carith_modu64);
  else
#endif
    asm_callid(as, ir, IRCALL_lj_vm_modi);
}

static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
  ra_left(as, dest, ir->op1);
}

static void asm_neg(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_XORPS);
  else
    asm_neg_not(as, ir, XOg_NEG);
}

#define asm_abs(as, ir)		asm_fparith(as, ir, XO_ANDPS)

static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  Reg right, dest = ra_dest(as, ir, RSET_GPR);
  IRRef lref = ir->op1, rref = ir->op2;
  if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
  right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
  emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
  emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
  ra_left(as, dest, lref);
}

static void asm_min(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_MINSD);
  else
    asm_intmin_max(as, ir, CC_G);
}

static void asm_max(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fparith(as, ir, XO_MAXSD);
  else
    asm_intmin_max(as, ir, CC_L);
}

/* Note: don't use LEA for overflow-checking arithmetic! */
#define asm_addov(as, ir)	asm_intarith(as, ir, XOg_ADD)
#define asm_subov(as, ir)	asm_intarith(as, ir, XOg_SUB)
#define asm_mulov(as, ir)	asm_intarith(as, ir, XOg_X_IMUL)

#define asm_bnot(as, ir)	asm_neg_not(as, ir, XOg_NOT)

static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
                    REX_64IR(ir, 0), dest, 0, as->mcp, 1);
  ra_left(as, dest, ir->op1);
}

#define asm_band(as, ir)	asm_intarith(as, ir, XOg_AND)
#define asm_bor(as, ir)		asm_intarith(as, ir, XOg_OR)
#define asm_bxor(as, ir)	asm_intarith(as, ir, XOg_XOR)

static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs, x86Op xv)
{
  IRRef rref = ir->op2;
  IRIns *irr = IR(rref);
  Reg dest;
  if (irref_isk(rref)) {  /* Constant shifts. */
    int shift;
    dest = ra_dest(as, ir, RSET_GPR);
    shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
    if (!xv && shift && (as->flags & JIT_F_BMI2)) {
      Reg left = asm_fuseloadm(as, ir->op1, RSET_GPR, irt_is64(ir->t));
      if (left != dest) {  /* BMI2 rotate right by constant. */
        emit_i8(as, xs == XOg_ROL ? -shift : shift);
        emit_mrm(as, VEX_64IR(ir, XV_RORX), dest, left);
        return;
      }
    }
    switch (shift) {
    case 0: break;
    case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
    default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
    }
  } else if ((as->flags & JIT_F_BMI2) && xv) {  /* BMI2 variable shifts. */
    Reg left, right;
    dest = ra_dest(as, ir, RSET_GPR);
    right = ra_alloc1(as, rref, RSET_GPR);
    left = asm_fuseloadm(as, ir->op1, rset_exclude(RSET_GPR, right),
                         irt_is64(ir->t));
    emit_mrm(as, VEX_64IR(ir, xv) ^ (right << 19), dest, left);
    return;
  } else {  /* Variable shifts implicitly use register cl (i.e. ecx). */
    Reg right;
    dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
    if (dest == RID_ECX) {
      dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
      emit_rr(as, XO_MOV, RID_ECX, dest);
    }
    right = irr->r;
    if (ra_noreg(right))
      right = ra_allocref(as, rref, RID2RSET(RID_ECX));
    else if (right != RID_ECX)
      ra_scratch(as, RID2RSET(RID_ECX));
    emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
    ra_noweak(as, right);
    if (right != RID_ECX)
      emit_rr(as, XO_MOV, RID_ECX, right);
  }
  ra_left(as, dest, ir->op1);
  /*
  ** Note: avoid using the flags resulting from a shift or rotate!
  ** All of them cause a partial flag stall, except for r,1 shifts
  ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
  */
}

#define asm_bshl(as, ir)	asm_bitshift(as, ir, XOg_SHL, XV_SHLX)
#define asm_bshr(as, ir)	asm_bitshift(as, ir, XOg_SHR, XV_SHRX)
#define asm_bsar(as, ir)	asm_bitshift(as, ir, XOg_SAR, XV_SARX)
#define asm_brol(as, ir)	asm_bitshift(as, ir, XOg_ROL, 0)
#define asm_bror(as, ir)	asm_bitshift(as, ir, XOg_ROR, 0)

/* -- Comparisons --------------------------------------------------------- */

/* Virtual flags for unordered FP comparisons. */
#define VCC_U	0x1000		/* Unordered. */
#define VCC_P	0x2000		/* Needs extra CC_P branch. */
#define VCC_S	0x4000		/* Swap avoids CC_P branch. */
#define VCC_PS	(VCC_P|VCC_S)

/* Map of comparisons to flags. ORDER IR. */
#define COMPFLAGS(ci, cin, cu, cf)	((ci)+((cu)<<4)+((cin)<<8)+(cf))
static const uint16_t asm_compmap[IR_ABC+1] = {
  /*                 signed non-eq unsigned flags */
  /* LT  */ COMPFLAGS(CC_GE, CC_G,  CC_AE, VCC_PS),
  /* GE  */ COMPFLAGS(CC_L,  CC_L,  CC_B,  0),
  /* LE  */ COMPFLAGS(CC_G,  CC_G,  CC_A,  VCC_PS),
  /* GT  */ COMPFLAGS(CC_LE, CC_L,  CC_BE, 0),
  /* ULT */ COMPFLAGS(CC_AE, CC_A,  CC_AE, VCC_U),
  /* UGE */ COMPFLAGS(CC_B,  CC_B,  CC_B,  VCC_U|VCC_PS),
  /* ULE */ COMPFLAGS(CC_A,  CC_A,  CC_A,  VCC_U),
  /* UGT */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS),
  /* EQ  */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
  /* NE  */ COMPFLAGS(CC_E,  CC_E,  CC_E,  VCC_U|VCC_P),
  /* ABC */ COMPFLAGS(CC_BE, CC_B,  CC_BE, VCC_U|VCC_PS)  /* Same as UGT. */
};

/* FP and integer comparisons. */
static void asm_comp(ASMState *as, IRIns *ir)
{
  uint32_t cc = asm_compmap[ir->o];
  if (irt_isnum(ir->t)) {
    IRRef lref = ir->op1;
    IRRef rref = ir->op2;
    Reg left, right;
    MCLabel l_around;
    /*
    ** An extra CC_P branch is required to preserve ordered/unordered
    ** semantics for FP comparisons. This can be avoided by swapping
    ** the operands and inverting the condition (except for EQ and UNE).
    ** So always try to swap if possible.
    **
    ** Another option would be to swap operands to achieve better memory
    ** operand fusion. But it's unlikely that this outweighs the cost
    ** of the extra branches.
    */
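    /* Forward-order sketch of the FP compare emitted below:
    **
    **   ucomisd left, right
    **   jp ->exit or jp around   (only if VCC_P is required)
    **   jcc ->exit
    ** around:
    */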
    if (cc & VCC_S) {  /* Swap? */
      IRRef tmp = lref; lref = rref; rref = tmp;
      cc ^= (VCC_PS|(5<<4));  /* A <-> B, AE <-> BE, PS <-> none */
    }
    left = ra_alloc1(as, lref, RSET_FPR);
    l_around = emit_label(as);
    asm_guardcc(as, cc >> 4);
    if (cc & VCC_P) {  /* Extra CC_P branch required? */
      if (!(cc & VCC_U)) {
        asm_guardcc(as, CC_P);  /* Branch to exit for ordered comparisons. */
      } else if (l_around != as->invmcp) {
        emit_sjcc(as, CC_P, l_around);  /* Branch around for unordered. */
      } else {
        /* Patched to mcloop by asm_loop_fixup. */
        as->loopinv = 2;
        if (as->realign)
          emit_sjcc(as, CC_P, as->mcp);
        else
          emit_jcc(as, CC_P, as->mcp);
      }
    }
    right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
    emit_mrm(as, XO_UCOMISD, left, right);
  } else {
    IRRef lref = ir->op1, rref = ir->op2;
    IROp leftop = (IROp)(IR(lref)->o);
    Reg r64 = REX_64IR(ir, 0);
    int32_t imm = 0;
    lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
               irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t));
    /* Swap constants (only for ABC) and fusable loads to the right. */
    if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
      if ((cc & 0xc) == 0xc) cc ^= 0x53;  /* L <-> G, LE <-> GE */
      else if ((cc & 0xa) == 0x2) cc ^= 0x55;  /* A <-> B, AE <-> BE */
      lref = ir->op2; rref = ir->op1;
    }
    if (asm_isk32(as, rref, &imm)) {
      IRIns *irl = IR(lref);
      /* Check whether we can use test ins. Not for unsigned, since CF=0. */
      int usetest = (imm == 0 && (cc & 0xa) != 0x2);
      if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
        /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
        Reg right, left = RID_NONE;
        RegSet allow = RSET_GPR;
        if (!asm_isk32(as, irl->op2, &imm)) {
          left = ra_alloc1(as, irl->op2, allow);
          rset_clear(allow, left);
        } else {  /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */
          IRIns *irll = IR(irl->op1);
          if (opisfusableload((IROp)irll->o) &&
              (irt_isi8(irll->t) || irt_isu8(irll->t))) {
            IRType1 origt = irll->t;  /* Temporarily flip types. */
            irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
            as->curins--;  /* Skip to BAND to avoid failing in noconflict(). */
            right = asm_fuseload(as, irl->op1, RSET_GPR);
            as->curins++;
            irll->t = origt;
            if (right != RID_MRM) goto test_nofuse;
            /* Fusion succeeded, emit test byte mrm, imm8. */
            asm_guardcc(as, cc);
            emit_i8(as, (imm & 0xff));
            emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
            return;
          }
        }
        as->curins--;  /* Skip to BAND to avoid failing in noconflict(). */
        right = asm_fuseloadm(as, irl->op1, allow, r64);
        as->curins++;  /* Undo the above. */
      test_nofuse:
        asm_guardcc(as, cc);
        if (ra_noreg(left)) {
          emit_i32(as, imm);
          emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
        } else {
          emit_mrm(as, XO_TEST, r64 + left, right);
        }
      } else {
        Reg left;
        if (opisfusableload((IROp)irl->o) &&
            ((irt_isu8(irl->t) && checku8(imm)) ||
             ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
             (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
          /* Only the IRT_INT case is fused by asm_fuseload.
          ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
          ** are handled here.
          ** Note that cmp word [mem], imm16 should not be generated,
          ** since it has a length-changing prefix. Compares of a word
          ** against a sign-extended imm8 are ok, however.
          */
          IRType1 origt = irl->t;  /* Temporarily flip types. */
          irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
          left = asm_fuseload(as, lref, RSET_GPR);
          irl->t = origt;
          if (left == RID_MRM) {  /* Fusion succeeded? */
            if (irt_isu8(irl->t) || irt_isu16(irl->t))
              cc >>= 4;  /* Need unsigned compare. */
            asm_guardcc(as, cc);
            emit_i8(as, imm);
            emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
                         XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
            return;
          }  /* Otherwise handle register case as usual. */
        } else {
          left = asm_fuseloadm(as, lref,
                               irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
        }
        asm_guardcc(as, cc);
        if (usetest && left != RID_MRM) {
          /* Use test r,r instead of cmp r,0. */
          x86Op xo = XO_TEST;
          if (irt_isu8(ir->t)) {
            lua_assert(ir->o == IR_EQ || ir->o == IR_NE);
            xo = XO_TESTb;
            if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
              if (LJ_64) {
                left |= FORCE_REX;
              } else {
                emit_i32(as, 0xff);
                emit_mrm(as, XO_GROUP3, XOg_TEST, left);
                return;
              }
            }
          }
          emit_rr(as, xo, r64 + left, left);
          if (irl+1 == ir)  /* Referencing previous ins? */
            as->flagmcp = as->mcp;  /* Set flag to drop test r,r if possible. */
        } else {
          emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
        }
      }
    } else {
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
      asm_guardcc(as, cc);
      emit_mrm(as, XO_CMP, r64 + left, right);
    }
  }
}

#define asm_equal(as, ir)	asm_comp(as, ir)

#if LJ_32 && LJ_HASFFI
/* 64 bit integer comparisons in 32 bit mode. */
static void asm_comp_int64(ASMState *as, IRIns *ir)
{
  uint32_t cc = asm_compmap[(ir-1)->o];
  RegSet allow = RSET_GPR;
  Reg lefthi = RID_NONE, leftlo = RID_NONE;
  Reg righthi = RID_NONE, rightlo = RID_NONE;
  MCLabel l_around;
  x86ModRM mrm;

  as->curins--;  /* Skip loword ins. Avoids failing in noconflict(), too. */

  /* Allocate/fuse hiword operands. */
  if (irref_isk(ir->op2)) {
    lefthi = asm_fuseload(as, ir->op1, allow);
  } else {
    lefthi = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, lefthi);
    righthi = asm_fuseload(as, ir->op2, allow);
    if (righthi == RID_MRM) {
      if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
      if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
    } else {
      rset_clear(allow, righthi);
    }
  }
  mrm = as->mrm;  /* Save state for hiword instruction. */

  /* Allocate/fuse loword operands. */
  if (irref_isk((ir-1)->op2)) {
    leftlo = asm_fuseload(as, (ir-1)->op1, allow);
  } else {
    leftlo = ra_alloc1(as, (ir-1)->op1, allow);
    rset_clear(allow, leftlo);
    rightlo = asm_fuseload(as, (ir-1)->op2, allow);
  }

  /* All register allocations must be performed _before_ this point. */
  l_around = emit_label(as);
  as->invmcp = as->flagmcp = NULL;  /* Cannot use these optimizations. */

  /* Loword comparison and branch. */
  asm_guardcc(as, cc >> 4);  /* Always use unsigned compare for loword. */
  if (ra_noreg(rightlo)) {
    int32_t imm = IR((ir-1)->op2)->i;
    if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
      emit_rr(as, XO_TEST, leftlo, leftlo);
    else
      emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
  } else {
    emit_mrm(as, XO_CMP, leftlo, rightlo);
  }

  /* Hiword comparison and branches. */
  if ((cc & 15) != CC_NE)
    emit_sjcc(as, CC_NE, l_around);  /* Hiword unequal: skip loword compare. */
  if ((cc & 15) != CC_E)
    asm_guardcc(as, cc >> 8);  /* Hiword compare without equality check. */
  as->mrm = mrm;  /* Restore state. */
  if (ra_noreg(righthi)) {
    int32_t imm = IR(ir->op2)->i;
    if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
      emit_rr(as, XO_TEST, lefthi, lefthi);
    else
      emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
  } else {
    emit_mrm(as, XO_CMP, lefthi, righthi);
  }
}
#endif

/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */

/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
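/* For example, a 64 bit add in 32 bit mode is split into an ADD for the
** loword op plus an ADC for this hiword op (see the IR_ADD case below).
*/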
static void asm_hiop(ASMState *as, IRIns *ir)
{
#if LJ_32 && LJ_HASFFI
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    asm_comp_int64(as, ir);
    return;
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_fxstore(as, ir);
    return;
  }
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_ADD:
    as->flagmcp = NULL;
    as->curins--;
    asm_intarith(as, ir, XOg_ADC);
    asm_intarith(as, ir-1, XOg_ADD);
    break;
  case IR_SUB:
    as->flagmcp = NULL;
    as->curins--;
    asm_intarith(as, ir, XOg_SBB);
    asm_intarith(as, ir-1, XOg_SUB);
    break;
  case IR_NEG: {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    emit_rr(as, XO_GROUP3, XOg_NEG, dest);
    emit_i8(as, 0);
    emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
    ra_left(as, dest, ir->op1);
    as->curins--;
    asm_neg_not(as, ir-1, XOg_NEG);
    break;
  }
  case IR_CALLN:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by CNEWI itself. */
    break;
  default: lua_assert(0); break;
  }
#else
  UNUSED(as); UNUSED(ir); lua_assert(0);  /* Unused on x64 or without FFI. */
#endif
}

/* -- Profiling ----------------------------------------------------------- */

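/* Profiler hook check, roughly: test byte [g->hookmask], HOOK_PROFILE;
** jne ->exit.
*/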
static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_i8(as, HOOK_PROFILE);
  emit_rma(as, XO_GROUP3b, XOg_TEST, &J2G(as->J)->hookmask);
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
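/* Rough forward-order sketch of the check emitted below (the assembler
** emits backwards; the temp. register spill/restore is omitted):
**
**   mov r, [g->cur_L]
**   mov r, [r+offsetof(lua_State, maxstack)]
**   sub r, BASE            (or sub r, [g->jit_base])
**   cmp r, 8*topslot
**   jb ->exit
*/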
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore eax. */
  Reg pbase = irp ? irp->r : RID_BASE;
  Reg r = allow ? rset_pickbot(allow) : RID_EAX;
  emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
  else
    ra_modified(as, r);
  emit_gri(as, XG_ARITHi(XOg_CMP), r|REX_GC64, (int32_t)(8*topslot));
  if (ra_hasreg(pbase) && pbase != r)
    emit_rr(as, XO_ARITH(XOg_SUB), r|REX_GC64, pbase);
  else
#if LJ_GC64
    emit_rmro(as, XO_ARITH(XOg_SUB), r|REX_64, RID_DISPATCH,
              (int32_t)dispofs(as, &J2G(as->J)->jit_base));
#else
    emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
              ptr2addr(&J2G(as->J)->jit_base));
#endif
  emit_rmro(as, XO_MOV, r|REX_GC64, r, offsetof(lua_State, maxstack));
  emit_getgl(as, r, cur_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
}

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
#if !LJ_FR2 || defined(LUA_USE_ASSERT)
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
#endif
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
    } else {
      lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
                 (LJ_DUALNUM && irt_isinteger(ir->t)));
      if (!irref_isk(ref)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
#if LJ_GC64
        if (irt_is64(ir->t)) {
          /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
          emit_u32(as, irt_toitype(ir->t) << 15);
          emit_rmro(as, XO_ARITHi, XOg_OR, RID_BASE, ofs+4);
        } else if (LJ_DUALNUM && irt_isinteger(ir->t)) {
          emit_movmroi(as, RID_BASE, ofs+4, LJ_TISNUM << 15);
        } else {
          emit_movmroi(as, RID_BASE, ofs+4, (irt_toitype(ir->t)<<15)|0x7fff);
        }
#endif
        emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
#if LJ_GC64
      } else {
        TValue k;
        lj_ir_kvalue(as->J->L, &k, ir);
        if (tvisnil(&k)) {
          emit_i32(as, -1);
          emit_rmro(as, XO_MOVmi, REX_64, RID_BASE, ofs);
        } else {
          emit_movmroi(as, RID_BASE, ofs+4, k.u32.hi);
          emit_movmroi(as, RID_BASE, ofs, k.u32.lo);
        }
#else
      } else if (!irt_ispri(ir->t)) {
        emit_movmroi(as, RID_BASE, ofs, ir->i);
#endif
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
#if !LJ_FR2
        if (s != 0)  /* Do not overwrite link to previous frame. */
          emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
#endif
#if !LJ_GC64
      } else {
        if (!(LJ_64 && irt_islightud(ir->t)))
          emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
#endif
      }
    }
    checkmclim(as);
  }
  lua_assert(map + nent == flinks);
}

/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
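/* Forward-order sketch of the check emitted below:
**
**   mov tmp, [g->gc.total]
**   cmp tmp, [g->gc.threshold]
**   jb around
**   call lj_gc_step_jit        (with g and the accumulated step count)
**   test eax, eax
**   jne ->exit                 (exit trace if in GCSatomic/GCSfinalize)
** around:
*/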
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_rr(as, XO_TEST, RID_RET, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
#if LJ_GC64
  emit_rmro(as, XO_LEA, tmp|REX_64, RID_DISPATCH, GG_DISP2G);
#else
  emit_loada(as, tmp, J2G(as->J));
#endif
  emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_sjcc(as, CC_B, l_end);
  emit_opgl(as, XO_ARITH(XOg_CMP), tmp|REX_GC64, gc.threshold);
  emit_getgl(as, tmp, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->realign) {  /* Realigned loops use short jumps. */
    as->realign = NULL;  /* Stop another retry. */
    lua_assert(((intptr_t)target & 15) == 0);
    if (as->loopinv) {  /* Inverted loop branch? */
      p -= 5;
      p[0] = XI_JMP;
      lua_assert(target - p >= -128);
      p[-1] = (MCode)(target - p);  /* Patch sjcc. */
      if (as->loopinv == 2)
        p[-3] = (MCode)(target - p + 2);  /* Patch opt. short jp. */
    } else {
      lua_assert(target - p >= -128);
      p[-1] = (MCode)(int8_t)(target - p);  /* Patch short jmp. */
      p[-2] = XI_JMPs;
    }
  } else {
    MCode *newloop;
    p[-5] = XI_JMP;
    if (as->loopinv) {  /* Inverted loop branch? */
      /* asm_guardcc already inverted the jcc and patched the jmp. */
      p -= 5;
      newloop = target+4;
      *(int32_t *)(p-4) = (int32_t)(target - p);  /* Patch jcc. */
      if (as->loopinv == 2) {
        *(int32_t *)(p-10) = (int32_t)(target - p + 6);  /* Patch opt. jp. */
        newloop = target+8;
      }
    } else {  /* Otherwise just patch jmp. */
      *(int32_t *)(p-4) = (int32_t)(target - p);
      newloop = target+3;
    }
    /* Realign small loops and shorten the loop branch. */
    if (newloop >= p - 128) {
      as->realign = newloop;  /* Force a retry and remember alignment. */
      as->curins = as->stopins;  /* Abort asm_trace now. */
      as->T->nins = as->orignins;  /* Remove any added renames. */
    }
  }
}

/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_rr(as, XO_MOV, r|REX_GC64, RID_BASE);
  }
}

/* Coalesce or reload BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      rset_clear(allow, r);  /* Mark same BASE register as coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      /* Move from coalesced parent reg. */
      rset_clear(allow, irp->r);
      emit_rr(as, XO_MOV, r|REX_GC64, irp->r);
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
  MCode *p = as->mctop;
  MCode *target, *q;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
  } else {
    MCode *p1;
    /* Patch stack adjustment. */
    if (checki8(spadj)) {
      p -= 3;
      p1 = p-6;
      *p1 = (MCode)spadj;
    } else {
      p1 = p-9;
      *(int32_t *)p1 = spadj;
    }
    if ((as->flags & JIT_F_LEA_AGU)) {
#if LJ_64
      p1[-4] = 0x48;
#endif
      p1[-3] = (MCode)XI_LEA;
      p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
      p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
    } else {
#if LJ_64
      p1[-3] = 0x48;
#endif
      p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
      p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
    }
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  *(int32_t *)(p-4) = jmprel(p, target);
  p[-5] = XI_JMP;
  /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
  for (q = as->mctop-1; q >= p; q--)
    *q = XI_NOP;
  as->mctop = p;
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop;
  /* Realign and leave room for backwards loop branch or exit branch. */
  if (as->realign) {
    int i = ((int)(intptr_t)as->realign) & 15;
    /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
    while (i-- > 0)
      *--p = XI_NOP;
    as->mctop = p;
    p -= (as->loopinv ? 5 : 2);  /* Space for short/near jmp. */
  } else {
    p -= 5;  /* Space for exit branch (near jmp). */
  }
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
    as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
    as->invmcp = NULL;
  }
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  int nslots;
  asm_collectargs(as, ir, ci, args);
  nslots = asm_count_call_slots(as, ci, args);
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
#if LJ_64
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
#else
  return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
#endif
}

/* Target-specific setup. */
static void asm_setup_target(ASMState *as)
{
  asm_exitstub_setup(as, as->T->nsnap);
  as->mrm.base = 0;
}

/* -- Trace patching ------------------------------------------------------ */

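/* Opcode maps for asm_x86_inslen() below. A rough reading of the encoding
** (the decoder is authoritative): the high nibble classifies each opcode
** byte (e.g. 1x = instruction prefix, 2x = 0F escape, 9x = ModR/M follows,
** 5x = fixed length) and the low nibble carries length information or
** prefix flag bits.
*/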
static const uint8_t map_op1[256] = {
0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x20,
0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,
0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
#if LJ_64
0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x14,0x14,0x14,0x14,0x14,0x14,0x14,0x14,
#else
0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
#endif
0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
0x51,0x51,0x92,0x92,0x10,0x10,0x12,0x11,0x45,0x86,0x52,0x93,0x51,0x51,0x51,0x51,
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
0x93,0x86,0x93,0x93,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x47,0x51,0x51,0x51,0x51,0x51,
#if LJ_64
0x59,0x59,0x59,0x59,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
#else
0x55,0x55,0x55,0x55,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
#endif
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,
0x93,0x93,0x53,0x51,0x70,0x71,0x93,0x86,0x54,0x51,0x53,0x51,0x51,0x52,0x51,0x51,
0x92,0x92,0x92,0x92,0x52,0x52,0x51,0x51,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x45,0x45,0x47,0x52,0x51,0x51,0x51,0x51,
0x10,0x51,0x10,0x10,0x51,0x51,0x63,0x66,0x51,0x51,0x51,0x51,0x51,0x51,0x92,0x92
};

static const uint8_t map_op2[256] = {
0x93,0x93,0x93,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x51,0x52,0x51,0x93,0x52,0x94,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x34,0x51,0x35,0x51,0x51,0x51,0x51,0x51,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x94,0x54,0x54,0x54,0x93,0x93,0x93,0x52,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x52,0x52,0x52,0x93,0x94,0x93,0x51,0x51,0x52,0x52,0x52,0x93,0x94,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x94,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x94,0x93,0x94,0x94,0x94,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x52
};

static uint32_t asm_x86_inslen(const uint8_t *p)
{
  uint32_t result = 0;
  uint32_t prefixes = 0;
  uint32_t x = map_op1[*p];
  for (;;) {
    switch (x >> 4) {
    case 0: return result + x + (prefixes & 4);
    case 1: prefixes |= x; x = map_op1[*++p]; result++; break;
    case 2: x = map_op2[*++p]; break;
    case 3: p++; goto mrm;
    case 4: result -= (prefixes & 2);  /* fallthrough */
    case 5: return result + (x & 15);
    case 6:  /* Group 3. */
      if (p[1] & 0x38) x = 2;
      else if ((prefixes & 2) && (x == 0x66)) x = 4;
      goto mrm;
    case 7:  /* VEX c4/c5. */
      if (LJ_32 && p[1] < 0xc0) {
        x = 2;
        goto mrm;
      }
      if (x == 0x70) {
        x = *++p & 0x1f;
        result++;
        if (x >= 2) {
          p += 2;
          result += 2;
          goto mrm;
        }
      }
      p++;
      result++;
      x = map_op2[*++p];
      break;
    case 8: result -= (prefixes & 2);  /* fallthrough */
    case 9: mrm:  /* ModR/M and possibly SIB. */
      result += (x & 15);
      x = *++p;
      switch (x >> 6) {
      case 0: if ((x & 7) == 5) return result + 4; break;
      case 1: result++; break;
      case 2: result += 4; break;
      case 3: return result;
      }
      if ((x & 7) == 4) {
        result++;
        if (x < 0x40 && (p[1] & 7) == 5) result += 4;
      }
      return result;
    }
  }
}

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MSize len = T->szmcode;
  MCode *px = exitstub_addr(J, exitno) - 6;
  MCode *pe = p+len-6;
#if LJ_GC64
  uint32_t statei = (uint32_t)(GG_OFS(g.vmstate) - GG_OFS(dispatch));
#else
  uint32_t statei = u32ptr(&J2G(J)->vmstate);
#endif
  if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
    *(int32_t *)(p+len-4) = jmprel(p+len, target);
  /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
  for (; p < pe; p += asm_x86_inslen(p)) {
    intptr_t ofs = LJ_GC64 ? (p[0] & 0xf0) == 0x40 : LJ_64;
    if (*(uint32_t *)(p+2+ofs) == statei && p[ofs+LJ_GC64-LJ_64] == XI_MOVmi)
      break;
  }
  lua_assert(p < pe);
  for (; p < pe; p += asm_x86_inslen(p))
    if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px)
      *(int32_t *)(p+2) = jmprel(p+6, target);
  lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
  lj_mcode_patch(J, mcarea, 1);
}