/*
** x86/x64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
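/* Sketch of an emitted stub group (each stub is 4 bytes):
**
**   push (groupofs+0)    <- entry point for exit 0 of this group
**   jmp  common
**   push (groupofs+1)    <- entry point for exit 1
**   jmp  common
**   ...
**   push (groupofs+N-1)  <- last stub falls through
** common:
**   push high byte of exitno      (shared by the whole group)
**   mov  dword [esp+8], DISPATCH  (32 bit mode only)
**   jmp  lj_vm_exit_handler
*/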
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
  MCode *mxp = as->mcbot;
  MCode *mxpstart = mxp;
  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
    asm_mclimit(as);
  /* Push low byte of exitno for each exit stub. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
  }
  /* Push the high byte of the exitno for each exit stub group. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
#if !LJ_GC64
  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
  *mxp++ = XI_MOVmi;
  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  *mxp++ = 2*sizeof(void *);
  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
#endif
  /* Jump to exit handler which fills in the ExitState. */
  *mxp++ = XI_JMP; mxp += 4;
  *((int32_t *)(mxp-4)) = jmprel(as->J, mxp, (MCode *)(void *)lj_vm_exit_handler);
  /* Commit the code for this group (even if assembly fails later on). */
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxpstart;
}

/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard.
** It's important to emit this *after* all registers have been allocated,
** because rematerializations may invalidate the flags.
*/
static void asm_guardcc(ASMState *as, int cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *(int32_t *)(p+1) = jmprel(as->J, p+5, target);
    target = p;
    cc ^= 1;
    if (as->realign) {
      if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
        as->mrm.ofs += 2;  /* Fixup RIP offset for pending fused load. */
      emit_sjcc(as, cc, target);
      return;
    }
  }
  if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
    as->mrm.ofs += 6;  /* Fixup RIP offset for pending fused load. */
  emit_jcc(as, cc, target);
}

/* -- Memory operand fusion ----------------------------------------------- */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM  31

/* Check if a reference is a signed 32 bit constant. */
static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
#if LJ_GC64
    if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_k64(ir)->u64)) {
      *k = (int32_t)ir_k64(ir)->u64;
      return 1;
    }
#else
    if (ir->o != IR_KINT64) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
      *k = (int32_t)ir_kint64(ir)->u64;
      return 1;
    }
#endif
  }
  return 0;
}

/* Check if there's no conflicting instruction between curins and ref.
** Also avoid fusing loads if there are multiple references.
*/
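/* Example: an ALOAD may only be fused into a later instruction if no
** (potentially aliasing) ASTORE occurs in between; the caller passes the
** conflicting opcode, e.g. IR_ASTORE for a fused IR_ALOAD.
*/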
static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref) {
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
      return 0;
  }
  return 1;  /* Ok, no conflict. */
}

/* Fuse array base into memory operand. */
static IRRef asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *irb = IR(ref);
  as->mrm.ofs = 0;
  if (irb->o == IR_FLOAD) {
    IRIns *ira = IR(irb->op1);
    lj_assertA(irb->op2 == IRFL_TAB_ARRAY, "expected FLOAD TAB_ARRAY");
    /* We can avoid the FLOAD of t->array for colocated arrays. */
    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
        !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
      return irb->op1;  /* Table obj. */
    }
  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
    /* Fuse base offset (vararg load). */
    as->mrm.ofs = IR(irb->op2)->i;
    return irb->op1;
  }
  return ref;  /* Otherwise use the given array base. */
}

/* Fuse array reference into memory operand. */
static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irx;
  lj_assertA(ir->o == IR_AREF, "expected AREF");
  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
  irx = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += 8*irx->i;
    as->mrm.idx = RID_NONE;
  } else {
    rset_clear(allow, as->mrm.base);
    as->mrm.scale = XM_SCALE8;
    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
    ** Doesn't help much without ABCelim, but reduces register pressure.
    */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
        irx->o == IR_ADD && irref_isk(irx->op2)) {
      as->mrm.ofs += 8*IR(irx->op2)->i;
      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
    } else {
      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
    }
  }
}

/* Fuse array/hash/upvalue reference into memory operand.
** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
** pass the final allow mask, excluding any GPRs used for other inputs.
** In particular: 2-operand GPR instructions need to call ra_dest() first!
*/
static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    switch ((IROp)ir->o) {
    case IR_AREF:
      if (mayfuse(as, ref)) {
        asm_fusearef(as, ir, allow);
        return;
      }
      break;
    case IR_HREFK:
      if (mayfuse(as, ref)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
        as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        as->mrm.idx = RID_NONE;
        return;
      }
      break;
    case IR_UREFC:
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
#if LJ_GC64
        int64_t ofs = dispofs(as, &uv->tv);
        if (checki32(ofs) && checki32(ofs+4)) {
          as->mrm.ofs = (int32_t)ofs;
          as->mrm.base = RID_DISPATCH;
          as->mrm.idx = RID_NONE;
          return;
        }
#else
        as->mrm.ofs = ptr2addr(&uv->tv);
        as->mrm.base = as->mrm.idx = RID_NONE;
        return;
#endif
      }
      break;
    case IR_TMPREF:
#if LJ_GC64
      as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->tmptv);
      as->mrm.base = RID_DISPATCH;
      as->mrm.idx = RID_NONE;
#else
      as->mrm.ofs = igcptr(&J2G(as->J)->tmptv);
      as->mrm.base = as->mrm.idx = RID_NONE;
#endif
      return;
    default:
      break;
    }
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  as->mrm.ofs = 0;
  as->mrm.idx = RID_NONE;
}

/* Fuse FLOAD/FREF reference into memory operand. */
static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
{
  lj_assertA(ir->o == IR_FLOAD || ir->o == IR_FREF,
             "bad IR op %d", ir->o);
  as->mrm.idx = RID_NONE;
  if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
#if LJ_GC64
    as->mrm.ofs = (int32_t)(ir->op2 << 2) - GG_OFS(dispatch);
    as->mrm.base = RID_DISPATCH;
#else
    as->mrm.ofs = (int32_t)(ir->op2 << 2) + ptr2addr(J2GG(as->J));
    as->mrm.base = RID_NONE;
#endif
    return;
  }
  as->mrm.ofs = field_ofs[ir->op2];
  if (irref_isk(ir->op1)) {
    IRIns *op1 = IR(ir->op1);
#if LJ_GC64
    if (ir->op1 == REF_NIL) {
      as->mrm.ofs -= GG_OFS(dispatch);
      as->mrm.base = RID_DISPATCH;
      return;
    } else if (op1->o == IR_KPTR || op1->o == IR_KKPTR) {
      intptr_t ofs = dispofs(as, ir_kptr(op1));
      if (checki32(as->mrm.ofs + ofs)) {
        as->mrm.ofs += (int32_t)ofs;
        as->mrm.base = RID_DISPATCH;
        return;
      }
    }
#else
    as->mrm.ofs += op1->i;
    as->mrm.base = RID_NONE;
    return;
#endif
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
}

/* Fuse string reference into memory operand. */
static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irr;
  lj_assertA(ir->o == IR_STRREF, "bad IR op %d", ir->o);
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = sizeof(GCstr);
  if (!LJ_GC64 && irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
  } else {
    Reg r = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, r);
    as->mrm.base = (uint8_t)r;
  }
  irr = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += irr->i;
  } else {
    Reg r;
    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
      as->mrm.ofs += IR(irr->op2)->i;
      r = ra_alloc1(as, irr->op1, allow);
    } else {
      r = ra_alloc1(as, ir->op2, allow);
    }
    if (as->mrm.base == RID_NONE)
      as->mrm.base = (uint8_t)r;
    else
      as->mrm.idx = (uint8_t)r;
  }
}

static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  as->mrm.idx = RID_NONE;
  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
#if LJ_GC64
    intptr_t ofs = dispofs(as, ir_kptr(ir));
    if (checki32(ofs)) {
      as->mrm.ofs = (int32_t)ofs;
      as->mrm.base = RID_DISPATCH;
      return;
    }
  } if (0) {
#else
    as->mrm.ofs = ir->i;
    as->mrm.base = RID_NONE;
  } else if (ir->o == IR_STRREF) {
    asm_fusestrref(as, ir, allow);
#endif
  } else {
    as->mrm.ofs = 0;
    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
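      /* E.g. ADD(ADD(base, BSHL(idx, 3)), ofs) turns into the single
      ** operand [base+idx*8+ofs], using the SIB byte for the scaled index.
      */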
      IRIns *irx;
      IRRef idx;
      Reg r;
      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
        ref = ir->op1;
        ir = IR(ref);
        if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
          goto noadd;
      }
      as->mrm.scale = XM_SCALE1;
      idx = ir->op1;
      ref = ir->op2;
      irx = IR(idx);
      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
        idx = ir->op2;
        ref = ir->op1;
        irx = IR(idx);
      }
      if (canfuse(as, irx) && ra_noreg(irx->r)) {
        if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
          idx = irx->op1;
          as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
        } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
          /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
          idx = irx->op1;
          as->mrm.scale = XM_SCALE2;
        }
      }
      r = ra_alloc1(as, idx, allow);
      rset_clear(allow, r);
      as->mrm.idx = (uint8_t)r;
    }
  noadd:
    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  }
}

/* Fuse load of 64 bit IR constant into memory operand. */
static Reg asm_fuseloadk64(ASMState *as, IRIns *ir)
{
  const uint64_t *k = &ir_k64(ir)->u64;
  if (!LJ_GC64 || checki32((intptr_t)k)) {
    as->mrm.ofs = ptr2addr(k);
    as->mrm.base = RID_NONE;
#if LJ_GC64
  } else if (checki32(dispofs(as, k))) {
    as->mrm.ofs = (int32_t)dispofs(as, k);
    as->mrm.base = RID_DISPATCH;
  } else if (checki32(mcpofs(as, k)) && checki32(mcpofs(as, k+1)) &&
             checki32(mctopofs(as, k)) && checki32(mctopofs(as, k+1))) {
    as->mrm.ofs = (int32_t)mcpofs(as, k);
    as->mrm.base = RID_RIP;
  } else {  /* Intern 64 bit constant at bottom of mcode. */
    if (ir->i) {
      lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i),
                 "bad interned 64 bit constant");
    } else {
      while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3;
      *(uint64_t*)as->mcbot = *k;
      ir->i = (int32_t)(as->mctop - as->mcbot);
      as->mcbot += 8;
      as->mclim = as->mcbot + MCLIM_REDZONE;
      lj_mcode_commitbot(as->J, as->mcbot);
    }
    as->mrm.ofs = (int32_t)mcpofs(as, as->mctop - ir->i);
    as->mrm.base = RID_RIP;
#endif
  }
  as->mrm.idx = RID_NONE;
  return RID_MRM;
}

/* Fuse load into memory operand.
**
** Important caveat: this may emit RIP-relative loads! So don't place any
** code emitters between this function and the use of its result.
** The only permitted exception is asm_guardcc().
*/
static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    if (allow != RSET_EMPTY) {  /* Fast path. */
      ra_noweak(as, ir->r);
      return ir->r;
    }
  fusespill:
    /* Force a spill if only memory operands are allowed (asm_x87load). */
    as->mrm.base = RID_ESP;
    as->mrm.ofs = ra_spill(as, ir);
    as->mrm.idx = RID_NONE;
    return RID_MRM;
  }
  if (ir->o == IR_KNUM) {
    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
    lj_assertA(allow != RSET_EMPTY, "no register allowed");
    if (!(avail & (avail-1)))  /* Fuse if less than two regs available. */
      return asm_fuseloadk64(as, ir);
  } else if (ref == REF_BASE || ir->o == IR_KINT64) {
    RegSet avail = as->freeset & ~as->modset & RSET_GPR;
    lj_assertA(allow != RSET_EMPTY, "no register allowed");
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      if (ref == REF_BASE) {
#if LJ_GC64
        as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->jit_base);
        as->mrm.base = RID_DISPATCH;
#else
        as->mrm.ofs = ptr2addr(&J2G(as->J)->jit_base);
        as->mrm.base = RID_NONE;
#endif
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      } else {
        return asm_fuseloadk64(as, ir);
      }
    }
  } else if (mayfuse(as, ref)) {
    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
          noconflict(as, ref, IR_RETF, 0) &&
          !(LJ_GC64 && irt_isaddr(ir->t))) {
        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
        as->mrm.ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
                      (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      }
    } else if (ir->o == IR_FLOAD) {
      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
      if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
          noconflict(as, ref, IR_FSTORE, 0)) {
        asm_fusefref(as, ir, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0) &&
          !(LJ_GC64 && irt_isaddr(ir->t))) {
        asm_fuseahuref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_XLOAD) {
      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
      */
      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
          noconflict(as, ref, IR_XSTORE, 0)) {
        asm_fusexref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_VLOAD && !(LJ_GC64 && irt_isaddr(ir->t))) {
      asm_fuseahuref(as, ir->op1, xallow);
      as->mrm.ofs += 8 * ir->op2;
      return RID_MRM;
    }
  }
  if (ir->o == IR_FLOAD && ir->op1 == REF_NIL) {
    asm_fusefref(as, ir, RSET_EMPTY);
    return RID_MRM;
  }
  if (!(as->freeset & allow) && !emit_canremat(ref) &&
      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
    goto fusespill;
  return ra_allocref(as, ref, allow);
}

#if LJ_64
/* Don't fuse a 32 bit load into a 64 bit operation. */
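/* Rationale: a fused memory operand of a 64 bit instruction would read a
** full 8 bytes, but only the low 4 bytes of a 32 bit value are valid in
** memory (e.g. in a 4 byte spill slot). Allocating a register instead is
** safe, since 32 bit loads zero-extend on x64.
*/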
static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
{
  if (is64 && !irt_is64(IR(ref)->t))
    return ra_alloc1(as, ref, allow);
  return asm_fuseload(as, ref, allow);
}
#else
#define asm_fuseloadm(as, ref, allow, is64)  asm_fuseload(as, (ref), (allow))
#endif

/* -- Calls --------------------------------------------------------------- */

/* Count the required number of stack slots for a call. */
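/* Slots are counted in 4 byte units, so each x64 stack argument takes two
** slots (cf. the 4*nslots stack adjustment in asm_callx below).
*/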
static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0;
#if LJ_64
  if (LJ_ABI_WIN) {
    nslots = (int)(nargs*2);  /* Only matters for more than four args. */
  } else {
    int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
    for (i = 0; i < nargs; i++)
      if (args[i] && irt_isfp(IR(args[i])->t)) {
        if (nfpr > 0) nfpr--; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots += 2;
      }
  }
#else
  int ngpr = 0;
  if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
    ngpr = 2;
  else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
    ngpr = 1;
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
#endif
  return nslots;
}

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = STACKARG_OFS;
#if LJ_64
  uint32_t gprs = REGARG_GPRS;
  Reg fpr = REGARG_FIRSTFPR;
#if !LJ_ABI_WIN
  MCode *patchnfpr = NULL;
#endif
#else
  uint32_t gprs = 0;
  if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
    if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
      gprs = (REGARG_GPRS & 31);
    else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
      gprs = REGARG_GPRS;
  }
#endif
  if ((void *)ci->func)
    emit_call(as, ci->func);
#if LJ_64
  if ((ci->flags & CCI_VARARG)) {  /* Special handling for vararg calls. */
#if LJ_ABI_WIN
    for (n = 0; n < 4 && n < nargs; n++) {
      IRIns *ir = IR(args[n]);
      if (irt_isfp(ir->t))  /* Duplicate FPRs in GPRs. */
        emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
                ((gprs >> (n*5)) & 31));  /* Either MOVD or MOVQ. */
    }
#else
    patchnfpr = --as->mcp;  /* Indicate number of used FPRs in register al. */
    *--as->mcp = XI_MOVrib | RID_EAX;
#endif
  }
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    Reg r;
#if LJ_64 && LJ_ABI_WIN
    /* Windows/x64 argument registers are strictly positional. */
    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
    fpr++; gprs >>= 5;
#elif LJ_64
    /* POSIX/x64 argument registers are used in order of appearance. */
    if (irt_isfp(ir->t)) {
      r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
    } else {
      r = gprs & 31; gprs >>= 5;
    }
#else
    if (ref && irt_isfp(ir->t)) {
      r = 0;
    } else {
      r = gprs & 31; gprs >>= 5;
      if (!ref) continue;
    }
#endif
    if (r) {  /* Argument is in a register. */
      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
#if LJ_64
        if (LJ_GC64 ? !(ir->o == IR_KINT || ir->o == IR_KNULL) : ir->o == IR_KINT64)
          emit_loadu64(as, r, ir_k64(ir)->u64);
        else
#endif
          emit_loadi(as, r, ir->i);
      } else {
        /* Must have been evicted. */
        lj_assertA(rset_test(as->freeset, r), "reg %d not free", r);
        if (ra_hasreg(ir->r)) {
          ra_noweak(as, ir->r);
          emit_movrr(as, ir, r, ir->r);
        } else {
          ra_allocref(as, ref, RID2RSET(r));
        }
      }
    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
      lj_assertA(!(irt_isfloat(ir->t) && irref_isk(ref)),
                 "unexpected float constant");
      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
        /* Split stores for unaligned FP consts. */
        emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
        emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
      } else {
        r = ra_alloc1(as, ref, RSET_FPR);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
                  r, RID_ESP, ofs);
      }
      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
    } else {  /* Non-FP argument is on stack. */
      if (LJ_32 && ref < ASMREF_TMP1) {
        emit_movmroi(as, RID_ESP, ofs, ir->i);
      } else {
        r = ra_alloc1(as, ref, RSET_GPR);
        emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
      }
      ofs += sizeof(intptr_t);
    }
    checkmclim(as);
  }
#if LJ_64 && !LJ_ABI_WIN
  if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
#endif
}

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    if (irt_isfp(ir->t)) {
      int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
#if LJ_64
      if ((ci->flags & CCI_CASTU64)) {
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
          emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
        }
        if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
#else
      /* Number result is in x87 st0 for x86 calling convention. */
      Reg dest = ir->r;
      if (ra_hasreg(dest)) {
        ra_free(as, dest);
        ra_modified(as, dest);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS,
                  dest, RID_ESP, ofs);
      }
      if ((ci->flags & CCI_CASTU64)) {
        emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
        emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
      } else {
        emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
                  irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
      }
#endif
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      lj_assertA(!irt_ispri(ir->t), "PRI dest");
      ra_destreg(as, ir, RID_RET);
    }
  } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
  }
}

/* Return a constant function pointer or NULL for indirect calls. */
static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
{
#if LJ_32
  UNUSED(as);
  if (irref_isk(func))
    return (void *)irf->i;
#else
  if (irref_isk(func)) {
    MCode *p;
    if (irf->o == IR_KINT64)
      p = (MCode *)(void *)ir_k64(irf)->u64;
    else
      p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
    if (p - as->mcp == (int32_t)(p - as->mcp))
      return p;  /* Call target is still in +-2GB range. */
    /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
  }
#endif
  return NULL;
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  int32_t spadj = 0;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
#if LJ_32
  /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
  if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
    spadj = 4 * asm_count_call_slots(as, &ci, args);
#endif
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  ci.func = (ASMFunction)asm_callx_func(as, irf, func);
  if (!(void *)ci.func) {
    /* Use a (hoistable) non-scratch register for indirect calls. */
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r = ra_alloc1(as, func, allow);
    if (LJ_32) emit_spsub(as, spadj);  /* Above code may cause restores! */
    emit_rr(as, XO_GROUP5, XOg_CALL, r);
  } else if (LJ_32) {
    emit_spsub(as, spadj);
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
#if LJ_FR2
  Reg rpc = ra_scratch(as, rset_exclude(RSET_GPR, base));
#endif
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
#if LJ_FR2
  emit_rmro(as, XO_CMP, rpc|REX_GC64, base, -8);
  emit_loadu64(as, rpc, u64ptr(pc));
#else
  emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
#endif
}

/* -- Buffer operations --------------------------------------------------- */

#if LJ_HASBUFFER
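/* Write the SBuf header: keep the flag bits stored in sb->L and merge in
** a pointer to the current lua_State. Since machine code is emitted
** backwards, the execution order is load, AND, OR, store.
*/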
static void asm_bufhdr_write(ASMState *as, Reg sb)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  IRIns irgc;
  irgc.ot = IRT(0, IRT_PGC);  /* GC type. */
  emit_storeofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
  emit_opgl(as, XO_ARITH(XOg_OR), tmp|REX_GC64, cur_L);
  emit_gri(as, XG_ARITHi(XOg_AND), tmp, SBUF_MASK_FLAG);
  emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
}
#endif

/* -- Type conversions ---------------------------------------------------- */

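/* Checked conversion of a number to an integer: truncate with CVTTSD2SI,
** convert back and compare. The CC_P guard catches NaN (UCOMISD sets PF),
** the CC_NE guard catches non-integral numbers.
*/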
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_P);
  asm_guardcc(as, CC_NE);
  emit_rr(as, XO_UCOMISD, left, tmp);
  emit_rr(as, XO_CVTSI2SD, tmp, dest);
  emit_rr(as, XO_XORPS, tmp, tmp);  /* Avoid partial register stall. */
  emit_rr(as, XO_CVTTSD2SI, dest, left);
  /* Can't fuse since left is needed twice. */
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp = ra_noreg(IR(ir->op1)->r) ?
              ra_alloc1(as, ir->op1, RSET_FPR) :
              ra_scratch(as, RSET_FPR);
  Reg right;
  emit_rr(as, XO_MOVDto, tmp, dest);
  right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
  emit_mrm(as, XO_ADDSD, tmp, right);
  ra_left(as, tmp, ir->op1);
}

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
  lj_assertA(!(LJ_32 && (irt_isint64(ir->t) || st64)),
             "IR %04d has unsplit 64 bit type",
             (int)(ir - as->ir) - REF_BIAS);
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      Reg left = asm_fuseload(as, lref, RSET_FPR);
      emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
      if (left == dest) return;  /* Avoid the XO_XORPS. */
    } else if (LJ_32 && st == IRT_U32) {  /* U32 to FP conversion on x86. */
      /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
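      /* The XORPS merges the u32 into the low mantissa bits of the bias
      ** constant 2^52+2^51, which yields the double 2^52+2^51+u32 exactly;
      ** the SUBSD then leaves the integer value as a number.
      */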
      cTValue *k = &as->J->k64[LJ_K64_TOBIT];
      Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_rr(as, XO_CVTSD2SS, dest, dest);
      emit_rr(as, XO_SUBSD, dest, bias);  /* Subtract 2^52+2^51 bias. */
      emit_rr(as, XO_XORPS, dest, bias);  /* Merge bias and integer. */
      emit_rma(as, XO_MOVSD, bias, k);
      emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
      return;
    } else {  /* Integer to FP conversion. */
      Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
                 ra_alloc1(as, lref, RSET_GPR) :
                 asm_fuseloadm(as, lref, RSET_GPR, st64);
      if (LJ_64 && st == IRT_U64) {
        MCLabel l_end = emit_label(as);
        cTValue *k = &as->J->k64[LJ_K64_2P64];
        emit_rma(as, XO_ADDSD, dest, k);  /* Add 2^64 to compensate. */
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, left|REX_64, left);  /* Check if u64 >= 2^63. */
      }
      emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
               dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
    }
    emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      x86Op op = st == IRT_NUM ? XO_CVTTSD2SI : XO_CVTTSS2SI;
      if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
        /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
        /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
        Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
                                          ra_scratch(as, RSET_FPR);
        MCLabel l_end = emit_label(as);
        if (LJ_32)
          emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
        emit_rr(as, op, dest|REX_64, tmp);
        if (st == IRT_NUM)
          emit_rma(as, XO_ADDSD, tmp, &as->J->k64[LJ_K64_M2P64_31]);
        else
          emit_rma(as, XO_ADDSS, tmp, &as->J->k32[LJ_K32_M2P64_31]);
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, dest|REX_64, dest);  /* Check if dest negative. */
        emit_rr(as, op, dest|REX_64, tmp);
        ra_left(as, tmp, lref);
      } else {
        if (LJ_64 && irt_isu32(ir->t))
          emit_rr(as, XO_MOV, dest, dest);  /* Zero hiword. */
        emit_mrm(as, op,
                 dest|((LJ_64 &&
                        (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
                 asm_fuseload(as, lref, RSET_FPR));
      }
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    RegSet allow = RSET_GPR;
    x86Op op;
    lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
    if (st == IRT_I8) {
      op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_U8) {
      op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_I16) {
      op = XO_MOVSXw;
    } else {
      op = XO_MOVZXw;
    }
    left = asm_fuseload(as, lref, allow);
    /* Add extra MOV if source is already in wrong register. */
    if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
      Reg tmp = ra_scratch(as, allow);
      emit_rr(as, op, dest, tmp);
      emit_rr(as, XO_MOV, tmp, left);
    } else {
      emit_mrm(as, op, dest, left);
    }
  } else {  /* 32/64 bit integer conversions. */
    if (LJ_32) {  /* Only need to handle 32/32 bit no-op (cast) on x86. */
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
    } else if (irt_is64(ir->t)) {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
      }
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 && !(ir->op2 & IRCONV_NONE)) {
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        emit_mrm(as, XO_MOV, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}

#if LJ_32 && LJ_HASFFI
/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */

/* 64 bit integer to FP conversion in 32 bit mode. */
static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
{
  Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
    emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, dest, RID_ESP, ofs);
  }
  emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
            irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
  if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
    MCLabel l_end = emit_label(as);
    emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_2P64]);
    emit_sjcc(as, CC_NS, l_end);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if u64 >= 2^63. */
  } else {
    lj_assertA(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64, "bad type for CONV");
  }
  emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
  /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
  emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
  emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
}

/* FP to 64 bit integer conversion in 32 bit mode. */
static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  Reg lo, hi;
  lj_assertA(st == IRT_NUM || st == IRT_FLOAT, "bad type for CONV");
  lj_assertA(dt == IRT_I64 || dt == IRT_U64, "bad type for CONV");
  hi = ra_dest(as, ir, RSET_GPR);
  lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
  if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
  /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
  if (!(as->flags & JIT_F_SSE3)) {  /* Set FPU rounding mode to default. */
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
    emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
    emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
  }
  if (dt == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
    MCLabel l_pop, l_end = emit_label(as);
    emit_x87op(as, XI_FPOP);
    l_pop = emit_label(as);
    emit_sjmp(as, l_end);
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3))
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
    else
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_M2P64]);
    emit_sjcc(as, CC_NS, l_pop);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if out-of-range (2^63). */
  }
  emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
  if ((as->flags & JIT_F_SSE3)) {  /* Truncation is easy with SSE3. */
    emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
  } else {  /* Otherwise set FPU rounding mode to truncate before the store. */
    emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
    emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
    emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
    emit_loadi(as, lo, 0xc00);
    emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
  }
  if (dt == IRT_U64)
    emit_x87op(as, XI_FDUP);
  emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
           st == IRT_NUM ? XOg_FLDq : XOg_FLDd,
           asm_fuseload(as, ir->op1, RSET_EMPTY));
}

static void asm_conv64(ASMState *as, IRIns *ir)
{
  if (irt_isfp(ir->t))
    asm_conv_fp_int64(as, ir);
  else
    asm_conv_int64_fp(as, ir);
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  /* Force a spill slot for the destination register (if any). */
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
    rset_set(drop, ir->r);  /* WIN64 doesn't spill all FPRs. */
  ra_evictset(as, drop);
  asm_guardcc(as, CC_E);
  emit_rr(as, XO_TEST, RID_RET, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
            RID_ESP, sps_scale(ir->s));
}

/* -- Memory references --------------------------------------------------- */

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
{
  if ((mode & IRTMPREF_IN1)) {
    IRIns *ir = IR(ref);
    if (irt_isnum(ir->t)) {
      if (irref_isk(ref) && !(mode & IRTMPREF_OUT1)) {
        /* Use the number constant itself as a TValue. */
        emit_loada(as, dest, ir_knum(ir));
        return;
      }
      emit_rmro(as, XO_MOVSDto, ra_alloc1(as, ref, RSET_FPR), dest, 0);
    } else {
#if LJ_GC64
      if (irref_isk(ref)) {
        TValue k;
        lj_ir_kvalue(as->J->L, &k, ir);
        emit_movmroi(as, dest, 4, k.u32.hi);
        emit_movmroi(as, dest, 0, k.u32.lo);
      } else {
        /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
        if (irt_is64(ir->t)) {
          emit_u32(as, irt_toitype(ir->t) << 15);
          emit_rmro(as, XO_ARITHi, XOg_OR, dest, 4);
        } else {
          emit_movmroi(as, dest, 4, (irt_toitype(ir->t) << 15));
        }
        emit_movtomro(as, REX_64IR(ir, src), dest, 0);
      }
#else
      if (!irref_isk(ref)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
        emit_movtomro(as, REX_64IR(ir, src), dest, 0);
      } else if (!irt_ispri(ir->t)) {
        emit_movmroi(as, dest, 0, ir->i);
      }
      if (!(LJ_64 && irt_islightud(ir->t)))
        emit_movmroi(as, dest, 4, irt_toitype(ir->t));
#endif
    }
  }
  emit_loada(as, dest, &J2G(as->J)->tmptv);  /* g->tmptv holds the TValue(s). */
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusearef(as, ir, RSET_GPR);
  if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
    emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
  else if (as->mrm.base != dest)
    emit_rr(as, XO_MOV, dest|REX_GC64, as->mrm.base);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, tmp = RID_NONE;
  IRIns *irkey = IR(ir->op2);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  if (!isk) {
    rset_clear(allow, tab);
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    if (LJ_GC64 || !irt_isstr(kt))
      tmp = ra_scratch(as, rset_exclude(allow, key));
  }

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  if (merge == IR_NE)
    asm_guardcc(as, CC_E);  /* XI_JMP is not found by lj_asm_patchexit. */
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = emit_sjcc_label(as, CC_NZ);
  emit_rr(as, XO_TEST, dest|REX_GC64, dest);
  emit_rmro(as, XO_MOV, dest|REX_GC64, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_E);
  else
    emit_sjcc(as, CC_E, l_end);
  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 (int32_t)ir_knum(irkey)->u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 (int32_t)ir_knum(irkey)->u32.hi);
    } else {
      emit_sjcc(as, CC_P, l_next);
      emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
      emit_sjcc(as, CC_AE, l_next);
      /* The type check avoids NaN penalties and complaints from Valgrind. */
#if LJ_64 && !LJ_GC64
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
      emit_i8(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
    }
#if LJ_64 && !LJ_GC64
  } else if (irt_islightud(kt)) {
    emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
#endif
#if LJ_GC64
  } else if (irt_isaddr(kt)) {
    if (isk) {
      TValue k;
      k.u64 = ((uint64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 k.u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 k.u32.hi);
    } else {
      emit_rmro(as, XO_CMP, tmp|REX_64, dest, offsetof(Node, key.u64));
    }
  } else {
    lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
    emit_u32(as, (irt_toitype(kt)<<15)|0x7fff);
    emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
  } else {
    if (!irt_ispri(kt)) {
      lj_assertA(irt_isaddr(kt), "bad HREF key type");
      if (isk)
        emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
                   ptr2addr(ir_kgc(irkey)));
      else
        emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
      emit_sjcc(as, CC_NE, l_next);
    }
    lj_assertA(!irt_isnil(kt), "bad HREF key type");
    emit_i8(as, irt_toitype(kt));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
  }
  emit_sfixup(as, l_loop);
  checkmclim(as);
#if LJ_GC64
  if (!isk && irt_isaddr(kt)) {
    emit_rr(as, XO_OR, tmp|REX_64, key);
    emit_loadu64(as, tmp, (uint64_t)irt_toitype(kt) << 47);
  }
#endif

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_rmro(as, XO_MOV, dest|REX_GC64, tab, offsetof(GCtab, node));
  } else {
    emit_rmro(as, XO_ARITH(XOg_ADD), dest|REX_GC64, tab, offsetof(GCtab,node));
    emit_shifti(as, XOg_SHL, dest, 3);
    emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
    if (isk) {
      emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, sid));
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hashrot() in lj_tab.c. */
      emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
      emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
      emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
      emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
      emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
      emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
      if (irt_isnum(kt)) {
        emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
#if LJ_64
        emit_shifti(as, XOg_SHR|REX_64, dest, 32);
        emit_rr(as, XO_MOV, tmp, dest);
        emit_rr(as, XO_MOVDto, key|REX_64, dest);
#else
        emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
        emit_rr(as, XO_MOVDto, key, tmp);
#endif
      } else {
        emit_rr(as, XO_MOV, tmp, key);
#if LJ_GC64
        checkmclim(as);
        emit_gri(as, XG_ARITHi(XOg_XOR), dest, irt_toitype(kt) << 15);
        if ((as->flags & JIT_F_BMI2)) {
          emit_i8(as, 32);
          emit_mrm(as, XV_RORX|VEX_64, dest, key);
        } else {
          emit_shifti(as, XOg_SHR|REX_64, dest, 32);
          emit_rr(as, XO_MOV, dest|REX_64, key|REX_64);
        }
#else
        emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
#endif
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
#if !LJ_64 || (defined(LUAJIT_USE_VALGRIND) && !LJ_GC64)
  MCLabel l_exit;
#endif
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (ra_hasreg(dest)) {
    if (ofs != 0) {
      if (dest == node)
        emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, ofs);
      else
        emit_rmro(as, XO_LEA, dest|REX_GC64, node, ofs);
    } else if (dest != node) {
      emit_rr(as, XO_MOV, dest|REX_GC64, node);
    }
  }
  asm_guardcc(as, CC_NE);
#if LJ_64 && (!defined(LUAJIT_USE_VALGRIND) || LJ_GC64)
  if (!irt_ispri(irkey->t)) {
    Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
    emit_rmro(as, XO_CMP, key|REX_64, node,
              ofs + (int32_t)offsetof(Node, key.u64));
    lj_assertA(irt_isnum(irkey->t) || irt_isgcv(irkey->t),
               "bad HREFK key type");
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
#if LJ_GC64
                          ((uint64_t)irt_toitype(irkey->t) << 47) |
                          (uint64_t)ir_kgc(irkey));
#else
                          ((uint64_t)irt_toitype(irkey->t) << 32) |
                          (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
#endif
  } else {
    lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
#if LJ_GC64
    emit_i32(as, (irt_toitype(irkey->t)<<15)|0x7fff);
    emit_rmro(as, XO_ARITHi, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
#else
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
#endif
  }
#else
  l_exit = emit_label(as);
  if (irt_isnum(irkey->t)) {
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.lo),
               (int32_t)ir_knum(irkey)->u32.lo);
    emit_sjcc(as, CC_NE, l_exit);
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.hi),
               (int32_t)ir_knum(irkey)->u32.hi);
  } else {
    if (!irt_ispri(irkey->t)) {
      lj_assertA(irt_isgcv(irkey->t), "bad HREFK key type");
      emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
                 ofs + (int32_t)offsetof(Node, key.gcr),
                 ptr2addr(ir_kgc(irkey)));
      emit_sjcc(as, CC_NE, l_exit);
    }
    lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#endif
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_rma(as, XO_MOV, dest|REX_GC64, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      emit_rmro(as, XO_LEA, dest|REX_GC64, uv, offsetof(GCupval, tv));
      asm_guardcc(as, CC_NE);
      emit_i8(as, 1);
      emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
    } else {
      emit_rmro(as, XO_MOV, dest|REX_GC64, uv, offsetof(GCupval, v));
    }
    emit_rmro(as, XO_MOV, uv|REX_GC64, func,
              (int32_t)offsetof(GCfuncL, uvptr) +
              (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusefref(as, ir, RSET_GPR);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusestrref(as, ir, RSET_GPR);
  if (as->mrm.base == RID_NONE)
    emit_loadi(as, dest, as->mrm.ofs);
  else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
    emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, as->mrm.ofs);
  else
    emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
}

/* -- Loads and stores ---------------------------------------------------- */

static void asm_fxload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  x86Op xo;
  if (ir->o == IR_FLOAD)
    asm_fusefref(as, ir, RSET_GPR);
  else
    asm_fusexref(as, ir->op1, RSET_GPR);
  /* ir->op2 is ignored -- unaligned loads are ok on x86. */
  switch (irt_type(ir->t)) {
  case IRT_I8: xo = XO_MOVSXb; break;
  case IRT_U8: xo = XO_MOVZXb; break;
  case IRT_I16: xo = XO_MOVSXw; break;
  case IRT_U16: xo = XO_MOVZXw; break;
  case IRT_NUM: xo = XO_MOVSD; break;
  case IRT_FLOAT: xo = XO_MOVSS; break;
  default:
    if (LJ_64 && irt_is64(ir->t))
      dest |= REX_64;
    else
      lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
                 "unsplit 64 bit load");
    xo = XO_MOV;
    break;
  }
  emit_mrm(as, xo, dest, RID_MRM);
}

#define asm_fload(as, ir)  asm_fxload(as, ir)
#define asm_xload(as, ir)  asm_fxload(as, ir)

static void asm_fxstore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg src = RID_NONE, osrc = RID_NONE;
  int32_t k = 0;
  if (ir->r == RID_SINK)
    return;
  /* The IRT_I16/IRT_U16 stores should never be simplified for constant
  ** values since mov word [mem], imm16 has a length-changing prefix.
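  ** (The operand-size prefix plus a 16 bit immediate is a length-changing
  ** prefix, which stalls instruction predecoding on many x86 cores.)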
1456 */
1457 if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
1458 !asm_isk32(as, ir->op2, &k)) {
1459 RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
1460 (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
1461 src = osrc = ra_alloc1(as, ir->op2, allow8);
1462 if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */
1463 rset_clear(allow, osrc);
1464 src = ra_scratch(as, allow8);
1465 }
1466 rset_clear(allow, src);
1467 }
1468 if (ir->o == IR_FSTORE) {
1469 asm_fusefref(as, IR(ir->op1), allow);
1470 } else {
1471 asm_fusexref(as, ir->op1, allow);
1472 if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
1473 }
1474 if (ra_hasreg(src)) {
1475 x86Op xo;
1476 switch (irt_type(ir->t)) {
1477 case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
1478 case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
1479 case IRT_NUM: xo = XO_MOVSDto; break;
1480 case IRT_FLOAT: xo = XO_MOVSSto; break;
1481 #if LJ_64 && !LJ_GC64
1482 case IRT_LIGHTUD:
1483 /* NYI: mask 64 bit lightuserdata. */
1484 lj_assertA(0, "store of lightuserdata");
1485 #endif
1486 default:
1487 if (LJ_64 && irt_is64(ir->t))
1488 src |= REX_64;
1489 else
1490 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
1491 "unsplit 64 bit store");
1492 xo = XO_MOVto;
1493 break;
1494 }
1495 emit_mrm(as, xo, src, RID_MRM);
1496 if (!LJ_64 && src != osrc) {
1497 ra_noweak(as, osrc);
1498 emit_rr(as, XO_MOV, src, osrc);
1499 }
1500 } else {
1501 if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
1502 emit_i8(as, k);
1503 emit_mrm(as, XO_MOVmib, 0, RID_MRM);
1504 } else {
1505 lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
1506 irt_isaddr(ir->t), "bad store type");
1507 emit_i32(as, k);
1508 emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
1509 }
1510 }
1511 }
1512
1513 #define asm_fstore(as, ir) asm_fxstore(as, ir)
1514 #define asm_xstore(as, ir) asm_fxstore(as, ir)
1515
1516 #if LJ_64 && !LJ_GC64
asm_load_lightud64(ASMState * as,IRIns * ir,int typecheck)1517 static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
1518 {
1519 if (ra_used(ir) || typecheck) {
1520 Reg dest = ra_dest(as, ir, RSET_GPR);
1521 if (typecheck) {
1522 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
1523 asm_guardcc(as, CC_NE);
1524 emit_i8(as, -2);
1525 emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
1526 emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
1527 emit_rr(as, XO_MOV, tmp|REX_64, dest);
1528 }
1529 return dest;
1530 } else {
1531 return RID_NONE;
1532 }
1533 }
1534 #endif
1535
asm_ahuvload(ASMState * as,IRIns * ir)1536 static void asm_ahuvload(ASMState *as, IRIns *ir)
1537 {
1538 #if LJ_GC64
1539 Reg tmp = RID_NONE;
1540 #endif
1541 lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
1542 (LJ_DUALNUM && irt_isint(ir->t)),
1543 "bad load type %d", irt_type(ir->t));
1544 #if LJ_64 && !LJ_GC64
1545 if (irt_islightud(ir->t)) {
1546 Reg dest = asm_load_lightud64(as, ir, 1);
1547 if (ra_hasreg(dest)) {
1548 asm_fuseahuref(as, ir->op1, RSET_GPR);
1549 if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2;
1550 emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
1551 }
1552 return;
1553 } else
1554 #endif
1555 if (ra_used(ir)) {
1556 RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
1557 Reg dest = ra_dest(as, ir, allow);
1558 asm_fuseahuref(as, ir->op1, RSET_GPR);
1559 if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2;
1560 #if LJ_GC64
1561 if (irt_isaddr(ir->t)) {
1562 emit_shifti(as, XOg_SHR|REX_64, dest, 17);
1563 asm_guardcc(as, CC_NE);
1564 emit_i8(as, irt_toitype(ir->t));
1565 emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
1566 emit_i8(as, XI_O16);
1567 if ((as->flags & JIT_F_BMI2)) {
1568 emit_i8(as, 47);
1569 emit_mrm(as, XV_RORX|VEX_64, dest, RID_MRM);
1570 } else {
1571 emit_shifti(as, XOg_ROR|REX_64, dest, 47);
1572 emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
1573 }
1574 return;
1575 } else
1576 #endif
1577 emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XO_MOVSD, dest, RID_MRM);
1578 } else {
1579 RegSet gpr = RSET_GPR;
1580 #if LJ_GC64
1581 if (irt_isaddr(ir->t)) {
1582 tmp = ra_scratch(as, RSET_GPR);
1583 gpr = rset_exclude(gpr, tmp);
1584 }
1585 #endif
1586 asm_fuseahuref(as, ir->op1, gpr);
1587 if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2;
1588 }
1589 /* Always do the type check, even if the load result is unused. */
1590 as->mrm.ofs += 4;
1591 asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
1592 if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
1593 lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
1594 "bad load type %d", irt_type(ir->t));
1595 #if LJ_GC64
1596 emit_u32(as, LJ_TISNUM << 15);
1597 #else
1598 emit_u32(as, LJ_TISNUM);
1599 #endif
1600 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
1601 #if LJ_GC64
1602 } else if (irt_isaddr(ir->t)) {
1603 as->mrm.ofs -= 4;
1604 emit_i8(as, irt_toitype(ir->t));
1605 emit_mrm(as, XO_ARITHi8, XOg_CMP, tmp);
1606 emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
1607 emit_mrm(as, XO_MOV, tmp|REX_64, RID_MRM);
1608 } else if (irt_isnil(ir->t)) {
1609 as->mrm.ofs -= 4;
1610 emit_i8(as, -1);
1611 emit_mrm(as, XO_ARITHi8, XOg_CMP|REX_64, RID_MRM);
1612 } else {
1613 emit_u32(as, (irt_toitype(ir->t) << 15) | 0x7fff);
1614 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
1615 #else
1616 } else {
1617 emit_i8(as, irt_toitype(ir->t));
1618 emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
1619 #endif
1620 }
1621 }
1622
1623 static void asm_ahustore(ASMState *as, IRIns *ir)
1624 {
1625 if (ir->r == RID_SINK)
1626 return;
1627 if (irt_isnum(ir->t)) {
1628 Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
1629 asm_fuseahuref(as, ir->op1, RSET_GPR);
1630 emit_mrm(as, XO_MOVSDto, src, RID_MRM);
1631 #if LJ_64 && !LJ_GC64
1632 } else if (irt_islightud(ir->t)) {
1633 Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
1634 asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
1635 emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
1636 #endif
1637 #if LJ_GC64
1638 } else if (irref_isk(ir->op2)) {
1639 TValue k;
1640 lj_ir_kvalue(as->J->L, &k, IR(ir->op2));
1641 asm_fuseahuref(as, ir->op1, RSET_GPR);
1642 if (tvisnil(&k)) {
1643 emit_i32(as, -1);
1644 emit_mrm(as, XO_MOVmi, REX_64, RID_MRM);
1645 } else {
1646 emit_u32(as, k.u32.lo);
1647 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1648 as->mrm.ofs += 4;
1649 emit_u32(as, k.u32.hi);
1650 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1651 }
1652 #endif
1653 } else {
1654 IRIns *irr = IR(ir->op2);
1655 RegSet allow = RSET_GPR;
1656 Reg src = RID_NONE;
1657 if (!irref_isk(ir->op2)) {
1658 src = ra_alloc1(as, ir->op2, allow);
1659 rset_clear(allow, src);
1660 }
1661 asm_fuseahuref(as, ir->op1, allow);
1662 if (ra_hasreg(src)) {
1663 #if LJ_GC64
1664 if (!(LJ_DUALNUM && irt_isinteger(ir->t))) {
1665 /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
1666 as->mrm.ofs += 4;
1667 emit_u32(as, irt_toitype(ir->t) << 15);
1668 emit_mrm(as, XO_ARITHi, XOg_OR, RID_MRM);
1669 as->mrm.ofs -= 4;
1670 emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
1671 return;
1672 }
1673 #endif
1674 emit_mrm(as, XO_MOVto, src, RID_MRM);
1675 } else if (!irt_ispri(irr->t)) {
1676 lj_assertA(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)),
1677 "bad store type");
1678 emit_i32(as, irr->i);
1679 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1680 }
1681 as->mrm.ofs += 4;
1682 #if LJ_GC64
1683 lj_assertA(LJ_DUALNUM && irt_isinteger(ir->t), "bad store type");
1684 emit_i32(as, LJ_TNUMX << 15);
1685 #else
1686 emit_i32(as, (int32_t)irt_toitype(ir->t));
1687 #endif
1688 emit_mrm(as, XO_MOVmi, 0, RID_MRM);
1689 }
1690 }
1691
1692 static void asm_sload(ASMState *as, IRIns *ir)
1693 {
1694 int32_t ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
1695 (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
1696 IRType1 t = ir->t;
1697 Reg base;
1698 lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
1699 "bad parent SLOAD"); /* Handled by asm_head_side(). */
1700 lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
1701 "inconsistent SLOAD variant");
1702 lj_assertA(LJ_DUALNUM ||
1703 !irt_isint(t) ||
1704 (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME|IRSLOAD_KEYINDEX)),
1705 "bad SLOAD type");
1706 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
1707 Reg left = ra_scratch(as, RSET_FPR);
1708 asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */
1709 base = ra_alloc1(as, REF_BASE, RSET_GPR);
1710 emit_rmro(as, XO_MOVSD, left, base, ofs);
1711 t.irt = IRT_NUM; /* Continue with a regular number type check. */
1712 #if LJ_64 && !LJ_GC64
1713 } else if (irt_islightud(t)) {
1714 Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
1715 if (ra_hasreg(dest)) {
1716 base = ra_alloc1(as, REF_BASE, RSET_GPR);
1717 emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
1718 }
1719 return;
1720 #endif
1721 } else if (ra_used(ir)) {
1722 RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
1723 Reg dest = ra_dest(as, ir, allow);
1724 base = ra_alloc1(as, REF_BASE, RSET_GPR);
1725 lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t),
1726 "bad SLOAD type %d", irt_type(t));
1727 if ((ir->op2 & IRSLOAD_CONVERT)) {
1728 t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */
1729 emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs);
1730 } else {
1731 #if LJ_GC64
1732 if (irt_isaddr(t)) {
1733 /* LJ_GC64 type check + tag removal without BMI2 and with BMI2:
1734 **
1735 **   mov r64, [addr]      rorx r64, [addr], 47
1736 **   ror r64, 47
1737 **   cmp r16, itype       cmp r16, itype
1738 **   jne ->exit           jne ->exit
1739 **   shr r64, 16          shr r64, 16
1740 */
1741 emit_shifti(as, XOg_SHR|REX_64, dest, 17);
1742 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1743 asm_guardcc(as, CC_NE);
1744 emit_i8(as, irt_toitype(t));
1745 emit_rr(as, XO_ARITHi8, XOg_CMP, dest);
1746 emit_i8(as, XI_O16);
1747 }
1748 if ((as->flags & JIT_F_BMI2)) {
1749 emit_i8(as, 47);
1750 emit_rmro(as, XV_RORX|VEX_64, dest, base, ofs);
1751 } else {
1752 if ((ir->op2 & IRSLOAD_TYPECHECK))
1753 emit_shifti(as, XOg_ROR|REX_64, dest, 47);
1754 else
1755 emit_shifti(as, XOg_SHL|REX_64, dest, 17);
1756 emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
1757 }
1758 return;
1759 } else
1760 #endif
1761 emit_rmro(as, irt_isnum(t) ? XO_MOVSD : XO_MOV, dest, base, ofs);
1762 }
1763 } else {
1764 if (!(ir->op2 & IRSLOAD_TYPECHECK))
1765 return; /* No type check: avoid base alloc. */
1766 base = ra_alloc1(as, REF_BASE, RSET_GPR);
1767 }
1768 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1769 /* Need type check, even if the load result is unused. */
1770 asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
1771 if (LJ_64 && irt_type(t) >= IRT_NUM) {
1772 lj_assertA(irt_isinteger(t) || irt_isnum(t),
1773 "bad SLOAD type %d", irt_type(t));
1774 #if LJ_GC64
1775 emit_u32(as, LJ_TISNUM << 15);
1776 #else
1777 emit_u32(as, LJ_TISNUM);
1778 #endif
1779 emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
1780 #if LJ_GC64
1781 } else if (irt_isnil(t)) {
1782 /* LJ_GC64 type check for nil:
1783 **
1784 ** cmp qword [addr], -1
1785 ** jne ->exit
1786 */
1787 emit_i8(as, -1);
1788 emit_rmro(as, XO_ARITHi8, XOg_CMP|REX_64, base, ofs);
1789 } else if (irt_ispri(t)) {
1790 emit_u32(as, (irt_toitype(t) << 15) | 0x7fff);
1791 emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
1792 } else {
1793 /* LJ_GC64 type check only:
1794 **
1795 ** mov r64, [addr]
1796 ** sar r64, 47
1797 ** cmp r32, itype
1798 ** jne ->exit
1799 */
1800 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, base));
1801 emit_i8(as, irt_toitype(t));
1802 emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
1803 emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
1804 emit_rmro(as, XO_MOV, tmp|REX_64, base, ofs);
1805 #else
1806 } else {
1807 emit_i8(as, irt_toitype(t));
1808 emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
1809 #endif
1810 }
1811 }
1812 }
1813
1814 /* -- Allocations --------------------------------------------------------- */
1815
1816 #if LJ_HASFFI
1817 static void asm_cnew(ASMState *as, IRIns *ir)
1818 {
1819 CTState *cts = ctype_ctsG(J2G(as->J));
1820 CTypeID id = (CTypeID)IR(ir->op1)->i;
1821 CTSize sz;
1822 CTInfo info = lj_ctype_info(cts, id, &sz);
1823 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1824 IRRef args[4];
1825 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
1826 "bad CNEW/CNEWI operands");
1827
1828 as->gcsteps++;
1829 asm_setupresult(as, ir, ci); /* GCcdata * */
1830
1831 /* Initialize immutable cdata object. */
1832 if (ir->o == IR_CNEWI) {
1833 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1834 #if LJ_64
1835 Reg r64 = sz == 8 ? REX_64 : 0;
1836 if (irref_isk(ir->op2)) {
1837 IRIns *irk = IR(ir->op2);
1838 uint64_t k = (irk->o == IR_KINT64 ||
1839 (LJ_GC64 && (irk->o == IR_KPTR || irk->o == IR_KKPTR))) ?
1840 ir_k64(irk)->u64 : (uint64_t)(uint32_t)irk->i;
1841 if (sz == 4 || checki32((int64_t)k)) {
1842 emit_i32(as, (int32_t)k);
1843 emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
1844 } else {
1845 emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
1846 emit_loadu64(as, RID_ECX, k);
1847 }
1848 } else {
1849 Reg r = ra_alloc1(as, ir->op2, allow);
1850 emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
1851 }
1852 #else
1853 int32_t ofs = sizeof(GCcdata);
1854 if (sz == 8) {
1855 ofs += 4; ir++;
1856 lj_assertA(ir->o == IR_HIOP, "missing CNEWI HIOP");
1857 }
1858 do {
1859 if (irref_isk(ir->op2)) {
1860 emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
1861 } else {
1862 Reg r = ra_alloc1(as, ir->op2, allow);
1863 emit_movtomro(as, r, RID_RET, ofs);
1864 rset_clear(allow, r);
1865 }
1866 if (ofs == sizeof(GCcdata)) break;
1867 ofs -= 4; ir--;
1868 } while (1);
1869 #endif
1870 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
1871 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
1872 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
1873 args[0] = ASMREF_L; /* lua_State *L */
1874 args[1] = ir->op1; /* CTypeID id */
1875 args[2] = ir->op2; /* CTSize sz */
1876 args[3] = ASMREF_TMP1; /* CTSize align */
1877 asm_gencall(as, ci, args);
1878 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
1879 return;
1880 }
1881
1882 /* Combine initialization of marked, gct and ctypeid. */
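/* Field layout of the combined 32 bit store, derived from the constants
** used below: byte 0 = marked (currentwhite & LJ_GC_WHITES),
** byte 1 = gct (~LJ_TCDATA), bytes 2-3 = ctypeid.
*/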
1883 emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
1884 emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
1885 (int32_t)((~LJ_TCDATA<<8)+(id<<16)));
1886 emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
1887 emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
1888
1889 args[0] = ASMREF_L; /* lua_State *L */
1890 args[1] = ASMREF_TMP1; /* MSize size */
1891 asm_gencall(as, ci, args);
1892 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
1893 }
1894 #endif
1895
1896 /* -- Write barriers ------------------------------------------------------ */
1897
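/* A sketch of the emitted table barrier, in execution order (the
** assembler emits backwards, so read the calls below bottom-up):
**
**   test byte [tab+marked], LJ_GC_BLACK
**   jz >1
**   and byte [tab+marked], ~LJ_GC_BLACK
**   mov tmp, [g->gc.grayagain]
**   mov [g->gc.grayagain], tab
**   mov [tab+gclist], tmp
** 1:
*/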
1898 static void asm_tbar(ASMState *as, IRIns *ir)
1899 {
1900 Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
1901 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
1902 MCLabel l_end = emit_label(as);
1903 emit_movtomro(as, tmp|REX_GC64, tab, offsetof(GCtab, gclist));
1904 emit_setgl(as, tab, gc.grayagain);
1905 emit_getgl(as, tmp, gc.grayagain);
1906 emit_i8(as, ~LJ_GC_BLACK);
1907 emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
1908 emit_sjcc(as, CC_Z, l_end);
1909 emit_i8(as, LJ_GC_BLACK);
1910 emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
1911 }
1912
1913 static void asm_obar(ASMState *as, IRIns *ir)
1914 {
1915 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
1916 IRRef args[2];
1917 MCLabel l_end;
1918 Reg obj;
1919 /* No need for other object barriers (yet). */
1920 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
1921 ra_evictset(as, RSET_SCRATCH);
1922 l_end = emit_label(as);
1923 args[0] = ASMREF_TMP1; /* global_State *g */
1924 args[1] = ir->op1; /* TValue *tv */
1925 asm_gencall(as, ci, args);
1926 emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
1927 obj = IR(ir->op1)->r;
1928 emit_sjcc(as, CC_Z, l_end);
1929 emit_i8(as, LJ_GC_WHITES);
1930 if (irref_isk(ir->op2)) {
1931 GCobj *vp = ir_kgc(IR(ir->op2));
1932 emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
1933 } else {
1934 Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
1935 emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
1936 }
1937 emit_sjcc(as, CC_Z, l_end);
1938 emit_i8(as, LJ_GC_BLACK);
1939 emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
1940 (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
1941 }
1942
1943 /* -- FP/int arithmetic and logic operations ------------------------------ */
1944
1945 /* Load reference onto x87 stack. Force a spill to memory if needed. */
1946 static void asm_x87load(ASMState *as, IRRef ref)
1947 {
1948 IRIns *ir = IR(ref);
1949 if (ir->o == IR_KNUM) {
1950 cTValue *tv = ir_knum(ir);
1951 if (tvispzero(tv)) /* Use fldz only for +0. */
1952 emit_x87op(as, XI_FLDZ);
1953 else if (tvispone(tv))
1954 emit_x87op(as, XI_FLD1);
1955 else
1956 emit_rma(as, XO_FLDq, XOg_FLDq, tv);
1957 } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
1958 !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
1959 IRIns *iri = IR(ir->op1);
1960 emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
1961 } else {
1962 emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
1963 }
1964 }
1965
1966 static void asm_fpmath(ASMState *as, IRIns *ir)
1967 {
1968 IRFPMathOp fpm = (IRFPMathOp)ir->op2;
1969 if (fpm == IRFPM_SQRT) {
1970 Reg dest = ra_dest(as, ir, RSET_FPR);
1971 Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
1972 emit_mrm(as, XO_SQRTSD, dest, left);
1973 } else if (fpm <= IRFPM_TRUNC) {
1974 if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */
1975 Reg dest = ra_dest(as, ir, RSET_FPR);
1976 Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
1977 /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
1978 ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
1979 ** This is atrocious, but the alternatives are much worse.
1980 */
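/* For reference, as implied by the fixup below: the real encoding is
** 66 [REX] 0F 3A 0B /r ib. emit_mrm produces 0F [REX] 3A 0B, so a REX
** byte (if any) is swapped with the 0F before the 66 is prepended,
** yielding the proper 66 REX 0F 3A 0B sequence.
*/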
1981 /* Round down/up/trunc == 1001/1010/1011. */
1982 emit_i8(as, 0x09 + fpm);
1983 emit_mrm(as, XO_ROUNDSD, dest, left);
1984 if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
1985 as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */
1986 }
1987 *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */
1988 } else { /* Call helper functions for SSE2 variant. */
1989 /* The modified regs must match with the *.dasc implementation. */
1990 RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
1991 if (ra_hasreg(ir->r))
1992 rset_clear(drop, ir->r); /* Dest reg handled below. */
1993 ra_evictset(as, drop);
1994 ra_destreg(as, ir, RID_XMM0);
1995 emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
1996 fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
1997 ra_left(as, RID_XMM0, ir->op1);
1998 }
1999 } else {
2000 asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
2001 }
2002 }
2003
2004 static void asm_ldexp(ASMState *as, IRIns *ir)
2005 {
2006 int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
2007 Reg dest = ir->r;
2008 if (ra_hasreg(dest)) {
2009 ra_free(as, dest);
2010 ra_modified(as, dest);
2011 emit_rmro(as, XO_MOVSD, dest, RID_ESP, ofs);
2012 }
2013 emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
2014 emit_x87op(as, XI_FPOP1);
2015 emit_x87op(as, XI_FSCALE);
2016 asm_x87load(as, ir->op1);
2017 asm_x87load(as, ir->op2);
2018 }
2019
2020 static void asm_fppowi(ASMState *as, IRIns *ir)
2021 {
2022 /* The modified regs must match with the *.dasc implementation. */
2023 RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
2024 if (ra_hasreg(ir->r))
2025 rset_clear(drop, ir->r); /* Dest reg handled below. */
2026 ra_evictset(as, drop);
2027 ra_destreg(as, ir, RID_XMM0);
2028 emit_call(as, lj_vm_powi_sse);
2029 ra_left(as, RID_XMM0, ir->op1);
2030 ra_left(as, RID_EAX, ir->op2);
2031 }
2032
2033 static int asm_swapops(ASMState *as, IRIns *ir)
2034 {
2035 IRIns *irl = IR(ir->op1);
2036 IRIns *irr = IR(ir->op2);
2037 lj_assertA(ra_noreg(irr->r), "bad usage");
2038 if (!irm_iscomm(lj_ir_mode[ir->o]))
2039 return 0; /* Can't swap non-commutative operations. */
2040 if (irref_isk(ir->op2))
2041 return 0; /* Don't swap constants to the left. */
2042 if (ra_hasreg(irl->r))
2043 return 1; /* Swap if left already has a register. */
2044 if (ra_samehint(ir->r, irr->r))
2045 return 1; /* Swap if dest and right have matching hints. */
2046 if (as->curins > as->loopref) { /* In variant part? */
2047 if (ir->op2 < as->loopref && !irt_isphi(irr->t))
2048 return 0; /* Keep invariants on the right. */
2049 if (ir->op1 < as->loopref && !irt_isphi(irl->t))
2050 return 1; /* Swap invariants to the right. */
2051 }
2052 if (opisfusableload(irl->o))
2053 return 1; /* Swap fusable loads to the right. */
2054 return 0; /* Otherwise don't swap. */
2055 }
2056
2057 static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
2058 {
2059 IRRef lref = ir->op1;
2060 IRRef rref = ir->op2;
2061 RegSet allow = RSET_FPR;
2062 Reg dest;
2063 Reg right = IR(rref)->r;
2064 if (ra_hasreg(right)) {
2065 rset_clear(allow, right);
2066 ra_noweak(as, right);
2067 }
2068 dest = ra_dest(as, ir, allow);
2069 if (lref == rref) {
2070 right = dest;
2071 } else if (ra_noreg(right)) {
2072 if (asm_swapops(as, ir)) {
2073 IRRef tmp = lref; lref = rref; rref = tmp;
2074 }
2075 right = asm_fuseload(as, rref, rset_clear(allow, dest));
2076 }
2077 emit_mrm(as, xo, dest, right);
2078 ra_left(as, dest, lref);
2079 }
2080
2081 static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
2082 {
2083 IRRef lref = ir->op1;
2084 IRRef rref = ir->op2;
2085 RegSet allow = RSET_GPR;
2086 Reg dest, right;
2087 int32_t k = 0;
2088 if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */
2089 MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2);
2090 MCode *q = p[0] == 0x0f ? p+1 : p;
2091 if ((*q & 15) < 14) {
2092 if ((*q & 15) >= 12) *q -= 4; /* L <-> S, NL <-> NS */
2093 as->flagmcp = NULL;
2094 as->mcp = p;
2095 } /* else: cannot transform LE/NLE to cc without use of OF. */
2096 }
2097 right = IR(rref)->r;
2098 if (ra_hasreg(right)) {
2099 rset_clear(allow, right);
2100 ra_noweak(as, right);
2101 }
2102 dest = ra_dest(as, ir, allow);
2103 if (lref == rref) {
2104 right = dest;
2105 } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
2106 if (asm_swapops(as, ir)) {
2107 IRRef tmp = lref; lref = rref; rref = tmp;
2108 }
2109 right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
2110 }
2111 if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */
2112 asm_guardcc(as, CC_O);
2113 if (xa != XOg_X_IMUL) {
2114 if (ra_hasreg(right))
2115 emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
2116 else
2117 emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
2118 } else if (ra_hasreg(right)) { /* IMUL r, mrm. */
2119 emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
2120 } else { /* IMUL r, r, k. */
2121 /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
2122 Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
2123 x86Op xo;
2124 if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
2125 } else { emit_i32(as, k); xo = XO_IMULi; }
2126 emit_mrm(as, xo, REX_64IR(ir, dest), left);
2127 return;
2128 }
2129 ra_left(as, dest, lref);
2130 }
2131
2132 /* LEA is really a 4-operand ADD with an independent destination register,
2133 ** up to two source registers and an immediate. One register can be scaled
2134 ** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
2135 ** instructions.
2136 **
2137 ** Currently only a few common cases are supported:
2138 ** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated
2139 ** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
2140 ** - Right ADD fusion: y = a+(b+k)
2141 ** The omitted variants have already been reduced by FOLD.
2142 **
2143 ** There are more fusion opportunities, like gathering shifts or joining
2144 ** common references. But these are probably not worth the trouble, since
2145 ** array indexing is not decomposed and already makes use of all fields
2146 ** of the ModRM operand.
2147 */
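/* Illustration of the supported shapes (operand names are schematic):
**
**   y = a+b      -->  lea y, [a+b]
**   y = a+k      -->  lea y, [a+k]
**   y = (a+b)+k  -->  lea y, [a+b+k]
**   y = (a+k)+b  -->  lea y, [a+b+k]
**   y = a+(b+k)  -->  lea y, [a+b+k]
*/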
2148 static int asm_lea(ASMState *as, IRIns *ir)
2149 {
2150 IRIns *irl = IR(ir->op1);
2151 IRIns *irr = IR(ir->op2);
2152 RegSet allow = RSET_GPR;
2153 Reg dest;
2154 as->mrm.base = as->mrm.idx = RID_NONE;
2155 as->mrm.scale = XM_SCALE1;
2156 as->mrm.ofs = 0;
2157 if (ra_hasreg(irl->r)) {
2158 rset_clear(allow, irl->r);
2159 ra_noweak(as, irl->r);
2160 as->mrm.base = irl->r;
2161 if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
2162 /* The PHI renaming logic does a better job in some cases. */
2163 if (ra_hasreg(ir->r) &&
2164 ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
2165 (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
2166 return 0;
2167 if (irref_isk(ir->op2)) {
2168 as->mrm.ofs = irr->i;
2169 } else {
2170 rset_clear(allow, irr->r);
2171 ra_noweak(as, irr->r);
2172 as->mrm.idx = irr->r;
2173 }
2174 } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
2175 irref_isk(irr->op2)) {
2176 Reg idx = ra_alloc1(as, irr->op1, allow);
2177 rset_clear(allow, idx);
2178 as->mrm.idx = (uint8_t)idx;
2179 as->mrm.ofs = IR(irr->op2)->i;
2180 } else {
2181 return 0;
2182 }
2183 } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
2184 (irref_isk(ir->op2) || irref_isk(irl->op2))) {
2185 Reg idx, base = ra_alloc1(as, irl->op1, allow);
2186 rset_clear(allow, base);
2187 as->mrm.base = (uint8_t)base;
2188 if (irref_isk(ir->op2)) {
2189 as->mrm.ofs = irr->i;
2190 idx = ra_alloc1(as, irl->op2, allow);
2191 } else {
2192 as->mrm.ofs = IR(irl->op2)->i;
2193 idx = ra_alloc1(as, ir->op2, allow);
2194 }
2195 rset_clear(allow, idx);
2196 as->mrm.idx = (uint8_t)idx;
2197 } else {
2198 return 0;
2199 }
2200 dest = ra_dest(as, ir, allow);
2201 emit_mrm(as, XO_LEA, dest, RID_MRM);
2202 return 1; /* Success. */
2203 }
2204
2205 static void asm_add(ASMState *as, IRIns *ir)
2206 {
2207 if (irt_isnum(ir->t))
2208 asm_fparith(as, ir, XO_ADDSD);
2209 else if (as->flagmcp == as->mcp || irt_is64(ir->t) || !asm_lea(as, ir))
2210 asm_intarith(as, ir, XOg_ADD);
2211 }
2212
2213 static void asm_sub(ASMState *as, IRIns *ir)
2214 {
2215 if (irt_isnum(ir->t))
2216 asm_fparith(as, ir, XO_SUBSD);
2217 else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
2218 asm_intarith(as, ir, XOg_SUB);
2219 }
2220
2221 static void asm_mul(ASMState *as, IRIns *ir)
2222 {
2223 if (irt_isnum(ir->t))
2224 asm_fparith(as, ir, XO_MULSD);
2225 else
2226 asm_intarith(as, ir, XOg_X_IMUL);
2227 }
2228
2229 #define asm_fpdiv(as, ir) asm_fparith(as, ir, XO_DIVSD)
2230
2231 static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
2232 {
2233 Reg dest = ra_dest(as, ir, RSET_GPR);
2234 emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
2235 ra_left(as, dest, ir->op1);
2236 }
2237
2238 static void asm_neg(ASMState *as, IRIns *ir)
2239 {
2240 if (irt_isnum(ir->t))
2241 asm_fparith(as, ir, XO_XORPS);
2242 else
2243 asm_neg_not(as, ir, XOg_NEG);
2244 }
2245
2246 #define asm_abs(as, ir) asm_fparith(as, ir, XO_ANDPS)
2247
2248 static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
2249 {
2250 Reg right, dest = ra_dest(as, ir, RSET_GPR);
2251 IRRef lref = ir->op1, rref = ir->op2;
2252 if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
2253 right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
2254 emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
2255 emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
2256 ra_left(as, dest, lref);
2257 }
2258
2259 static void asm_min(ASMState *as, IRIns *ir)
2260 {
2261 if (irt_isnum(ir->t))
2262 asm_fparith(as, ir, XO_MINSD);
2263 else
2264 asm_intmin_max(as, ir, CC_G);
2265 }
2266
2267 static void asm_max(ASMState *as, IRIns *ir)
2268 {
2269 if (irt_isnum(ir->t))
2270 asm_fparith(as, ir, XO_MAXSD);
2271 else
2272 asm_intmin_max(as, ir, CC_L);
2273 }
2274
2275 /* Note: don't use LEA for overflow-checking arithmetic! */
2276 #define asm_addov(as, ir) asm_intarith(as, ir, XOg_ADD)
2277 #define asm_subov(as, ir) asm_intarith(as, ir, XOg_SUB)
2278 #define asm_mulov(as, ir) asm_intarith(as, ir, XOg_X_IMUL)
2279
2280 #define asm_bnot(as, ir) asm_neg_not(as, ir, XOg_NOT)
2281
2282 static void asm_bswap(ASMState *as, IRIns *ir)
2283 {
2284 Reg dest = ra_dest(as, ir, RSET_GPR);
2285 as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
2286 REX_64IR(ir, 0), dest, 0, as->mcp, 1);
2287 ra_left(as, dest, ir->op1);
2288 }
2289
2290 #define asm_band(as, ir) asm_intarith(as, ir, XOg_AND)
2291 #define asm_bor(as, ir) asm_intarith(as, ir, XOg_OR)
2292 #define asm_bxor(as, ir) asm_intarith(as, ir, XOg_XOR)
2293
2294 static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs, x86Op xv)
2295 {
2296 IRRef rref = ir->op2;
2297 IRIns *irr = IR(rref);
2298 Reg dest;
2299 if (irref_isk(rref)) { /* Constant shifts. */
2300 int shift;
2301 dest = ra_dest(as, ir, RSET_GPR);
2302 shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
2303 if (!xv && shift && (as->flags & JIT_F_BMI2)) {
2304 Reg left = asm_fuseloadm(as, ir->op1, RSET_GPR, irt_is64(ir->t));
2305 if (left != dest) { /* BMI2 rotate right by constant. */
2306 emit_i8(as, xs == XOg_ROL ? -shift : shift);
2307 emit_mrm(as, VEX_64IR(ir, XV_RORX), dest, left);
2308 return;
2309 }
2310 }
2311 switch (shift) {
2312 case 0: break;
2313 case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
2314 default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
2315 }
2316 } else if ((as->flags & JIT_F_BMI2) && xv) { /* BMI2 variable shifts. */
2317 Reg left, right;
2318 dest = ra_dest(as, ir, RSET_GPR);
2319 right = ra_alloc1(as, rref, RSET_GPR);
2320 left = asm_fuseloadm(as, ir->op1, rset_exclude(RSET_GPR, right),
2321 irt_is64(ir->t));
2322 emit_mrm(as, VEX_64IR(ir, xv) ^ (right << 19), dest, left);
2323 return;
2324 } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
2325 Reg right;
2326 dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
2327 if (dest == RID_ECX) {
2328 dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
2329 emit_rr(as, XO_MOV, REX_64IR(ir, RID_ECX), dest);
2330 }
2331 right = irr->r;
2332 if (ra_noreg(right))
2333 right = ra_allocref(as, rref, RID2RSET(RID_ECX));
2334 else if (right != RID_ECX)
2335 ra_scratch(as, RID2RSET(RID_ECX));
2336 emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
2337 ra_noweak(as, right);
2338 if (right != RID_ECX)
2339 emit_rr(as, XO_MOV, RID_ECX, right);
2340 }
2341 ra_left(as, dest, ir->op1);
2342 /*
2343 ** Note: avoid using the flags resulting from a shift or rotate!
2344 ** All of them cause a partial flag stall, except for r,1 shifts
2345 ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
2346 */
2347 }
2348
2349 #define asm_bshl(as, ir) asm_bitshift(as, ir, XOg_SHL, XV_SHLX)
2350 #define asm_bshr(as, ir) asm_bitshift(as, ir, XOg_SHR, XV_SHRX)
2351 #define asm_bsar(as, ir) asm_bitshift(as, ir, XOg_SAR, XV_SARX)
2352 #define asm_brol(as, ir) asm_bitshift(as, ir, XOg_ROL, 0)
2353 #define asm_bror(as, ir) asm_bitshift(as, ir, XOg_ROR, 0)
2354
2355 /* -- Comparisons --------------------------------------------------------- */
2356
2357 /* Virtual flags for unordered FP comparisons. */
2358 #define VCC_U 0x1000 /* Unordered. */
2359 #define VCC_P 0x2000 /* Needs extra CC_P branch. */
2360 #define VCC_S 0x4000 /* Swap avoids CC_P branch. */
2361 #define VCC_PS (VCC_P|VCC_S)
2362
2363 /* Map of comparisons to flags. ORDER IR. */
2364 #define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf))
2365 static const uint16_t asm_compmap[IR_ABC+1] = {
2366 /* signed non-eq unsigned flags */
2367 /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS),
2368 /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0),
2369 /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS),
2370 /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0),
2371 /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U),
2372 /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS),
2373 /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U),
2374 /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS),
2375 /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
2376 /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P),
2377 /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */
2378 };
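/* How to read an entry, following the COMPFLAGS packing above: bits 0-3
** hold the inverted signed condition used to branch to the exit, bits 4-7
** the unsigned/FP condition, bits 8-11 the condition for hiword compares
** without the equality part, and bits 12+ the VCC_* flags. E.g. IR_LT
** exits on CC_GE (signed) or CC_AE (unsigned and FP), and its FP variant
** prefers operand swapping (VCC_S) over an extra CC_P branch.
*/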
2379
2380 /* FP and integer comparisons. */
2381 static void asm_comp(ASMState *as, IRIns *ir)
2382 {
2383 uint32_t cc = asm_compmap[ir->o];
2384 if (irt_isnum(ir->t)) {
2385 IRRef lref = ir->op1;
2386 IRRef rref = ir->op2;
2387 Reg left, right;
2388 MCLabel l_around;
2389 /*
2390 ** An extra CC_P branch is required to preserve ordered/unordered
2391 ** semantics for FP comparisons. This can be avoided by swapping
2392 ** the operands and inverting the condition (except for EQ and UNE).
2393 ** So always try to swap if possible.
2394 **
2395 ** Another option would be to swap operands to achieve better memory
2396 ** operand fusion. But it's unlikely that this outweighs the cost
2397 ** of the extra branches.
2398 */
2399 if (cc & VCC_S) { /* Swap? */
2400 IRRef tmp = lref; lref = rref; rref = tmp;
2401 cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
2402 }
2403 left = ra_alloc1(as, lref, RSET_FPR);
2404 l_around = emit_label(as);
2405 asm_guardcc(as, cc >> 4);
2406 if (cc & VCC_P) { /* Extra CC_P branch required? */
2407 if (!(cc & VCC_U)) {
2408 asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */
2409 } else if (l_around != as->invmcp) {
2410 emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */
2411 } else {
2412 /* Patched to mcloop by asm_loop_fixup. */
2413 as->loopinv = 2;
2414 if (as->realign)
2415 emit_sjcc(as, CC_P, as->mcp);
2416 else
2417 emit_jcc(as, CC_P, as->mcp);
2418 }
2419 }
2420 right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
2421 emit_mrm(as, XO_UCOMISD, left, right);
2422 } else {
2423 IRRef lref = ir->op1, rref = ir->op2;
2424 IROp leftop = (IROp)(IR(lref)->o);
2425 Reg r64 = REX_64IR(ir, 0);
2426 int32_t imm = 0;
2427 lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
2428 irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
2429 "bad comparison data type %d", irt_type(ir->t));
2430 /* Swap constants (only for ABC) and fusable loads to the right. */
2431 if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
2432 if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */
2433 else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */
2434 lref = ir->op2; rref = ir->op1;
2435 }
2436 if (asm_isk32(as, rref, &imm)) {
2437 IRIns *irl = IR(lref);
2438 /* Check whether we can use test ins. Not for unsigned, since CF=0. */
2439 int usetest = (imm == 0 && (cc & 0xa) != 0x2);
2440 if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
2441 /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
2442 Reg right, left = RID_NONE;
2443 RegSet allow = RSET_GPR;
2444 if (!asm_isk32(as, irl->op2, &imm)) {
2445 left = ra_alloc1(as, irl->op2, allow);
2446 rset_clear(allow, left);
2447 } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */
2448 IRIns *irll = IR(irl->op1);
2449 if (opisfusableload((IROp)irll->o) &&
2450 (irt_isi8(irll->t) || irt_isu8(irll->t))) {
2451 IRType1 origt = irll->t; /* Temporarily flip types. */
2452 irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
2453 as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
2454 right = asm_fuseload(as, irl->op1, RSET_GPR);
2455 as->curins++;
2456 irll->t = origt;
2457 if (right != RID_MRM) goto test_nofuse;
2458 /* Fusion succeeded, emit test byte mrm, imm8. */
2459 asm_guardcc(as, cc);
2460 emit_i8(as, (imm & 0xff));
2461 emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
2462 return;
2463 }
2464 }
2465 as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
2466 right = asm_fuseloadm(as, irl->op1, allow, r64);
2467 as->curins++; /* Undo the above. */
2468 test_nofuse:
2469 asm_guardcc(as, cc);
2470 if (ra_noreg(left)) {
2471 emit_i32(as, imm);
2472 emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
2473 } else {
2474 emit_mrm(as, XO_TEST, r64 + left, right);
2475 }
2476 } else {
2477 Reg left;
2478 if (opisfusableload((IROp)irl->o) &&
2479 ((irt_isu8(irl->t) && checku8(imm)) ||
2480 ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
2481 (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
2482 /* Only the IRT_INT case is fused by asm_fuseload.
2483 ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
2484 ** are handled here.
2485 ** Note that cmp word [mem], imm16 should not be generated,
2486 ** since it has a length-changing prefix. Compares of a word
2487 ** against a sign-extended imm8 are ok, however.
2488 */
2489 IRType1 origt = irl->t; /* Temporarily flip types. */
2490 irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
2491 left = asm_fuseload(as, lref, RSET_GPR);
2492 irl->t = origt;
2493 if (left == RID_MRM) { /* Fusion succeeded? */
2494 if (irt_isu8(irl->t) || irt_isu16(irl->t))
2495 cc >>= 4; /* Need unsigned compare. */
2496 asm_guardcc(as, cc);
2497 emit_i8(as, imm);
2498 emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
2499 XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
2500 return;
2501 } /* Otherwise handle register case as usual. */
2502 } else {
2503 left = asm_fuseloadm(as, lref,
2504 irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
2505 }
2506 asm_guardcc(as, cc);
2507 if (usetest && left != RID_MRM) {
2508 /* Use test r,r instead of cmp r,0. */
2509 x86Op xo = XO_TEST;
2510 if (irt_isu8(ir->t)) {
2511 lj_assertA(ir->o == IR_EQ || ir->o == IR_NE, "bad usage");
2512 xo = XO_TESTb;
2513 if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
2514 if (LJ_64) {
2515 left |= FORCE_REX;
2516 } else {
2517 emit_i32(as, 0xff);
2518 emit_mrm(as, XO_GROUP3, XOg_TEST, left);
2519 return;
2520 }
2521 }
2522 }
2523 emit_rr(as, xo, r64 + left, left);
2524 if (irl+1 == ir) /* Referencing previous ins? */
2525 as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */
2526 } else {
2527 emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
2528 }
2529 }
2530 } else {
2531 Reg left = ra_alloc1(as, lref, RSET_GPR);
2532 Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
2533 asm_guardcc(as, cc);
2534 emit_mrm(as, XO_CMP, r64 + left, right);
2535 }
2536 }
2537 }
2538
2539 #define asm_equal(as, ir) asm_comp(as, ir)
2540
2541 #if LJ_32 && LJ_HASFFI
2542 /* 64 bit integer comparisons in 32 bit mode. */
2543 static void asm_comp_int64(ASMState *as, IRIns *ir)
2544 {
2545 uint32_t cc = asm_compmap[(ir-1)->o];
2546 RegSet allow = RSET_GPR;
2547 Reg lefthi = RID_NONE, leftlo = RID_NONE;
2548 Reg righthi = RID_NONE, rightlo = RID_NONE;
2549 MCLabel l_around;
2550 x86ModRM mrm;
2551
2552 as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */
2553
2554 /* Allocate/fuse hiword operands. */
2555 if (irref_isk(ir->op2)) {
2556 lefthi = asm_fuseload(as, ir->op1, allow);
2557 } else {
2558 lefthi = ra_alloc1(as, ir->op1, allow);
2559 rset_clear(allow, lefthi);
2560 righthi = asm_fuseload(as, ir->op2, allow);
2561 if (righthi == RID_MRM) {
2562 if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
2563 if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
2564 } else {
2565 rset_clear(allow, righthi);
2566 }
2567 }
2568 mrm = as->mrm; /* Save state for hiword instruction. */
2569
2570 /* Allocate/fuse loword operands. */
2571 if (irref_isk((ir-1)->op2)) {
2572 leftlo = asm_fuseload(as, (ir-1)->op1, allow);
2573 } else {
2574 leftlo = ra_alloc1(as, (ir-1)->op1, allow);
2575 rset_clear(allow, leftlo);
2576 rightlo = asm_fuseload(as, (ir-1)->op2, allow);
2577 }
2578
2579 /* All register allocations must be performed _before_ this point. */
2580 l_around = emit_label(as);
2581 as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */
2582
2583 /* Loword comparison and branch. */
2584 asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */
2585 if (ra_noreg(rightlo)) {
2586 int32_t imm = IR((ir-1)->op2)->i;
2587 if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
2588 emit_rr(as, XO_TEST, leftlo, leftlo);
2589 else
2590 emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
2591 } else {
2592 emit_mrm(as, XO_CMP, leftlo, rightlo);
2593 }
2594
2595 /* Hiword comparison and branches. */
2596 if ((cc & 15) != CC_NE)
2597 emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */
2598 if ((cc & 15) != CC_E)
2599 asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */
2600 as->mrm = mrm; /* Restore state. */
2601 if (ra_noreg(righthi)) {
2602 int32_t imm = IR(ir->op2)->i;
2603 if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
2604 emit_rr(as, XO_TEST, lefthi, lefthi);
2605 else
2606 emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
2607 } else {
2608 emit_mrm(as, XO_CMP, lefthi, righthi);
2609 }
2610 }
2611 #endif
2612
2613 /* -- Split register ops -------------------------------------------------- */
2614
2615 /* Hiword op of a split 32/32 or 64/64 bit op. Previous op is the loword op. */
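/* E.g. a 64 bit addition in 32 bit mode is the loword IR_ADD immediately
** followed by its IR_HIOP, which the switch below lowers to add/adc.
*/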
2616 static void asm_hiop(ASMState *as, IRIns *ir)
2617 {
2618 /* HIOP is marked as a store because it needs its own DCE logic. */
2619 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
2620 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
2621 #if LJ_32 && LJ_HASFFI
2622 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
2623 as->curins--; /* Always skip the CONV. */
2624 if (usehi || uselo)
2625 asm_conv64(as, ir);
2626 return;
2627 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
2628 asm_comp_int64(as, ir);
2629 return;
2630 } else if ((ir-1)->o == IR_XSTORE) {
2631 if ((ir-1)->r != RID_SINK)
2632 asm_fxstore(as, ir);
2633 return;
2634 }
2635 #endif
2636 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
2637 switch ((ir-1)->o) {
2638 #if LJ_32 && LJ_HASFFI
2639 case IR_ADD:
2640 as->flagmcp = NULL;
2641 as->curins--;
2642 asm_intarith(as, ir, XOg_ADC);
2643 asm_intarith(as, ir-1, XOg_ADD);
2644 break;
2645 case IR_SUB:
2646 as->flagmcp = NULL;
2647 as->curins--;
2648 asm_intarith(as, ir, XOg_SBB);
2649 asm_intarith(as, ir-1, XOg_SUB);
2650 break;
2651 case IR_NEG: {
2652 Reg dest = ra_dest(as, ir, RSET_GPR);
2653 emit_rr(as, XO_GROUP3, XOg_NEG, dest);
2654 emit_i8(as, 0);
2655 emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
2656 ra_left(as, dest, ir->op1);
2657 as->curins--;
2658 asm_neg_not(as, ir-1, XOg_NEG);
2659 break;
2660 }
2661 case IR_CNEWI:
2662 /* Nothing to do here. Handled by CNEWI itself. */
2663 break;
2664 #endif
2665 case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
2666 if (!uselo)
2667 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
2668 break;
2669 default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
2670 }
2671 }
2672
2673 /* -- Profiling ----------------------------------------------------------- */
2674
2675 static void asm_prof(ASMState *as, IRIns *ir)
2676 {
2677 UNUSED(ir);
2678 asm_guardcc(as, CC_NE);
2679 emit_i8(as, HOOK_PROFILE);
2680 emit_rma(as, XO_GROUP3b, XOg_TEST, &J2G(as->J)->hookmask);
2681 }
2682
2683 /* -- Stack handling ------------------------------------------------------ */
2684
2685 /* Check Lua stack size for overflow. Use exit handler as fallback. */
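/* A sketch of the emitted check, in execution order (the assembler emits
** backwards; spill/restore only happens if no temp. register is free):
**
**   [mov [esp], r]
**   mov r, [g->cur_L]
**   mov r, [r+offsetof(lua_State, maxstack)]
**   sub r, BASE                     (or g->jit_base for a side trace)
**   cmp r, 8*topslot
**   [mov r, [esp]]
**   jb ->exit
*/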
2686 static void asm_stack_check(ASMState *as, BCReg topslot,
2687 IRIns *irp, RegSet allow, ExitNo exitno)
2688 {
2689 /* Try to get an unused temp. register, otherwise spill/restore eax. */
2690 Reg pbase = irp ? irp->r : RID_BASE;
2691 Reg r = allow ? rset_pickbot(allow) : RID_EAX;
2692 emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
2693 if (allow == RSET_EMPTY) /* Restore temp. register. */
2694 emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
2695 else
2696 ra_modified(as, r);
2697 emit_gri(as, XG_ARITHi(XOg_CMP), r|REX_GC64, (int32_t)(8*topslot));
2698 if (ra_hasreg(pbase) && pbase != r)
2699 emit_rr(as, XO_ARITH(XOg_SUB), r|REX_GC64, pbase);
2700 else
2701 #if LJ_GC64
2702 emit_rmro(as, XO_ARITH(XOg_SUB), r|REX_64, RID_DISPATCH,
2703 (int32_t)dispofs(as, &J2G(as->J)->jit_base));
2704 #else
2705 emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
2706 ptr2addr(&J2G(as->J)->jit_base));
2707 #endif
2708 emit_rmro(as, XO_MOV, r|REX_GC64, r, offsetof(lua_State, maxstack));
2709 emit_getgl(as, r, cur_L);
2710 if (allow == RSET_EMPTY) /* Spill temp. register. */
2711 emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
2712 }
2713
2714 /* Restore Lua stack from on-trace state. */
2715 static void asm_stack_restore(ASMState *as, SnapShot *snap)
2716 {
2717 SnapEntry *map = &as->T->snapmap[snap->mapofs];
2718 #if !LJ_FR2 || defined(LUA_USE_ASSERT)
2719 SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
2720 #endif
2721 MSize n, nent = snap->nent;
2722 /* Store the value of all modified slots to the Lua stack. */
2723 for (n = 0; n < nent; n++) {
2724 SnapEntry sn = map[n];
2725 BCReg s = snap_slot(sn);
2726 int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
2727 IRRef ref = snap_ref(sn);
2728 IRIns *ir = IR(ref);
2729 if ((sn & SNAP_NORESTORE))
2730 continue;
2731 if ((sn & SNAP_KEYINDEX)) {
2732 emit_movmroi(as, RID_BASE, ofs+4, LJ_KEYINDEX);
2733 if (irref_isk(ref)) {
2734 emit_movmroi(as, RID_BASE, ofs, ir->i);
2735 } else {
2736 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
2737 emit_movtomro(as, src, RID_BASE, ofs);
2738 }
2739 } else if (irt_isnum(ir->t)) {
2740 Reg src = ra_alloc1(as, ref, RSET_FPR);
2741 emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
2742 } else {
2743 lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
2744 (LJ_DUALNUM && irt_isinteger(ir->t)),
2745 "restore of IR type %d", irt_type(ir->t));
2746 if (!irref_isk(ref)) {
2747 Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
2748 #if LJ_GC64
2749 if (irt_is64(ir->t)) {
2750 /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
2751 emit_u32(as, irt_toitype(ir->t) << 15);
2752 emit_rmro(as, XO_ARITHi, XOg_OR, RID_BASE, ofs+4);
2753 } else if (LJ_DUALNUM && irt_isinteger(ir->t)) {
2754 emit_movmroi(as, RID_BASE, ofs+4, LJ_TISNUM << 15);
2755 } else {
2756 emit_movmroi(as, RID_BASE, ofs+4, (irt_toitype(ir->t)<<15)|0x7fff);
2757 }
2758 #endif
2759 emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
2760 #if LJ_GC64
2761 } else {
2762 TValue k;
2763 lj_ir_kvalue(as->J->L, &k, ir);
2764 if (tvisnil(&k)) {
2765 emit_i32(as, -1);
2766 emit_rmro(as, XO_MOVmi, REX_64, RID_BASE, ofs);
2767 } else {
2768 emit_movmroi(as, RID_BASE, ofs+4, k.u32.hi);
2769 emit_movmroi(as, RID_BASE, ofs, k.u32.lo);
2770 }
2771 #else
2772 } else if (!irt_ispri(ir->t)) {
2773 emit_movmroi(as, RID_BASE, ofs, ir->i);
2774 #endif
2775 }
2776 if ((sn & (SNAP_CONT|SNAP_FRAME))) {
2777 #if !LJ_FR2
2778 if (s != 0) /* Do not overwrite link to previous frame. */
2779 emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
2780 #endif
2781 #if !LJ_GC64
2782 } else {
2783 if (!(LJ_64 && irt_islightud(ir->t)))
2784 emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
2785 #endif
2786 }
2787 }
2788 checkmclim(as);
2789 }
2790 lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
2791 }
2792
2793 /* -- GC handling --------------------------------------------------------- */
2794
2795 /* Check GC threshold and do one or more GC steps. */
2796 static void asm_gc_check(ASMState *as)
2797 {
2798 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
2799 IRRef args[2];
2800 MCLabel l_end;
2801 Reg tmp;
2802 ra_evictset(as, RSET_SCRATCH);
2803 l_end = emit_label(as);
2804 /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
2805 asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
2806 emit_rr(as, XO_TEST, RID_RET, RID_RET);
2807 args[0] = ASMREF_TMP1; /* global_State *g */
2808 args[1] = ASMREF_TMP2; /* MSize steps */
2809 asm_gencall(as, ci, args);
2810 tmp = ra_releasetmp(as, ASMREF_TMP1);
2811 #if LJ_GC64
2812 emit_rmro(as, XO_LEA, tmp|REX_64, RID_DISPATCH, GG_DISP2G);
2813 #else
2814 emit_loada(as, tmp, J2G(as->J));
2815 #endif
2816 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
2817 /* Jump around GC step if GC total < GC threshold. */
2818 emit_sjcc(as, CC_B, l_end);
2819 emit_opgl(as, XO_ARITH(XOg_CMP), tmp|REX_GC64, gc.threshold);
2820 emit_getgl(as, tmp, gc.total);
2821 as->gcsteps = 0;
2822 checkmclim(as);
2823 }
2824
2825 /* -- Loop handling ------------------------------------------------------- */
2826
2827 /* Fixup the loop branch. */
2828 static void asm_loop_fixup(ASMState *as)
2829 {
2830 MCode *p = as->mctop;
2831 MCode *target = as->mcp;
2832 if (as->realign) { /* Realigned loops use short jumps. */
2833 as->realign = NULL; /* Stop another retry. */
2834 lj_assertA(((intptr_t)target & 15) == 0, "loop realign failed");
2835 if (as->loopinv) { /* Inverted loop branch? */
2836 p -= 5;
2837 p[0] = XI_JMP;
2838 lj_assertA(target - p >= -128, "loop realign failed");
2839 p[-1] = (MCode)(target - p); /* Patch sjcc. */
2840 if (as->loopinv == 2)
2841 p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
2842 } else {
2843 lj_assertA(target - p >= -128, "loop realign failed");
2844 p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
2845 p[-2] = XI_JMPs;
2846 }
2847 } else {
2848 MCode *newloop;
2849 p[-5] = XI_JMP;
2850 if (as->loopinv) { /* Inverted loop branch? */
2851 /* asm_guardcc already inverted the jcc and patched the jmp. */
2852 p -= 5;
2853 newloop = target+4;
2854 *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */
2855 if (as->loopinv == 2) {
2856 *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */
2857 newloop = target+8;
2858 }
2859 } else { /* Otherwise just patch jmp. */
2860 *(int32_t *)(p-4) = (int32_t)(target - p);
2861 newloop = target+3;
2862 }
2863 /* Realign small loops and shorten the loop branch. */
2864 if (newloop >= p - 128) {
2865 as->realign = newloop; /* Force a retry and remember alignment. */
2866 as->curins = as->stopins; /* Abort asm_trace now. */
2867 as->T->nins = as->orignins; /* Remove any added renames. */
2868 }
2869 }
2870 }
2871
2872 /* Fixup the tail of the loop. */
2873 static void asm_loop_tail_fixup(ASMState *as)
2874 {
2875 UNUSED(as); /* Nothing to do. */
2876 }
2877
2878 /* -- Head of trace ------------------------------------------------------- */
2879
2880 /* Coalesce BASE register for a root trace. */
2881 static void asm_head_root_base(ASMState *as)
2882 {
2883 IRIns *ir = IR(REF_BASE);
2884 Reg r = ir->r;
2885 if (ra_hasreg(r)) {
2886 ra_free(as, r);
2887 if (rset_test(as->modset, r) || irt_ismarked(ir->t))
2888 ir->r = RID_INIT; /* No inheritance for modified BASE register. */
2889 if (r != RID_BASE)
2890 emit_rr(as, XO_MOV, r|REX_GC64, RID_BASE);
2891 }
2892 }
2893
2894 /* Coalesce or reload BASE register for a side trace. */
2895 static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
2896 {
2897 IRIns *ir = IR(REF_BASE);
2898 Reg r = ir->r;
2899 if (ra_hasreg(r)) {
2900 ra_free(as, r);
2901 if (rset_test(as->modset, r) || irt_ismarked(ir->t))
2902 ir->r = RID_INIT; /* No inheritance for modified BASE register. */
2903 if (irp->r == r) {
2904 rset_clear(allow, r); /* Mark same BASE register as coalesced. */
2905 } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
2906 /* Move from coalesced parent reg. */
2907 rset_clear(allow, irp->r);
2908 emit_rr(as, XO_MOV, r|REX_GC64, irp->r);
2909 } else {
2910 emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
2911 }
2912 }
2913 return allow;
2914 }
2915
2916 /* -- Tail of trace ------------------------------------------------------- */
2917
2918 /* Fixup the tail code. */
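/* Byte layout of the fixed-size tail patched here, derived from the
** opcode constants below: an optional stack adjustment 'add esp, spadj'
** encoded as [48] 83 C4 ib or [48] 81 C4 id (REX.W on x64), followed by
** the near exit branch E9 rel32. With spadj == 0 only the jmp remains.
*/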
2919 static void asm_tail_fixup(ASMState *as, TraceNo lnk)
2920 {
2921 /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
2922 MCode *p = as->mctop;
2923 MCode *target, *q;
2924 int32_t spadj = as->T->spadjust;
2925 if (spadj == 0) {
2926 p -= LJ_64 ? 7 : 6;
2927 } else {
2928 MCode *p1;
2929 /* Patch stack adjustment. */
2930 if (checki8(spadj)) {
2931 p -= 3;
2932 p1 = p-6;
2933 *p1 = (MCode)spadj;
2934 } else {
2935 p1 = p-9;
2936 *(int32_t *)p1 = spadj;
2937 }
2938 #if LJ_64
2939 p1[-3] = 0x48;
2940 #endif
2941 p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
2942 p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
2943 }
2944 /* Patch exit branch. */
2945 target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
2946 *(int32_t *)(p-4) = jmprel(as->J, p, target);
2947 p[-5] = XI_JMP;
2948 /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
2949 for (q = as->mctop-1; q >= p; q--)
2950 *q = XI_NOP;
2951 as->mctop = p;
2952 }
2953
2954 /* Prepare tail of code. */
2955 static void asm_tail_prep(ASMState *as)
2956 {
2957 MCode *p = as->mctop;
2958 /* Realign and leave room for backwards loop branch or exit branch. */
2959 if (as->realign) {
2960 int i = ((int)(intptr_t)as->realign) & 15;
2961 /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
2962 while (i-- > 0)
2963 *--p = XI_NOP;
2964 as->mctop = p;
2965 p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */
2966 } else {
2967 p -= 5; /* Space for exit branch (near jmp). */
2968 }
2969 if (as->loopref) {
2970 as->invmcp = as->mcp = p;
2971 } else {
2972 /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
2973 as->mcp = p - (LJ_64 ? 7 : 6);
2974 as->invmcp = NULL;
2975 }
2976 }
2977
2978 /* -- Trace setup --------------------------------------------------------- */
2979
2980 /* Ensure there are enough stack slots for call arguments. */
2981 static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
2982 {
2983 IRRef args[CCI_NARGS_MAX*2];
2984 int nslots;
2985 asm_collectargs(as, ir, ci, args);
2986 nslots = asm_count_call_slots(as, ci, args);
2987 if (nslots > as->evenspill) /* Leave room for args in stack slots. */
2988 as->evenspill = nslots;
2989 #if LJ_64
2990 return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
2991 #else
2992 return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
2993 #endif
2994 }
2995
2996 /* Target-specific setup. */
2997 static void asm_setup_target(ASMState *as)
2998 {
2999 asm_exitstub_setup(as, as->T->nsnap);
3000 as->mrm.base = 0;
3001 }
3002
3003 /* -- Trace patching ------------------------------------------------------ */
3004
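/* Compact tables for the instruction length decoder below. Each entry is
** nibble-encoded, as interpreted by asm_x86_inslen: the high nibble picks
** the decoder class (0: end, 1: prefix, 2: 0F escape, 3: skip byte+ModRM,
** 4/5: fixed length, 6: group 3, 7: VEX, 8/9: ModRM follows) and the low
** nibble is the base byte count. Worked example for mov dword [rax+8], imm32
** (C7 40 08 <imm32>): map_op1[0xC7] = 0x86, class 8 with base length 6
** (opcode + ModRM + imm32); ModRM mod == 01 adds one disp8 byte, total 7.
*/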
3005 static const uint8_t map_op1[256] = {
3006 0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x20,
3007 0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,
3008 0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
3009 0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
3010 #if LJ_64
3011 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x14,0x14,0x14,0x14,0x14,0x14,0x14,0x14,
3012 #else
3013 0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
3014 #endif
3015 0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
3016 0x51,0x51,0x92,0x92,0x10,0x10,0x12,0x11,0x45,0x86,0x52,0x93,0x51,0x51,0x51,0x51,
3017 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
3018 0x93,0x86,0x93,0x93,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
3019 0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x47,0x51,0x51,0x51,0x51,0x51,
3020 #if LJ_64
3021 0x59,0x59,0x59,0x59,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
3022 #else
3023 0x55,0x55,0x55,0x55,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
3024 #endif
3025 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,
3026 0x93,0x93,0x53,0x51,0x70,0x71,0x93,0x86,0x54,0x51,0x53,0x51,0x51,0x52,0x51,0x51,
3027 0x92,0x92,0x92,0x92,0x52,0x52,0x51,0x51,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
3028 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x45,0x45,0x47,0x52,0x51,0x51,0x51,0x51,
3029 0x10,0x51,0x10,0x10,0x51,0x51,0x63,0x66,0x51,0x51,0x51,0x51,0x51,0x51,0x92,0x92
3030 };
3031
3032 static const uint8_t map_op2[256] = {
3033 0x93,0x93,0x93,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x51,0x52,0x51,0x93,0x52,0x94,
3034 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3035 0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3036 0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x34,0x51,0x35,0x51,0x51,0x51,0x51,0x51,
3037 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3038 0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3039 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3040 0x94,0x54,0x54,0x54,0x93,0x93,0x93,0x52,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3041 0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,
3042 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3043 0x52,0x52,0x52,0x93,0x94,0x93,0x51,0x51,0x52,0x52,0x52,0x93,0x94,0x93,0x93,0x93,
3044 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x94,0x93,0x93,0x93,0x93,0x93,
3045 0x93,0x93,0x94,0x93,0x94,0x94,0x94,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
3046 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3047 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
3048 0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x52
3049 };
3050
3051 static uint32_t asm_x86_inslen(const uint8_t* p)
3052 {
3053 uint32_t result = 0;
3054 uint32_t prefixes = 0;
3055 uint32_t x = map_op1[*p];
3056 for (;;) {
3057 switch (x >> 4) {
3058 case 0: return result + x + (prefixes & 4);
3059 case 1: prefixes |= x; x = map_op1[*++p]; result++; break;
3060 case 2: x = map_op2[*++p]; break;
3061 case 3: p++; goto mrm;
3062 case 4: result -= (prefixes & 2); /* fallthrough */
3063 case 5: return result + (x & 15);
3064 case 6: /* Group 3. */
3065 if (p[1] & 0x38) x = 2;
3066 else if ((prefixes & 2) && (x == 0x66)) x = 4;
3067 goto mrm;
3068 case 7: /* VEX c4/c5. */
3069 if (LJ_32 && p[1] < 0xc0) {
3070 x = 2;
3071 goto mrm;
3072 }
3073 if (x == 0x70) {
3074 x = *++p & 0x1f;
3075 result++;
3076 if (x >= 2) {
3077 p += 2;
3078 result += 2;
3079 goto mrm;
3080 }
3081 }
3082 p++;
3083 result++;
3084 x = map_op2[*++p];
3085 break;
3086 case 8: result -= (prefixes & 2); /* fallthrough */
3087 case 9: mrm: /* ModR/M and possibly SIB. */
3088 result += (x & 15);
3089 x = *++p;
3090 switch (x >> 6) {
3091 case 0: if ((x & 7) == 5) return result + 4; break;
3092 case 1: result++; break;
3093 case 2: result += 4; break;
3094 case 3: return result;
3095 }
3096 if ((x & 7) == 4) {
3097 result++;
3098 if (x < 0x40 && (p[1] & 7) == 5) result += 4;
3099 }
3100 return result;
3101 }
3102 }
3103 }
3104
3105 /* Patch exit jumps of existing machine code to a new target. */
3106 void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
3107 {
3108 MCode *p = T->mcode;
3109 MCode *mcarea = lj_mcode_patch(J, p, 0);
3110 MSize len = T->szmcode;
3111 MCode *px = exitstub_addr(J, exitno) - 6;
3112 MCode *pe = p+len-6;
3113 MCode *pgc = NULL;
3114 #if LJ_GC64
3115 uint32_t statei = (uint32_t)(GG_OFS(g.vmstate) - GG_OFS(dispatch));
3116 #else
3117 uint32_t statei = u32ptr(&J2G(J)->vmstate);
3118 #endif
3119 if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
3120 *(int32_t *)(p+len-4) = jmprel(J, p+len, target);
3121 /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
3122 for (; p < pe; p += asm_x86_inslen(p)) {
3123 intptr_t ofs = LJ_GC64 ? (p[0] & 0xf0) == 0x40 : LJ_64;
3124 if (*(uint32_t *)(p+2+ofs) == statei && p[ofs+LJ_GC64-LJ_64] == XI_MOVmi)
3125 break;
3126 }
3127 lj_assertJ(p < pe, "instruction length decoder failed");
3128 for (; p < pe; p += asm_x86_inslen(p)) {
3129 if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px &&
3130 p != pgc) {
3131 *(int32_t *)(p+2) = jmprel(J, p+6, target);
3132 } else if (*p == XI_CALL &&
3133 (void *)(p+5+*(int32_t *)(p+1)) == (void *)lj_gc_step_jit) {
3134 pgc = p+7; /* Do not patch GC check exit. */
3135 }
3136 }
3137 lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
3138 lj_mcode_patch(J, mcarea, 1);
3139 }
3140
3141