/*
** ARM64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
**
** Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
** Sponsored by Cisco Systems, Inc.
*/
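
/* Note: as with the other LuaJIT backends, machine code is generated
** backwards: as->mcp is decremented as instructions are emitted, so within
** each function below the emit_*() calls appear in reverse order of
** eventual execution.
*/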

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}
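
/* Note: ra_alloc2() packs both allocations into a single Reg, with the
** left register in the low byte and the right register in bits 8-15.
** Callers unpack it as in e.g. asm_fparith():
**   Reg right, left = ra_alloc2(as, ir, RSET_FPR);
**   right = (left >> 8); left &= 255;
*/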

/* -- Guard handling ------------------------------------------------------ */

/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  MCode *mxp = as->mctop;
  if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
    asm_mclimit(as);
  /* 1: str lr,[sp]; bl ->vm_exit_handler; movz w0,traceno; bl <1; bl <1; ... */
  for (i = nexits-1; (int32_t)i >= 0; i--)
    *--mxp = A64I_LE(A64I_BL | A64F_S26(-3-i));
  *--mxp = A64I_LE(A64I_MOVZw | A64F_U16(as->T->traceno));
  mxp--;
  *mxp = A64I_LE(A64I_BL | A64F_S26(((MCode *)(void *)lj_vm_exit_handler-mxp)));
  *--mxp = A64I_LE(A64I_STRx | A64F_D(RID_LR) | A64F_N(RID_SP));
  as->mctop = mxp;
}
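
/* Note: the stubs are written backwards from as->mctop, so afterwards the
** new mctop points at the shared prologue and the layout is:
**   mctop[0]:   str lr, [sp]
**   mctop[1]:   bl ->vm_exit_handler
**   mctop[2]:   movz w0, #traceno
**   mctop[3+i]: bl <1   (stub for exit i; offset -3-i branches to mctop[0])
** which is why asm_exitstub_addr() below returns mctop + exitno + 3.
*/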

static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
{
  /* Keep this in-sync with exitstub_trace_addr(). */
  return as->mctop + exitno + 3;
}

/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, A64CC cc)
{
  MCode *target = asm_exitstub_addr(as, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = A64I_B | A64F_S26(target-p);
    emit_cond_branch(as, cc^1, p-1);
    return;
  }
  emit_cond_branch(as, cc, target);
}

/* Emit test and branch instruction to exit for guard. */
static void asm_guardtnb(ASMState *as, A64Ins ai, Reg r, uint32_t bit)
{
  MCode *target = asm_exitstub_addr(as, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = A64I_B | A64F_S26(target-p);
    emit_tnb(as, ai^0x01000000u, r, bit, p-1);
    return;
  }
  emit_tnb(as, ai, r, bit, target);
}

/* Emit compare and branch instruction to exit for guard. */
static void asm_guardcnb(ASMState *as, A64Ins ai, Reg r)
{
  MCode *target = asm_exitstub_addr(as, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = A64I_B | A64F_S26(target-p);
    emit_cnb(as, ai^0x01000000u, r, p-1);
    return;
  }
  emit_cnb(as, ai, r, target);
}

/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
    if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_k64(ir)->u64)) {
      *k = (int32_t)ir_k64(ir)->u64;
      return 1;
    }
  }
  return 0;
}

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

#define FUSE_REG	0x40000000

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          A64Ins ins)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (emit_checkofs(ins, ofs)) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        } else {
          Reg base = ra_alloc1(as, ir->op1, allow);
          *ofsp = FUSE_REG|ra_alloc1(as, ir->op2, rset_exclude(allow, base));
          return base;
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (emit_checkofs(ins, ofs)) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
        int64_t ofs = glofs(as, &uv->tv);
        if (emit_checkofs(ins, ofs)) {
          *ofsp = (int32_t)ofs;
          return RID_GL;
        }
      }
    } else if (ir->o == IR_TMPREF) {
      *ofsp = (int32_t)glofs(as, &J2G(as->J)->tmptv);
      return RID_GL;
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, A64Ins ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return A64F_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t m;
    int64_t k = get_k64val(as, ref);
    if ((ai & 0x1f000000) == 0x0a000000)
      m = emit_isk13(k, irt_is64(ir->t));
    else
      m = emit_isk12(k);
    if (m)
      return m;
  } else if (mayfuse(as, ref)) {
    if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR && irref_isk(ir->op2)) ||
        (ir->o == IR_ADD && ir->op1 == ir->op2)) {
      A64Shift sh = ir->o == IR_BSHR ? A64SH_LSR :
                    ir->o == IR_BSAR ? A64SH_ASR : A64SH_LSL;
      int shift = ir->o == IR_ADD ? 1 :
                  (IR(ir->op2)->i & (irt_is64(ir->t) ? 63 : 31));
      IRIns *irl = IR(ir->op1);
      if (sh == A64SH_LSL &&
          irl->o == IR_CONV &&
          irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
          shift <= 4 &&
          canfuse(as, irl)) {
        Reg m = ra_alloc1(as, irl->op1, allow);
        return A64F_M(m) | A64F_EXSH(A64EX_SXTW, shift);
      } else {
        Reg m = ra_alloc1(as, ir->op1, allow);
        return A64F_M(m) | A64F_SH(sh, shift);
      }
    } else if (ir->o == IR_CONV &&
               ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return A64F_M(m) | A64F_EX(A64EX_SXTW);
    }
  }
  return A64F_M(ra_allocref(as, ref, allow));
}
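
/* Note: asm_fuseopm() returns the encoded "m" operand in one of three
** forms: a plain register (A64F_M), a K12/K13 immediate for add/sub or
** logical instructions, or a shifted/extended register, e.g. the second
** source of "add x0, x1, x2, lsl #3" or "add x0, x1, w2, sxtw".  Callers
** merge the result into the instruction word with ai^m.
*/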

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, A64Ins ai, Reg rd, IRRef ref,
                         RegSet allow)
{
  IRIns *ir = IR(ref);
  Reg base;
  int32_t ofs = 0;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    if (ir->o == IR_ADD) {
      if (asm_isk32(as, ir->op2, &ofs) && emit_checkofs(ai, ofs)) {
        ref = ir->op1;
      } else {
        Reg rn, rm;
        IRRef lref = ir->op1, rref = ir->op2;
        IRIns *irl = IR(lref);
        if (mayfuse(as, irl->op1)) {
          unsigned int shift = 4;
          if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
            shift = (IR(irl->op2)->i & 63);
          } else if (irl->o == IR_ADD && irl->op1 == irl->op2) {
            shift = 1;
          }
          if ((ai >> 30) == shift) {
            lref = irl->op1;
            irl = IR(lref);
            ai |= A64I_LS_SH;
          }
        }
        if (irl->o == IR_CONV &&
            irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
            canfuse(as, irl)) {
          lref = irl->op1;
          ai |= A64I_LS_SXTWx;
        } else {
          ai |= A64I_LS_LSLx;
        }
        rm = ra_alloc1(as, lref, allow);
        rn = ra_alloc1(as, rref, rset_exclude(allow, rm));
        emit_dnm(as, (ai^A64I_LS_R), (rd & 31), rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF) {
      if (asm_isk32(as, ir->op2, &ofs)) {
        ref = ir->op1;
      } else if (asm_isk32(as, ir->op1, &ofs)) {
        ref = ir->op2;
      } else {
        Reg refk = irref_isk(ir->op1) ? ir->op1 : ir->op2;
        Reg refv = irref_isk(ir->op1) ? ir->op2 : ir->op1;
        Reg rn = ra_alloc1(as, refv, allow);
        IRIns *irr = IR(refk);
        uint32_t m;
        if (irr+1 == ir && !ra_used(irr) &&
            irr->o == IR_ADD && irref_isk(irr->op2)) {
          ofs = sizeof(GCstr) + IR(irr->op2)->i;
          if (emit_checkofs(ai, ofs)) {
            Reg rm = ra_alloc1(as, irr->op1, rset_exclude(allow, rn));
            m = A64F_M(rm) | A64F_EX(A64EX_SXTW);
            goto skipopm;
          }
        }
        m = asm_fuseopm(as, 0, refk, rset_exclude(allow, rn));
        ofs = sizeof(GCstr);
      skipopm:
        emit_lso(as, ai, rd, rd, ofs);
        emit_dn(as, A64I_ADDx^m, rd, rn);
        return;
      }
      ofs += sizeof(GCstr);
      if (!emit_checkofs(ai, ofs)) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        emit_dnm(as, (ai^A64I_LS_R)|A64I_LS_UXTWx, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
  emit_lso(as, ai, (rd & 31), base, ofs);
}

/* Fuse FP multiply-add/sub. */
static int asm_fusemadd(ASMState *as, IRIns *ir, A64Ins ai, A64Ins air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
  if (lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg left = ra_alloc2(as, irm,
                         rset_exclude(rset_exclude(RSET_FPR, dest), add));
    Reg right = (left >> 8); left &= 255;
    emit_dnma(as, ai, (dest & 31), (left & 31), (right & 31), (add & 31));
    return 1;
  }
  return 0;
}
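
/* Note: for IR_ADD both ai and air are FMADD, so operand order does not
** matter.  For IR_SUB the caller passes ai = FNMSUB and air = FMSUB:
** x*y - z needs fnmsub (d = n*m - a), while z - x*y, i.e. the MUL as the
** right operand, needs fmsub (d = a - n*m); the swap to air happens inside
** the condition above when the MUL is found on the right.
*/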

/* Fuse BAND + BSHL/BSHR into UBFM. */
static int asm_fuseandshift(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  lj_assertA(ir->o == IR_BAND, "bad usage");
  if (canfuse(as, irl) && irref_isk(ir->op2)) {
    uint64_t mask = get_k64val(as, ir->op2);
    if (irref_isk(irl->op2) && (irl->o == IR_BSHR || irl->o == IR_BSHL)) {
      int32_t shmask = irt_is64(irl->t) ? 63 : 31;
      int32_t shift = (IR(irl->op2)->i & shmask);
      int32_t imms = shift;
      if (irl->o == IR_BSHL) {
        mask >>= shift;
        shift = (shmask-shift+1) & shmask;
        imms = 0;
      }
      if (mask && !((mask+1) & mask)) {  /* Contiguous 1-bits at the bottom. */
        Reg dest = ra_dest(as, ir, RSET_GPR);
        Reg left = ra_alloc1(as, irl->op1, RSET_GPR);
        A64Ins ai = shmask == 63 ? A64I_UBFMx : A64I_UBFMw;
        imms += 63 - emit_clz64(mask);
        if (imms > shmask) imms = shmask;
        emit_dn(as, ai | A64F_IMMS(imms) | A64F_IMMR(shift), dest, left);
        return 1;
      }
    }
  }
  return 0;
}
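
/* Worked example: BAND(BSHR(x, 8), 0xff) has shift = 8 and mask = 0xff,
** so imms = 8 + (63 - clz64(0xff)) = 15 and the emitted instruction is
** "ubfm w0, w1, #8, #15", i.e. "ubfx w0, w1, #8, #8", which extracts bits
** 8-15 -- exactly (x >> 8) & 0xff.
*/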

/* Fuse BOR(BSHL, BSHR) into EXTR/ROR. */
static int asm_fuseorshift(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  lj_assertA(ir->o == IR_BOR, "bad usage");
  if (canfuse(as, irl) && canfuse(as, irr) &&
      ((irl->o == IR_BSHR && irr->o == IR_BSHL) ||
       (irl->o == IR_BSHL && irr->o == IR_BSHR))) {
    if (irref_isk(irl->op2) && irref_isk(irr->op2)) {
      IRRef lref = irl->op1, rref = irr->op1;
      uint32_t lshift = IR(irl->op2)->i, rshift = IR(irr->op2)->i;
      if (irl->o == IR_BSHR) {  /* BSHR needs to be the right operand. */
        uint32_t tmp2;
        IRRef tmp1 = lref; lref = rref; rref = tmp1;
        tmp2 = lshift; lshift = rshift; rshift = tmp2;
      }
      if (rshift + lshift == (irt_is64(ir->t) ? 64 : 32)) {
        A64Ins ai = irt_is64(ir->t) ? A64I_EXTRx : A64I_EXTRw;
        Reg dest = ra_dest(as, ir, RSET_GPR);
        Reg left = ra_alloc1(as, lref, RSET_GPR);
        Reg right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
        emit_dnm(as, ai | A64F_IMMS(rshift), dest, left, right);
        return 1;
      }
    }
  }
  return 0;
}
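
/* Worked example: the 32 bit rotate idiom BOR(BSHL(x, 8), BSHR(x, 24))
** satisfies lshift + rshift == 32 and is emitted as
** "extr w0, w1, w1, #24", which is equivalent to "ror w0, w1, #24".
*/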

/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = 0;
  Reg gpr, fpr = REGARG_FIRSTFPR;
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    if (ref) {
      if (irt_isfp(ir->t)) {
        if (fpr <= REGARG_LASTFPR) {
          lj_assertA(rset_test(as->freeset, fpr),
                     "reg %d not free", fpr);  /* Must have been evicted. */
          ra_leftov(as, fpr, ref);
          fpr++;
        } else {
          Reg r = ra_alloc1(as, ref, RSET_FPR);
          emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_isnum(ir->t)) ? 4 : 0));
          ofs += 8;
        }
      } else {
        if (gpr <= REGARG_LASTGPR) {
          lj_assertA(rset_test(as->freeset, gpr),
                     "reg %d not free", gpr);  /* Must have been evicted. */
          ra_leftov(as, gpr, ref);
          gpr++;
        } else {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_is64(ir->t)) ? 4 : 0));
          ofs += 8;
        }
      }
    }
  }
}
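
/* Note: this follows the AArch64 procedure call standard: up to eight
** integer arguments are passed in x0-x7 and up to eight FP arguments in
** d0-d7 (the REGARG_* ranges), with any excess arguments stored to the
** stack in 8 byte slots.
*/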

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lj_assertA(!irt_ispri(ir->t), "PRI dest");
    if (irt_isfp(ir->t)) {
      if (ci->flags & CCI_CASTU64) {
        Reg dest = ra_dest(as, ir, RSET_FPR) & 31;
        emit_dn(as, irt_isnum(ir->t) ? A64I_FMOV_D_R : A64I_FMOV_S_R,
                dest, RID_RET);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
  UNUSED(ci);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(ir_k64(irf)->u64);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_X8, RID_MAX_GPR)-RSET_FIXED);
    emit_n(as, A64I_BLR, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, A64I_STRx, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, A64I_CMPx, RID_TMP,
          ra_allock(as, i64ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, A64I_LDRx, RID_TMP, base, -8);
}

/* -- Buffer operations --------------------------------------------------- */

#if LJ_HASBUFFER
static void asm_bufhdr_write(ASMState *as, Reg sb)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  IRIns irgc;
  irgc.ot = IRT(0, IRT_PGC);  /* GC type. */
  emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
  emit_dn(as, A64I_BFMx | A64F_IMMS(lj_fls(SBUF_MASK_FLAG)) | A64F_IMMR(0), RID_TMP, tmp);
  emit_getgl(as, RID_TMP, cur_L);
  emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
}
#endif

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_nm(as, A64I_FCMPd, (tmp & 31), (left & 31));
  emit_dn(as, A64I_FCVT_F64_S32, (tmp & 31), dest);
  emit_dn(as, A64I_FCVT_S32_F64, dest, (left & 31));
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dn(as, A64I_FMOV_R_S, dest, (tmp & 31));
  emit_dnm(as, A64I_FADDd, (tmp & 31), (left & 31), (right & 31));
}

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dn(as, st == IRT_NUM ? A64I_FCVT_F32_F64 : A64I_FCVT_F64_F32,
              (dest & 31), (ra_alloc1(as, lref, RSET_FPR) & 31));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      A64Ins ai = irt_isfloat(ir->t) ?
        (((IRT_IS64 >> st) & 1) ?
         (st == IRT_I64 ? A64I_FCVT_F32_S64 : A64I_FCVT_F32_U64) :
         (st == IRT_INT ? A64I_FCVT_F32_S32 : A64I_FCVT_F32_U32)) :
        (((IRT_IS64 >> st) & 1) ?
         (st == IRT_I64 ? A64I_FCVT_F64_S64 : A64I_FCVT_F64_U64) :
         (st == IRT_INT ? A64I_FCVT_F64_S32 : A64I_FCVT_F64_U32));
      emit_dn(as, ai, (dest & 31), left);
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg dest = ra_dest(as, ir, RSET_GPR);
      A64Ins ai = irt_is64(ir->t) ?
        (st == IRT_NUM ?
         (irt_isi64(ir->t) ? A64I_FCVT_S64_F64 : A64I_FCVT_U64_F64) :
         (irt_isi64(ir->t) ? A64I_FCVT_S64_F32 : A64I_FCVT_U64_F32)) :
        (st == IRT_NUM ?
         (irt_isint(ir->t) ? A64I_FCVT_S32_F64 : A64I_FCVT_U32_F64) :
         (irt_isint(ir->t) ? A64I_FCVT_S32_F32 : A64I_FCVT_U32_F32));
      emit_dn(as, ai, dest, (left & 31));
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, lref, RSET_GPR);
    A64Ins ai = st == IRT_I8 ? A64I_SXTBw :
                st == IRT_U8 ? A64I_UXTBw :
                st == IRT_I16 ? A64I_SXTHw : A64I_UXTHw;
    lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
    emit_dn(as, ai, dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (irt_is64(ir->t)) {
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = ra_alloc1(as, lref, RSET_GPR);
        emit_dn(as, A64I_SXTW, dest, left);
      }
    } else {
      if (st64 && !(ir->op2 & IRCONV_NONE)) {
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        Reg left = ra_alloc1(as, lref, RSET_GPR);
        emit_dm(as, A64I_MOVw, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg dest = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      dest = ra_dest(as, ir, RSET_FPR);
    }
  }
  if (destused)
    emit_lso(as, A64I_LDRd, (dest & 31), RID_SP, 0);
  asm_guardcnb(as, A64I_CBZ, RID_RET);
  args[0] = ir->op1;  /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  emit_opk(as, A64I_ADDx, tmp, RID_SP, ofs, RSET_GPR);
}

/* -- Memory references --------------------------------------------------- */

/* Store tagged value for ref at base+ofs. */
static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
{
  RegSet allow = rset_exclude(RSET_GPR, base);
  IRIns *ir = IR(ref);
  lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
             "store of IR type %d", irt_type(ir->t));
  if (irref_isk(ref)) {
    TValue k;
    lj_ir_kvalue(as->J->L, &k, ir);
    emit_lso(as, A64I_STRx, ra_allock(as, k.u64, allow), base, ofs);
  } else {
    Reg src = ra_alloc1(as, ref, allow);
    rset_clear(allow, src);
    if (irt_isinteger(ir->t)) {
      Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
      emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
      emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), RID_TMP, type, src);
    } else {
      Reg type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
      emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), RID_TMP, src, type);
    }
  }
}
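
/* Note: this builds the LJ_GC64 tagged value format, with the type code in
** the upper 17 bits and the payload in the lower 47 bits.  For a GC
** reference the tag is merged with "add tmp, src, type, lsl #47"; for an
** integer the 32 bit payload is zero-extended below the tag with
** "add tmp, type, src, uxtw".
*/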

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
{
  if ((mode & IRTMPREF_IN1)) {
    IRIns *ir = IR(ref);
    if (irt_isnum(ir->t)) {
      if (irref_isk(ref) && !(mode & IRTMPREF_OUT1)) {
        /* Use the number constant itself as a TValue. */
        ra_allockreg(as, i64ptr(ir_knum(ir)), dest);
        return;
      }
      emit_lso(as, A64I_STRd, (ra_alloc1(as, ref, RSET_FPR) & 31), dest, 0);
    } else {
      asm_tvstore64(as, dest, 0, ref);
    }
  }
  /* g->tmptv holds the TValue(s). */
  emit_dn(as, A64I_ADDx^emit_isk12(glofs(as, &J2G(as->J)->tmptv)), dest, RID_GL);
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, A64I_ADDx^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, A64I_ADDx | A64F_EXSH(A64EX_UXTW, 3), dest, base, idx);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, tmp = RID_TMP;
  Reg ftmp = RID_NONE, type = RID_NONE, scr = RID_NONE, tisnum = RID_NONE;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t k = 0;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;
  rset_clear(allow, tab);

  if (!isk) {
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    rset_clear(allow, key);
    if (!irt_isstr(kt)) {
      tmp = ra_scratch(as, allow);
      rset_clear(allow, tmp);
    }
  } else if (irt_isnum(kt)) {
    int64_t val = (int64_t)ir_knum(irkey)->u64;
    if (!(k = emit_isk12(val))) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
  } else if (!irt_ispri(kt)) {
    if (!(k = emit_isk12(irkey->i))) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }

  /* Allocate constants early. */
  if (irt_isnum(kt)) {
    if (!isk) {
      tisnum = ra_allock(as, LJ_TISNUM << 15, allow);
      ftmp = ra_scratch(as, rset_exclude(RSET_FPR, key));
      rset_clear(allow, tisnum);
    }
  } else if (irt_isaddr(kt)) {
    if (isk) {
      int64_t kk = ((int64_t)irt_toitype(kt) << 47) | irkey[1].tv.u64;
      scr = ra_allock(as, kk, allow);
    } else {
      scr = ra_scratch(as, allow);
    }
    rset_clear(allow, scr);
  } else {
    lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
    type = ra_allock(as, ~((int64_t)~irt_toitype(kt) << 47), allow);
    scr = ra_scratch(as, rset_clear(allow, type));
    rset_clear(allow, scr);
  }

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, A64I_CMPx^A64I_K12^0, dest);
  emit_lso(as, A64I_LDRx, dest, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_cond_branch(as, CC_EQ, l_end);

  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      if (k)
        emit_n(as, A64I_CMPx^k, tmp);
      else
        emit_nm(as, A64I_CMPx, key, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
    } else {
      emit_nm(as, A64I_FCMPd, key, ftmp);
      emit_dn(as, A64I_FMOV_D_R, (ftmp & 31), (tmp & 31));
      emit_cond_branch(as, CC_LO, l_next);
      emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), tisnum, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.n));
    }
  } else if (irt_isaddr(kt)) {
    if (isk) {
      emit_nm(as, A64I_CMPx, scr, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
    } else {
      emit_nm(as, A64I_CMPx, tmp, scr);
      emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key.u64));
    }
  } else {
    emit_nm(as, A64I_CMPx, scr, type);
    emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key));
  }

  *l_loop = A64I_BCC | A64F_S19(as->mcp - l_loop) | CC_NE;
  if (!isk && irt_isaddr(kt)) {
    type = ra_allock(as, (int32_t)irt_toitype(kt), allow);
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, key, type);
    rset_clear(allow, type);
  }
  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_lso(as, A64I_LDRx, dest, tab, offsetof(GCtab, node));
  } else {
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 3), dest, tmp, dest);
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 1), dest, dest, dest);
    emit_lso(as, A64I_LDRx, tmp, tab, offsetof(GCtab, node));
    if (isk) {
      Reg tmphash = ra_allock(as, khash, allow);
      emit_dnm(as, A64I_ANDw, dest, dest, tmphash);
      emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      /* Fetch of str->sid is cheaper than ra_allock. */
      emit_dnm(as, A64I_ANDw, dest, dest, tmp);
      emit_lso(as, A64I_LDRw, tmp, key, offsetof(GCstr, sid));
      emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      emit_dnm(as, A64I_ANDw, dest, dest, tmp);
      emit_lso(as, A64I_LDRw, tmp, tab, offsetof(GCtab, hmask));
      emit_dnm(as, A64I_SUBw, dest, dest, tmp);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT3)), tmp, tmp, tmp);
      emit_dnm(as, A64I_EORw, dest, dest, tmp);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT2)), dest, dest, dest);
      emit_dnm(as, A64I_SUBw, tmp, tmp, dest);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT1)), dest, dest, dest);
      emit_dnm(as, A64I_EORw, tmp, tmp, dest);
      if (irt_isnum(kt)) {
        emit_dnm(as, A64I_ADDw, dest, dest, dest);
        emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
        emit_dm(as, A64I_MOVw, tmp, dest);
        emit_dn(as, A64I_FMOV_R_D, dest, (key & 31));
      } else {
        checkmclim(as);
        emit_dm(as, A64I_MOVw, tmp, key);
        emit_dnm(as, A64I_EORw, dest, dest,
                 ra_allock(as, irt_toitype(kt) << 15, allow));
        emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
        emit_dm(as, A64I_MOVx, dest, key);
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  int bigofs = !emit_checkofs(A64I_LDRx, ofs);
  Reg dest = (ra_used(ir) || bigofs) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  uint64_t k;
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (bigofs) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, A64I_ADDx, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (irt_ispri(irkey->t)) {
    k = ~((int64_t)~irt_toitype(irkey->t) << 47);
  } else if (irt_isnum(irkey->t)) {
    k = ir_knum(irkey)->u64;
  } else {
    k = ((uint64_t)irt_toitype(irkey->t) << 47) | (uint64_t)ir_kgc(irkey);
  }
  key = ra_scratch(as, allow);
  emit_nm(as, A64I_CMPx, key, ra_allock(as, k, rset_exclude(allow, key)));
  emit_lso(as, A64I_LDRx, key, idx, kofs);
  if (bigofs)
    emit_opk(as, A64I_ADDx, dest, node, ofs, RSET_GPR);
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, A64I_LDRx, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, (A64I_CMPx^A64I_K12) | A64F_U12(1), RID_TMP);
      emit_opk(as, A64I_ADDx, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, A64I_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, A64I_LDRx, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, A64I_LDRx, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 8*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lj_assertA(!ra_used(ir), "unfused FREF");
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg base = ra_alloc1(as, ir->op1, allow);
  IRIns *irr = IR(ir->op2);
  int32_t ofs = sizeof(GCstr);
  uint32_t m;
  rset_clear(allow, base);
  if (irref_isk(ir->op2) && (m = emit_isk12(ofs + irr->i))) {
    emit_dn(as, A64I_ADDx^m, dest, base);
  } else {
    emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, dest);
    emit_dnm(as, A64I_ADDx, dest, base, ra_alloc1(as, ir->op2, allow));
  }
}

/* -- Loads and stores ---------------------------------------------------- */

static A64Ins asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return A64I_LDRB ^ A64I_LS_S;
  case IRT_U8: return A64I_LDRB;
  case IRT_I16: return A64I_LDRH ^ A64I_LS_S;
  case IRT_U16: return A64I_LDRH;
  case IRT_NUM: return A64I_LDRd;
  case IRT_FLOAT: return A64I_LDRs;
  default: return irt_is64(ir->t) ? A64I_LDRx : A64I_LDRw;
  }
}

static A64Ins asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return A64I_STRB;
  case IRT_I16: case IRT_U16: return A64I_STRH;
  case IRT_NUM: return A64I_STRd;
  case IRT_FLOAT: return A64I_STRs;
  default: return irt_is64(ir->t) ? A64I_STRx : A64I_STRw;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx;
  A64Ins ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
    idx = RID_GL;
    ofs = (ir->op2 << 2) - GG_OFS(g);
  } else {
    idx = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->op2 == IRFL_TAB_ARRAY) {
      ofs = asm_fuseabase(as, ir->op1);
      if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
        emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, idx);
        return;
      }
    }
    ofs = field_ofs[ir->op2];
  }
  emit_lso(as, ai, (dest & 31), idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    emit_lso(as, asm_fxstoreins(ir), (src & 31), idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
}

static void asm_xstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src));
  }
}

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  Reg idx, tmp, type;
  int32_t ofs = 0;
  RegSet gpr = RSET_GPR, allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
  lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
             irt_isint(ir->t),
             "bad load type %d", irt_type(ir->t));
  if (ra_used(ir)) {
    Reg dest = ra_dest(as, ir, allow);
    tmp = irt_isnum(ir->t) ? ra_scratch(as, rset_clear(gpr, dest)) : dest;
    if (irt_isaddr(ir->t)) {
      emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
    } else if (irt_isnum(ir->t)) {
      emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
    } else if (irt_isint(ir->t)) {
      emit_dm(as, A64I_MOVw, dest, dest);
    }
  } else {
    tmp = ra_scratch(as, gpr);
  }
  type = ra_scratch(as, rset_clear(gpr, tmp));
  idx = asm_fuseahuref(as, ir->op1, &ofs, rset_clear(gpr, type), A64I_LDRx);
  if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
  /* Always do the type check, even if the load result is unused. */
  asm_guardcc(as, irt_isnum(ir->t) ? CC_LS : CC_NE);
  if (irt_type(ir->t) >= IRT_NUM) {
    lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
               "bad load type %d", irt_type(ir->t));
    emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
            ra_allock(as, LJ_TISNUM << 15, rset_exclude(gpr, idx)), tmp);
  } else if (irt_isaddr(ir->t)) {
    emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(ir->t)), type);
    emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
  } else if (irt_isnil(ir->t)) {
    emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
  } else {
    emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
            ra_allock(as, (irt_toitype(ir->t) << 15) | 0x7fff, gpr), tmp);
  }
  if (ofs & FUSE_REG)
    emit_dnm(as, (A64I_LDRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
  else
    emit_lso(as, A64I_LDRx, tmp, idx, ofs);
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, tmp = RID_TMP, type = RID_NONE;
    int32_t ofs = 0;
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, A64I_STRd);
      if (ofs & FUSE_REG)
        emit_dnm(as, (A64I_STRd^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, (src & 31), idx, (ofs & 31));
      else
        emit_lso(as, A64I_STRd, (src & 31), idx, ofs);
    } else {
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
        if (irt_isinteger(ir->t))
          type = ra_allock(as, (uint64_t)(int32_t)LJ_TISNUM << 47, allow);
        else
          type = ra_allock(as, irt_toitype(ir->t), allow);
      } else {
        tmp = type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t)<<47), allow);
      }
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type),
                           A64I_STRx);
      if (ofs & FUSE_REG)
        emit_dnm(as, (A64I_STRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
      else
        emit_lso(as, A64I_STRx, tmp, idx, ofs);
      if (ra_hasreg(src)) {
        if (irt_isinteger(ir->t)) {
          emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), tmp, type, src);
        } else {
          emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, src, type);
        }
      }
    }
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-2);
  IRType1 t = ir->t;
  Reg dest = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
             "bad parent SLOAD");  /* Handled by asm_head_side(). */
  lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
             "inconsistent SLOAD variant");
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, irt_isint(t) ? RSET_FPR : RSET_GPR);
    lj_assertA((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t),
               "bad SLOAD type %d", irt_type(t));
    dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : allow);
    base = ra_alloc1(as, REF_BASE, rset_clear(allow, dest));
    if (irt_isaddr(t)) {
      emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
    } else if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (irt_isint(t)) {
        emit_dn(as, A64I_FCVT_S32_F64, dest, (tmp & 31));
        /* If value is already loaded for type check, move it to FPR. */
        if ((ir->op2 & IRSLOAD_TYPECHECK))
          emit_dn(as, A64I_FMOV_D_R, (tmp & 31), dest);
        else
          dest = tmp;
        t.irt = IRT_NUM;  /* Check for original type. */
      } else {
        emit_dn(as, A64I_FCVT_F64_S32, (dest & 31), tmp);
        dest = tmp;
        t.irt = IRT_INT;  /* Check for original type. */
      }
    } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
      emit_dm(as, A64I_MOVw, dest, dest);
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    Reg tmp;
    if (ra_hasreg(dest) && rset_test(RSET_GPR, dest)) {
      tmp = dest;
    } else {
      tmp = ra_scratch(as, allow);
      rset_clear(allow, tmp);
    }
    if (irt_isnum(t) && !(ir->op2 & IRSLOAD_CONVERT))
      emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
    /* Need type check, even if the load result is unused. */
    asm_guardcc(as, irt_isnum(t) ? CC_LS : CC_NE);
    if (irt_type(t) >= IRT_NUM) {
      lj_assertA(irt_isinteger(t) || irt_isnum(t),
                 "bad SLOAD type %d", irt_type(t));
      emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
              ra_allock(as, LJ_TISNUM << 15, allow), tmp);
    } else if (irt_isnil(t)) {
      emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
    } else if (irt_ispri(t)) {
      emit_nm(as, A64I_CMPx,
              ra_allock(as, ~((int64_t)~irt_toitype(t) << 47), allow), tmp);
    } else {
      Reg type = ra_scratch(as, allow);
      emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(t)), type);
      emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
    }
    emit_lso(as, A64I_LDRx, tmp, base, ofs);
    return;
  }
  if (ra_hasreg(dest)) {
    emit_lso(as, irt_isnum(t) ? A64I_LDRd :
             (irt_isint(t) ? A64I_LDRw : A64I_LDRx), (dest & 31), base,
             ofs ^ ((LJ_BE && irt_isint(t) ? 4 : 0)));
  }
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
             "bad CNEW/CNEWI operands");

  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCcdata * */
  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    Reg r = ra_alloc1(as, ir->op2, allow);
    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
    emit_lso(as, sz == 8 ? A64I_STRx : A64I_STRw, r, RID_RET, ofs);
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id */
    args[2] = ir->op2;      /* CTSize sz */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    Reg r = (id < 65536) ? RID_X1 : ra_allock(as, id, allow);
    emit_lso(as, A64I_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lso(as, A64I_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, A64I_MOVZw | A64F_U16(~LJ_TCDATA), RID_TMP);
    if (id < 65536) emit_d(as, A64I_MOVZw | A64F_U16(id), RID_X1);
  }
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, A64I_STRx, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, A64I_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_setgl(as, tab, gc.grayagain);
  emit_dn(as, A64I_ANDw^emit_isk13(~LJ_GC_BLACK, 0), mark, mark);
  emit_getgl(as, link, gc.grayagain);
  emit_cond_branch(as, CC_EQ, l_end);
  emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), mark);
  emit_lso(as, A64I_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  RegSet allow = RSET_GPR;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv */
  asm_gencall(as, ci, args);
  emit_dm(as, A64I_MOVx, ra_releasetmp(as, ASMREF_TMP1), RID_GL);
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(allow, obj));
  emit_cond_branch(as, CC_EQ, l_end);
  emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), tmp);
  emit_cond_branch(as, CC_EQ, l_end);
  emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_WHITES, 0), RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_lso(as, A64I_LDRB, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, A64I_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

static void asm_fparith(ASMState *as, IRIns *ir, A64Ins ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_dnm(as, ai, (dest & 31), (left & 31), (right & 31));
}

static void asm_fpunary(ASMState *as, IRIns *ir, A64Ins ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_dn(as, ai, (dest & 31), (left & 31));
}

static void asm_fpmath(ASMState *as, IRIns *ir)
{
  IRFPMathOp fpm = (IRFPMathOp)ir->op2;
  if (fpm == IRFPM_SQRT) {
    asm_fpunary(as, ir, A64I_FSQRTd);
  } else if (fpm <= IRFPM_TRUNC) {
    asm_fpunary(as, ir, fpm == IRFPM_FLOOR ? A64I_FRINTMd :
                        fpm == IRFPM_CEIL ? A64I_FRINTPd : A64I_FRINTZd);
  } else {
    asm_callid(as, ir, IRCALL_lj_vm_floor + fpm);
  }
}

static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2) ||
      (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2) ||
      (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_intop(ASMState *as, IRIns *ir, A64Ins ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if ((ai & ~A64I_S) != A64I_SUBw && asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  if (irt_is64(ir->t)) ai |= A64I_X;
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= A64I_S;
  }
  emit_dn(as, ai^m, dest, left);
}

static void asm_intop_s(ASMState *as, IRIns *ir, A64Ins ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai |= A64I_S;
  }
  asm_intop(as, ir, ai);
}
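
/* Note: as->flagmcp marks a just-emitted "cmp r, #0" of the current
** instruction's result.  Dropping that compare and setting the S flag on
** the ALU op instead turns e.g. "add w0, w1, w2; cmp w0, #0; b.eq ..."
** into "adds w0, w1, w2; b.eq ...".
*/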

static void asm_intneg(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dm(as, irt_is64(ir->t) ? A64I_NEGx : A64I_NEGw, dest, left);
}

/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    asm_guardcc(as, CC_NE);
    emit_dm(as, A64I_MOVw, dest, dest);  /* Zero-extend. */
    emit_nm(as, A64I_CMPw | A64F_SH(A64SH_ASR, 31), RID_TMP, dest);
    emit_dn(as, A64I_ASRx | A64F_IMMR(32), RID_TMP, dest);
    emit_dnm(as, A64I_SMULL, dest, right, left);
  } else {
    emit_dnm(as, irt_is64(ir->t) ? A64I_MULx : A64I_MULw, dest, left, right);
  }
}

static void asm_add(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, A64I_FMADDd, A64I_FMADDd))
      asm_fparith(as, ir, A64I_FADDd);
    return;
  }
  asm_intop_s(as, ir, A64I_ADDw);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, A64I_FNMSUBd, A64I_FMSUBd))
      asm_fparith(as, ir, A64I_FSUBd);
    return;
  }
  asm_intop_s(as, ir, A64I_SUBw);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, A64I_FMULd);
    return;
  }
  asm_intmul(as, ir);
}

#define asm_addov(as, ir)	asm_add(as, ir)
#define asm_subov(as, ir)	asm_sub(as, ir)
#define asm_mulov(as, ir)	asm_mul(as, ir)

#define asm_fpdiv(as, ir)	asm_fparith(as, ir, A64I_FDIVd)
#define asm_abs(as, ir)		asm_fpunary(as, ir, A64I_FABS)

static void asm_neg(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, A64I_FNEGd);
    return;
  }
  asm_intneg(as, ir);
}

static void asm_band(ASMState *as, IRIns *ir)
{
  A64Ins ai = A64I_ANDw;
  if (asm_fuseandshift(as, ir))
    return;
  if (as->flagmcp == as->mcp) {
    /* Try to drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai = A64I_ANDSw;
  }
  asm_intop(as, ir, ai);
}

static void asm_borbxor(ASMState *as, IRIns *ir, A64Ins ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irl = IR(lref), *irr = IR(rref);
  if ((canfuse(as, irl) && irl->o == IR_BNOT && !irref_isk(rref)) ||
      (canfuse(as, irr) && irr->o == IR_BNOT && !irref_isk(lref))) {
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m;
    if (irl->o == IR_BNOT) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    left = ra_alloc1(as, lref, RSET_GPR);
    ai |= A64I_ON;
    if (irt_is64(ir->t)) ai |= A64I_X;
    m = asm_fuseopm(as, ai, IR(rref)->op1, rset_exclude(RSET_GPR, left));
    emit_dn(as, ai^m, dest, left);
  } else {
    asm_intop(as, ir, ai);
  }
}

static void asm_bor(ASMState *as, IRIns *ir)
{
  if (asm_fuseorshift(as, ir))
    return;
  asm_borbxor(as, ir, A64I_ORRw);
}

#define asm_bxor(as, ir)	asm_borbxor(as, ir, A64I_EORw)

static void asm_bnot(ASMState *as, IRIns *ir)
{
  A64Ins ai = A64I_MVNw;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
  if (irt_is64(ir->t)) ai |= A64I_X;
  emit_d(as, ai^m, dest);
}

static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dn(as, irt_is64(ir->t) ? A64I_REVx : A64I_REVw, dest, left);
}

static void asm_bitshift(ASMState *as, IRIns *ir, A64Ins ai, A64Shift sh)
{
  int32_t shmask = irt_is64(ir->t) ? 63 : 31;
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & shmask);
    IRIns *irl = IR(ir->op1);
    if (shmask == 63) ai += A64I_UBFMx - A64I_UBFMw;

    /* Fuse BSHL + BSHR/BSAR into UBFM/SBFM aka UBFX/SBFX/UBFIZ/SBFIZ. */
    if ((sh == A64SH_LSR || sh == A64SH_ASR) && canfuse(as, irl)) {
      if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
        int32_t shift2 = (IR(irl->op2)->i & shmask);
        shift = ((shift - shift2) & shmask);
        shmask -= shift2;
        ir = irl;
      }
    }

    left = ra_alloc1(as, ir->op1, RSET_GPR);
    switch (sh) {
    case A64SH_LSL:
      emit_dn(as, ai | A64F_IMMS(shmask-shift) |
                  A64F_IMMR((shmask-shift+1)&shmask), dest, left);
      break;
    case A64SH_LSR: case A64SH_ASR:
      emit_dn(as, ai | A64F_IMMS(shmask) | A64F_IMMR(shift), dest, left);
      break;
    case A64SH_ROR:
      emit_dnm(as, ai | A64F_IMMS(shift), dest, left, left);
      break;
    }
  } else {  /* Variable-length shifts. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dnm(as, (shmask == 63 ? A64I_SHRx : A64I_SHRw) | A64F_BSH(sh), dest, left, right);
  }
}
1574
1575 #define asm_bshl(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSL)
1576 #define asm_bshr(as, ir) asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSR)
1577 #define asm_bsar(as, ir) asm_bitshift(as, ir, A64I_SBFMw, A64SH_ASR)
1578 #define asm_bror(as, ir) asm_bitshift(as, ir, A64I_EXTRw, A64SH_ROR)
1579 #define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
1580
asm_intmin_max(ASMState * as,IRIns * ir,A64CC cc)1581 static void asm_intmin_max(ASMState *as, IRIns *ir, A64CC cc)
1582 {
1583 Reg dest = ra_dest(as, ir, RSET_GPR);
1584 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1585 Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1586 emit_dnm(as, A64I_CSELw|A64F_CC(cc), dest, left, right);
1587 emit_nm(as, A64I_CMPw, left, right);
1588 }
1589
asm_fpmin_max(ASMState * as,IRIns * ir,A64CC fcc)1590 static void asm_fpmin_max(ASMState *as, IRIns *ir, A64CC fcc)
1591 {
1592 Reg dest = (ra_dest(as, ir, RSET_FPR) & 31);
1593 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1594 right = ((left >> 8) & 31); left &= 31;
1595 emit_dnm(as, A64I_FCSELd | A64F_CC(fcc), dest, right, left);
1596 emit_nm(as, A64I_FCMPd, left, right);
1597 }
1598
asm_min_max(ASMState * as,IRIns * ir,A64CC cc,A64CC fcc)1599 static void asm_min_max(ASMState *as, IRIns *ir, A64CC cc, A64CC fcc)
1600 {
1601 if (irt_isnum(ir->t))
1602 asm_fpmin_max(as, ir, fcc);
1603 else
1604 asm_intmin_max(as, ir, cc);
1605 }
1606
1607 #define asm_min(as, ir) asm_min_max(as, ir, CC_LT, CC_PL)
1608 #define asm_max(as, ir) asm_min_max(as, ir, CC_GT, CC_LE)
1609
1610 /* -- Comparisons --------------------------------------------------------- */
1611
1612 /* Map of comparisons to flags. ORDER IR. */
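/* Low nibble = negated integer condition for the guard branch (taken
** on the exit path), high nibble = negated FP condition, applied after
** an FCMP with swapped operands for the ops marked "x" below.
*/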
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc                 FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};

/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  A64Ins ai;
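  /* The bit tricks below set swp exactly for GE, GT, ULT and ULE,
  ** i.e. the ops marked "x" in asm_compmap above.
  */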
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 31);
    right = 0;
    ai = A64I_FCMPZd;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 31); left = ((left >> 8) & 31);
    } else {
      right = ((left >> 8) & 31); left &= 31;
    }
    ai = A64I_FCMPd;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_nm(as, ai, left, right);
}

/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  A64CC oldcc, cc = (asm_compmap[ir->o] & 15);
  A64Ins ai = irt_is64(ir->t) ? A64I_CMPx : A64I_CMPw;
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
             irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
             "bad comparison data type %d", irt_type(ir->t));
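  /* Swapping the operands must mirror the condition: in the A64
  ** condition-code encoding, XOR with 7 exchanges GE<->LE and LT<->GT,
  ** XOR with 11 exchanges HS<->LS and LO<->HI.
  */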
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  oldcc = cc;
  if (irref_isk(rref) && get_k64val(as, rref) == 0) {
    IRIns *irl = IR(lref);
    if (cc == CC_GE) cc = CC_PL;
    else if (cc == CC_LT) cc = CC_MI;
    else if (cc > CC_NE) goto nocombine;  /* Other conds don't work with tst. */
    cmpprev0 = (irl+1 == ir);
    /* Combine and-cmp-bcc into tbz/tbnz or and-cmp into tst. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        IRRef tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        uint64_t k = get_k64val(as, brref);
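        /* A power-of-2 mask tests a single bit: branch on it directly
        ** with tbz/tbnz instead of tst + b.eq/b.ne.
        */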
        if (k && !(k & (k-1)) && (cc == CC_EQ || cc == CC_NE)) {
          asm_guardtnb(as, cc == CC_EQ ? A64I_TBZ : A64I_TBNZ,
                       ra_alloc1(as, blref, RSET_GPR), emit_ctz64(k));
          return;
        }
        m2 = emit_isk13(k, irt_is64(irl->t));
      }
      bleft = ra_alloc1(as, blref, RSET_GPR);
      ai = (irt_is64(irl->t) ? A64I_TSTx : A64I_TSTw);
      if (!m2)
        m2 = asm_fuseopm(as, ai, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ai^m2, bleft);
      return;
    }
    if (cc == CC_EQ || cc == CC_NE) {
      /* Combine cmp-bcc into cbz/cbnz. */
      ai = cc == CC_EQ ? A64I_CBZ : A64I_CBNZ;
      if (irt_is64(ir->t)) ai |= A64I_X;
      asm_guardcnb(as, ai, ra_alloc1(as, lref, RSET_GPR));
      return;
    }
  }
nocombine:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ai^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (oldcc <= CC_NE || oldcc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}

static void asm_comp(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fpcomp(as, ir);
  else
    asm_intcomp(as, ir);
}

#define asm_equal(as, ir)  asm_comp(as, ir)

/* -- Split register ops -------------------------------------------------- */

/* Hiword op of a split 64/64 bit op. Previous op is the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_CALLN:
  case IR_CALLL:
  case IR_CALLS:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
  }
}

/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  uint32_t k = emit_isk13(HOOK_PROFILE, 0);
  lj_assertA(k != 0, "HOOK_PROFILE does not fit in K13");
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_n(as, A64I_TSTw^k, RID_TMP);
  emit_lsptr(as, A64I_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
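/* In execution order this emits roughly:
**   ldr tmp, ->cur_L; ldr tmp, [tmp, #offsetof(lua_State, maxstack)]
**   sub tmp, tmp, pbase; cmp tmp, #(8*topslot); b.ls ->exit
** plus a temp-register save/restore when the parent base is spilled.
*/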
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lj_assertA(ra_hasreg(pbase), "base reg lost");
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, A64I_LDRx, RID_RET, RID_SP, 0);  /* Restore temp register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_cond_branch(as, CC_LS, asm_exitstub_addr(as, exitno));
  k = emit_isk12((8*topslot));
  lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
  emit_n(as, A64I_CMPx^k, RID_TMP);
  emit_dnm(as, A64I_SUBx, RID_TMP, RID_TMP, pbase);
  emit_lso(as, A64I_LDRx, RID_TMP, RID_TMP,
           (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    if (ra_hasspill(irp->s))
      emit_lso(as, A64I_LDRx, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, A64I_LDRx, RID_TMP, RID_GL, glofs(as, &J2G(as->J)->cur_L));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, A64I_STRx, RID_RET, RID_SP, 0);  /* Save temp register. */
  } else {
    emit_getgl(as, RID_TMP, cur_L);
  }
}

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
#ifdef LUA_USE_ASSERT
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
#endif
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if ((sn & SNAP_KEYINDEX)) {
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      Reg r = irref_isk(ref) ? ra_allock(as, ir->i, allow) :
                               ra_alloc1(as, ref, allow);
      rset_clear(allow, r);
      emit_lso(as, A64I_STRw, r, RID_BASE, ofs);
      emit_lso(as, A64I_STRw, ra_allock(as, LJ_KEYINDEX, allow),
               RID_BASE, ofs+4);
    } else if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_lso(as, A64I_STRd, (src & 31), RID_BASE, ofs);
    } else {
      asm_tvstore64(as, RID_BASE, ofs, ref);
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define ARM64_NOPATCH_GC_CHECK \
  (A64I_ORRx|A64F_D(RID_TMP)|A64F_M(RID_TMP)|A64F_N(RID_TMP))

/* Check GC threshold and do one or more GC steps. */
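/* In execution order this emits roughly:
**   ldr tmp, ->gc.total; ldr tmp2, ->gc.threshold; cmp tmp, tmp2
**   b.ls >1                  (skip the GC step while below threshold)
**   <set up g and steps args>; bl lj_gc_step_jit
**   orr tmp, tmp, tmp        (ARM64_NOPATCH_GC_CHECK marker)
**   cbnz ret, ->exit         (exit trace if in GCSatomic or GCSfinalize)
** 1:
*/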
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcnb(as, A64I_CBNZ, RID_RET);  /* Assumes asm_snap_prep() is done. */
  *--as->mcp = ARM64_NOPATCH_GC_CHECK;
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps */
  asm_gencall(as, ci, args);
  emit_dm(as, A64I_MOVx, ra_releasetmp(as, ASMREF_TMP1), RID_GL);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_cond_branch(as, CC_LS, l_end);
  emit_nm(as, A64I_CMPx, RID_TMP, tmp2);
  emit_getgl(as, tmp2, gc.threshold);
  emit_getgl(as, RID_TMP, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
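    /* tbz/tbnz (matched by the opcode mask) carries a 14-bit branch
    ** offset, bcc/cbz/cbnz a 19-bit one; both fields start at bit 5.
    */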
    uint32_t mask = (p[-2] & 0x7e000000) == 0x36000000 ? 0x3fffu : 0x7ffffu;
    ptrdiff_t delta = target - (p - 2);
    /* asm_guard* already inverted the bcc/tnb/cnb and patched the final b. */
    p[-2] |= ((uint32_t)delta & mask) << 5;
  } else {
    ptrdiff_t delta = target - (p - 1);
    p[-1] = A64I_B | A64F_S26(delta);
  }
}

/* Fixup the tail of the loop. */
static void asm_loop_tail_fixup(ASMState *as)
{
  UNUSED(as);  /* Nothing to do. */
}

/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->cur_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, cur_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    rset_clear(allow, ra_dest(as, ir, allow));
  } else {
    Reg r = irp->r;
    lj_assertA(ra_hasreg(r), "base reg lost");
    rset_clear(allow, r);
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  /* Undo the sp adjustment in BC_JLOOP when exiting to the interpreter. */
  int32_t spadj = as->T->spadjust + (lnk ? 0 : sps_scale(SPS_FIXED));
  if (spadj == 0) {
    *--p = A64I_LE(A64I_NOP);
    as->mctop = p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(spadj);
    lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
    p[-2] = (A64I_ADDx^k) | A64F_D(RID_SP) | A64F_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = A64I_B | A64F_S26((target-p)+1);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
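/* Each stack-passed argument takes an 8 byte slot, i.e. two of the
** 4 byte spill slots counted in evenspill.
*/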
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      if (nfpr > 0) nfpr--; else nslots += 2;
    } else {
      if (ngpr > 0) ngpr--; else nslots += 2;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

#if LJ_BE
/* ARM64 instructions are always little-endian. Swap for ARM64BE. */
static void asm_mcode_fixup(MCode *mcode, MSize size)
{
  MCode *pe = (MCode *)((char *)mcode + size);
  while (mcode < pe) {
    MCode ins = *mcode;
    *mcode++ = lj_bswap(ins);
  }
}
#define LJ_TARGET_MCODE_FIXUP  1
#endif

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_trace_addr(T, exitno);
  int patchlong = 1;
  /* Note: this assumes a trace exit is only ever patched once. */
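  /* Opcode checks below: 0x54 = b.cc (19-bit offset), 0x14-0x17 = b
  ** (26-bit), 0x34/0x35 or 0xb4/0xb5 = cbz/cbnz (19-bit), 0x36/0x37 or
  ** 0xb6/0xb7 = tbz/tbnz (14-bit). The second half of each test checks
  ** that the branch targets the exit stub px.
  */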
  for (; p < pe; p++) {
    /* Look for exitstub branch, replace with branch to target. */
    ptrdiff_t delta = target - p;
    MCode ins = A64I_LE(*p);
    if ((ins & 0xff000000u) == 0x54000000u &&
        ((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
      /* Patch bcc, if within range. */
      if (A64F_S_OK(delta, 19)) {
        *p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
        if (!cstart) cstart = p;
      }
    } else if ((ins & 0xfc000000u) == 0x14000000u &&
               ((ins ^ (px-p)) & 0x03ffffffu) == 0) {
      /* Patch b. */
      lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
      *p = A64I_LE((ins & 0xfc000000u) | A64F_S26(delta));
      if (!cstart) cstart = p;
    } else if ((ins & 0x7e000000u) == 0x34000000u &&
               ((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
      /* Patch cbz/cbnz, if within range. */
      if (p[-1] == ARM64_NOPATCH_GC_CHECK) {
        patchlong = 0;
      } else if (A64F_S_OK(delta, 19)) {
        *p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
        if (!cstart) cstart = p;
      }
    } else if ((ins & 0x7e000000u) == 0x36000000u &&
               ((ins ^ ((px-p)<<5)) & 0x0007ffe0u) == 0) {
      /* Patch tbz/tbnz, if within range. */
      if (A64F_S_OK(delta, 14)) {
        *p = A64I_LE((ins & 0xfff8001fu) | A64F_S14(delta));
        if (!cstart) cstart = p;
      }
    }
  }
  /* Always patch long-range branch in exit stub itself. Except, if we can't. */
  if (patchlong) {
    ptrdiff_t delta = target - px;
    lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
    *px = A64I_B | A64F_S26(delta);
    if (!cstart) cstart = px;
  }
  if (cstart) lj_mcode_sync(cstart, px+1);
  lj_mcode_patch(J, mcarea, 1);
}
