/*
** ARM IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
*/

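/* Note: the assembler generates machine code backwards, from the last
** instruction of the trace to the first. Each emit_*() call prepends an
** instruction at as->mcp, so the function bodies below read bottom-up
** relative to execution order.
*/
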
/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate a scratch register pair. */
static Reg ra_scratchpair(ASMState *as, RegSet allow)
{
  RegSet pick1 = as->freeset & allow;
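  /* pick1 & (pick1 >> 1) keeps only those registers whose successor is
  ** free as well; masking with RSET_GPREVEN then leaves free even/odd
  ** pairs, as required for LDRD/STRD. E.g. if r4, r5 and r10 are free,
  ** only the r4 bit survives, selecting the valid r4/r5 pair.
  */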
  RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
  Reg r;
  if (pick2) {
    r = rset_picktop(pick2);
  } else {
    RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
    if (pick) {
      r = rset_picktop(pick);
      ra_restore(as, regcost_ref(as->cost[r+1]));
    } else {
      pick = pick1 & (allow << 1) & RSET_GPRODD;
      if (pick) {
        r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
      } else {
        r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
        ra_restore(as, regcost_ref(as->cost[r+1]));
      }
    }
  }
  lj_assertA(rset_test(RSET_GPREVEN, r), "odd reg %d", r);
  ra_modified(as, r);
  ra_modified(as, r+1);
  RA_DBGX((as, "scratchpair $r $r", r, r+1));
  return r;
}

#if !LJ_SOFTFP
/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
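  /* Pack both registers into a single return value: left in bits 0-7,
  ** right in bits 8-15. Callers unpack with (ret >> 8) and (ret & 255).
  */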
  return left | (right << 8);
}
#endif

/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  MCode *mxp = as->mcbot;
  int i;
  if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
    asm_mclimit(as);
  /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
  *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
  *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
  mxp++;
  *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch);  /* DISPATCH address */
  *mxp++ = group*EXITSTUBS_PER_GROUP;
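  /* Each of the following stubs branches back to the group head above.
  ** The stub at word 4+i needs a displacement of 0-(4+i)-2 = -6-i words,
  ** because an ARM branch offset is relative to PC+8, i.e. two words
  ** beyond the branch itself.
  */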
  for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
    *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
  lj_mcode_sync(as->mcbot, mxp);
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxp - EXITSTUBS_PER_GROUP;
}

/* Set up all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, ARMCC cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
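  /* If this guard sits where the loop-end branch will go, invert the loop
  ** instead: write an unconditional branch to the exit at that spot and
  ** emit a branch with the inverted condition that jumps past it, so the
  ** looping path falls through.
  */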
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
    emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
    return;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
}

/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM 31

/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          int lim)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (ofs > -lim && ofs < lim) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (ofs < lim) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
        *ofsp = (ofs & 255);  /* Keep only the low 8 bits as the offset, so it fits into an LDRD. */
        return ra_allock(as, (ofs & ~255), allow);
      }
    } else if (ir->o == IR_TMPREF) {
      *ofsp = 0;
      return RID_SP;
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}

/* Fuse m operand into arithmetic/logic instructions. */
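/* The m operand is ARM's flexible second operand: a plain register, a
** rotated 8-bit immediate (if emit_isk12() can encode the constant as
** imm8 ror 2*n), or a register with a constant or register shift fused
** from a BSHL/BSHR/BSAR/BROR operand. ADD x, x is fused as x lsl #1.
*/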
static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return ARMF_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t k = emit_isk12(ai, ir->i);
    if (k)
      return k;
  } else if (mayfuse(as, ref)) {
    if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
                    ir->o == IR_BSHR ? ARMSH_LSR :
                    ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
      if (irref_isk(ir->op2)) {
        return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
      } else {
        Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
        return m | ARMF_RSH(sh, s);
      }
    } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return m | ARMF_SH(ARMSH_LSL, 1);
    }
  }
  return ra_allocref(as, ref, allow);
}

/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
      irref_isk(ir->op2) && IR(ir->op2)->i == 2)
    return ir->op1;
  return 0;  /* No fusion. */
}

/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
                         RegSet allow, int32_t ofs)
{
  IRIns *ir = IR(ref);
  Reg base;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
                  (ai & 0x04000000) ? 4096 : 256;
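    /* Bit 27 of ai marks a VFP load/store (8-bit offset scaled by 4, so
    ** +-1020), bit 26 a word/byte LDR/STR (12-bit offset, +-4095); the
    ** halfword/doubleword forms only take an 8-bit offset (+-255).
    */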
    if (ir->o == IR_ADD) {
      int32_t ofs2;
      if (irref_isk(ir->op2) &&
          (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
          (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
        ofs = ofs2;
        ref = ir->op1;
      } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
        IRRef lref = ir->op1, rref = ir->op2;
        Reg rn, rm;
        if ((ai & 0x04000000)) {
          IRRef sref = asm_fuselsl2(as, rref);
          if (sref) {
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
            lref = rref;
            rref = sref;
            ai |= ARMF_SH(ARMSH_LSL, 2);
          }
        }
        rn = ra_alloc1(as, lref, allow);
        rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
      lj_assertA(ofs == 0, "bad usage");
      ofs = (int32_t)sizeof(GCstr);
      if (irref_isk(ir->op2)) {
        ofs += IR(ir->op2)->i;
        ref = ir->op1;
      } else if (irref_isk(ir->op1)) {
        ofs += IR(ir->op1)->i;
        ref = ir->op2;
      } else {
        /* NYI: Fuse ADD with constant. */
        Reg rn = ra_alloc1(as, ir->op1, allow);
        uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
        if ((ai & 0x04000000))
          emit_lso(as, ai, rd, rd, ofs);
        else
          emit_lsox(as, ai, rd, rd, ofs);
        emit_dn(as, ARMI_ADD^m, rd, rn);
        return;
      }
      if (ofs <= -lim || ofs >= lim) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        if ((ai & 0x04000000)) ai |= ARMI_LS_R;
        emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
#if !LJ_SOFTFP
  if ((ai & 0x08000000))
    emit_vlso(as, ai, rd, base, ofs);
  else
#endif
  if ((ai & 0x04000000))
    emit_lso(as, ai, rd, base, ofs);
  else
    emit_lsox(as, ai, rd, base, ofs);
}

#if !LJ_SOFTFP
/* Fuse to multiply-add/sub instruction. */
static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
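  /* Either operand may be the unallocated MUL. If it's the right one, the
  ** comma expressions below swap the remaining operand into rref and pick
  ** the reversed instruction air before the register checks run.
  */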
  if (lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg right, left = ra_alloc2(as, irm,
                        rset_exclude(rset_exclude(RSET_FPR, dest), add));
    right = (left >> 8); left &= 255;
    emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
    if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
    return 1;
  }
  return 0;
}
#endif

/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = 0;
#if LJ_SOFTFP
  Reg gpr = REGARG_FIRSTGPR;
#else
  Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
#endif
  if ((void *)ci->func)
    emit_call(as, (void *)ci->func);
#if !LJ_SOFTFP
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
#endif
  for (n = 0; n < nargs; n++) {  /* Set up args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
#if !LJ_SOFTFP
    if (ref && irt_isfp(ir->t)) {
      RegSet of = as->freeset;
      Reg src;
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(ir->t)) {
          if (fpr <= REGARG_LASTFPR) {
            ra_leftov(as, fpr, ref);
            fpr++;
            continue;
          }
        } else if (fprodd) {  /* Ick. */
          src = ra_alloc1(as, ref, RSET_FPR);
          emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
          fprodd = 0;
          continue;
        } else if (fpr <= REGARG_LASTFPR) {
          ra_leftov(as, fpr, ref);
          fprodd = fpr++;
          continue;
        }
        /* Workaround to protect argument GPRs from being used for remat. */
        as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
        src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
        as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
        fprodd = 0;
        goto stackfp;
      }
      /* Workaround to protect argument GPRs from being used for remat. */
      as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
      src = ra_alloc1(as, ref, RSET_FPR);  /* May alloc GPR to remat FPR. */
      as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
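      /* The base soft-float EABI passes a double in an aligned even/odd
      ** GPR pair, so round gpr up to the next even register first.
      */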
      if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
      if (gpr <= REGARG_LASTGPR) {
        lj_assertA(rset_test(as->freeset, gpr),
                   "reg %d not free", gpr);  /* Must have been evicted. */
        if (irt_isnum(ir->t)) {
          lj_assertA(rset_test(as->freeset, gpr+1),
                     "reg %d not free", gpr+1);  /* Ditto. */
          emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
          gpr += 2;
        } else {
          emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
          gpr++;
        }
      } else {
      stackfp:
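        /* ofs is always 4-aligned here, so (ofs + 4) & ~4 rounds the
        ** stack offset up to the 8-byte alignment required for doubles.
        */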
        if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
        emit_spstore(as, ir, src, ofs);
        ofs += irt_isnum(ir->t) ? 8 : 4;
      }
    } else
#endif
    {
      if (gpr <= REGARG_LASTGPR) {
        lj_assertA(rset_test(as->freeset, gpr),
                   "reg %d not free", gpr);  /* Must have been evicted. */
        if (ref) ra_leftov(as, gpr, ref);
        gpr++;
      } else {
        if (ref) {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs);
        }
        ofs += 4;
      }
    }
  }
}

/* Set up result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lj_assertA(!irt_ispri(ir->t), "PRI dest");
    if (!LJ_SOFTFP && irt_isfp(ir->t)) {
      if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
        Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
        if (irt_isnum(ir->t))
          emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
        else
          emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
  UNUSED(ci);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(void *)(irf->i);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
    emit_m(as, ARMI_BLXr, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, ARMI_CMP, RID_TMP,
          ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
}

/* -- Buffer operations --------------------------------------------------- */

#if LJ_HASBUFFER
static void asm_bufhdr_write(ASMState *as, Reg sb)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  IRIns irgc;
  int32_t addr = i32ptr((void *)&J2G(as->J)->cur_L);
  irgc.ot = IRT(0, IRT_PGC);  /* GC type. */
  emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
  if ((as->flags & JIT_F_ARMV6T2)) {
    emit_dnm(as, ARMI_BFI, RID_TMP, lj_fls(SBUF_MASK_FLAG), tmp);
  } else {
    emit_dnm(as, ARMI_ORR, RID_TMP, RID_TMP, tmp);
    emit_dn(as, ARMI_AND|ARMI_K12|SBUF_MASK_FLAG, tmp, tmp);
  }
  emit_lso(as, ARMI_LDR, RID_TMP,
           ra_allock(as, (addr & ~4095),
                     rset_exclude(rset_exclude(RSET_GPR, sb), tmp)),
           (addr & 4095));
  emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
}
#endif

/* -- Type conversions ---------------------------------------------------- */

#if !LJ_SOFTFP
static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
  emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
  emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
}
#endif

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
#if !LJ_SOFTFP
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
#endif
  IRRef lref = ir->op1;
  /* 64 bit integer conversions are handled by SPLIT. */
  lj_assertA(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64),
             "IR %04d has unsplit 64 bit type",
             (int)(ir - as->ir) - REF_BIAS);
#if LJ_SOFTFP
  /* FP conversions are handled by SPLIT. */
  lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
             "IR %04d has FP type",
             (int)(ir - as->ir) - REF_BIAS);
  /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
#else
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
              (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      ARMIns ai = irt_isfloat(ir->t) ?
        (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
        (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
      emit_dm(as, ai, (dest & 15), (dest & 15));
      emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ARMIns ai;
      emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
      ai = irt_isint(ir->t) ?
        (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
        (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
      emit_dm(as, ai, (tmp & 15), (left & 15));
    }
  } else
#endif
  {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
      if ((as->flags & JIT_F_ARMV6)) {
        ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
                    st == IRT_U8 ? ARMI_UXTB :
                    st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
        emit_dm(as, ai, dest, left);
      } else if (st == IRT_U8) {
        emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
      } else {
        uint32_t shift = st == IRT_I8 ? 24 : 16;
        ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
        emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
        emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
      }
    } else {  /* Handle 32/32 bit no-op (cast). */
      ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
    }
  }
}

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg rlo = 0, rhi = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
#if LJ_SOFTFP
  if (destused) {
    if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
        (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
      int i;
      for (i = 0; i < 2; i++) {
        Reg r = (ir+i)->r;
        if (ra_hasreg(r)) {
          ra_free(as, r);
          ra_modified(as, r);
          emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
        }
      }
      ofs = sps_scale(ir->s);
      destused = 0;
    } else {
      rhi = ra_dest(as, ir+1, RSET_GPR);
      rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused) {
    emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
    emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
  }
#else
  UNUSED(rhi);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      rlo = ra_dest(as, ir, RSET_FPR);
    }
  }
  asm_guardcc(as, CC_EQ);
  if (destused)
    emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
#endif
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  if (ofs == 0)
    emit_dm(as, ARMI_MOV, tmp, RID_SP);
  else
    emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
}

/* -- Memory references --------------------------------------------------- */

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
{
  if ((mode & IRTMPREF_IN1)) {
    IRIns *ir = IR(ref);
    if (irt_isnum(ir->t)) {
      if ((mode & IRTMPREF_OUT1)) {
#if LJ_SOFTFP
        lj_assertA(irref_isk(ref), "unsplit FP op");
        emit_dm(as, ARMI_MOV, dest, RID_SP);
        emit_lso(as, ARMI_STR,
                 ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, RSET_GPR),
                 RID_SP, 0);
        emit_lso(as, ARMI_STR,
                 ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, RSET_GPR),
                 RID_SP, 4);
#else
        Reg src = ra_alloc1(as, ref, RSET_FPR);
        emit_dm(as, ARMI_MOV, dest, RID_SP);
        emit_vlso(as, ARMI_VSTR_D, src, RID_SP, 0);
#endif
      } else if (irref_isk(ref)) {
        /* Use the number constant itself as a TValue. */
        ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
      } else {
#if LJ_SOFTFP
        lj_assertA(0, "unsplit FP op");
#else
        /* Otherwise force a spill and use the spill slot. */
        emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
#endif
      }
    } else {
      /* Otherwise use [sp] and [sp+4] to hold the TValue.
      ** This assumes the following call has max. 4 args.
      */
      Reg type;
      emit_dm(as, ARMI_MOV, dest, RID_SP);
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, RSET_GPR);
        emit_lso(as, ARMI_STR, src, RID_SP, 0);
      }
      if (LJ_SOFTFP && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t))
        type = ra_alloc1(as, ref+1, RSET_GPR);
      else
        type = ra_allock(as, irt_toitype(ir->t), RSET_GPR);
      emit_lso(as, ARMI_STR, type, RID_SP, 4);
    }
  } else {
    emit_dm(as, ARMI_MOV, dest, RID_SP);
  }
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, ARMI_ADD^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  IRType1 kt = irkey->t;
  int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
  uint32_t khash;
  MCLabel l_end, l_loop;
  rset_clear(allow, tab);
  if (!irref_isk(refkey) || irt_isstr(kt)) {
#if LJ_SOFTFP
    key = ra_alloc1(as, refkey, allow);
    rset_clear(allow, key);
    if (irkey[1].o == IR_HIOP) {
      if (ra_hasreg((irkey+1)->r)) {
        keynumhi = (irkey+1)->r;
        keyhi = RID_TMP;
        ra_noweak(as, keynumhi);
      } else {
        keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
      }
      rset_clear(allow, keynumhi);
      khi = 0;
    }
#else
    if (irt_isnum(kt)) {
      key = ra_scratch(as, allow);
      rset_clear(allow, key);
      keyhi = keynumhi = ra_scratch(as, allow);
      rset_clear(allow, keyhi);
      khi = 0;
    } else {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
#endif
  } else if (irt_isnum(kt)) {
    int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
    k = emit_isk12(ARMI_CMP, val);
    if (!k) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
    val = (int32_t)ir_knum(irkey)->u32.hi;
    khi = emit_isk12(ARMI_CMP, val);
    if (!khi) {
      keyhi = ra_allock(as, val, allow);
      rset_clear(allow, keyhi);
    }
  } else if (!irt_ispri(kt)) {
    k = emit_isk12(ARMI_CMP, irkey->i);
    if (!k) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }
  if (!irt_ispri(kt))
    tmp = ra_scratchpair(as, allow);

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
  emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  if (!irt_ispri(kt)) {
    emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
    emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
    emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
  } else {
    emit_n(as, ARMI_CMP^khi, tmp);
    emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
  }
  *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);

  /* Load main position relative to tab->node into dest. */
  khash = irref_isk(refkey) ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
  } else {
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
    emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
    if (irt_isstr(kt)) {  /* Fetch of str->sid is cheaper than ra_allock. */
      emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, sid));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else if (irref_isk(refkey)) {
      emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
               rset_exclude(rset_exclude(RSET_GPR, tab), dest));
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      if (ra_hasreg(keynumhi)) {  /* Canonicalize +-0.0 to 0.0. */
        if (keyhi == RID_TMP)
          emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
        emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
      }
      emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
      emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
      emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
               tmp, tmp+1, tmp);
      emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
      emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
      if (ra_hasreg(keynumhi)) {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key);  /* Test for +-0.0. */
        emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
#if !LJ_SOFTFP
        emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
                 (ra_alloc1(as, refkey, RSET_FPR) & 15));
#endif
      } else {
        emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
        emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
                 rset_exclude(rset_exclude(RSET_GPR, tab), key));
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key = RID_NONE, type = RID_TMP, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (ofs > 4095) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (!irt_ispri(irkey->t)) {
    RegSet even = (as->freeset & allow);
    even = even & (even >> 1) & RSET_GPREVEN;
    if (even) {
      key = ra_scratch(as, even);
      if (rset_test(as->freeset, key+1)) {
        type = key+1;
        ra_modified(as, type);
      }
    } else {
      key = ra_scratch(as, allow);
    }
    rset_clear(allow, key);
  }
  rset_clear(allow, type);
  if (irt_isnum(irkey->t)) {
    emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
             (int32_t)ir_knum(irkey)->u32.hi, allow);
    emit_opk(as, ARMI_CMP, 0, key,
             (int32_t)ir_knum(irkey)->u32.lo, allow);
  } else {
    if (ra_hasreg(key))
      emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
  }
  emit_lso(as, ARMI_LDR, type, idx, kofs+4);
  if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
  if (ofs > 4095)
    emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, ARMI_LDR, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
      emit_opk(as, ARMI_ADD, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, ARMI_LDR, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lj_assertA(!ra_used(ir), "unfused FREF");
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  IRRef ref = ir->op2, refk = ir->op1;
  Reg r;
  if (irref_isk(ref)) {
    IRRef tmp = refk; refk = ref; ref = tmp;
  } else if (!irref_isk(refk)) {
    uint32_t k, m = ARMI_K12|sizeof(GCstr);
    Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
    IRIns *irr = IR(ir->op2);
    if (ra_hasreg(irr->r)) {
      ra_noweak(as, irr->r);
      right = irr->r;
    } else if (mayfuse(as, irr->op2) &&
               irr->o == IR_ADD && irref_isk(irr->op2) &&
               (k = emit_isk12(ARMI_ADD,
                               (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
      m = k;
      right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
    } else {
      right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
    }
    emit_dn(as, ARMI_ADD^m, dest, dest);
    emit_dnm(as, ARMI_ADD, dest, left, right);
    return;
  }
  r = ra_alloc1(as, ref, RSET_GPR);
  emit_opk(as, ARMI_ADD, dest, r,
           sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
}

/* -- Loads and stores ---------------------------------------------------- */

static ARMIns asm_fxloadins(ASMState *as, IRIns *ir)
{
  UNUSED(as);
  switch (irt_type(ir->t)) {
  case IRT_I8: return ARMI_LDRSB;
  case IRT_U8: return ARMI_LDRB;
  case IRT_I16: return ARMI_LDRSH;
  case IRT_U16: return ARMI_LDRH;
  case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VLDR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;  /* fallthrough */
  default: return ARMI_LDR;
  }
}

static ARMIns asm_fxstoreins(ASMState *as, IRIns *ir)
{
  UNUSED(as);
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return ARMI_STRB;
  case IRT_I16: case IRT_U16: return ARMI_STRH;
  case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return ARMI_VSTR_D;
  case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;  /* fallthrough */
  default: return ARMI_STR;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  ARMIns ai = asm_fxloadins(as, ir);
  Reg idx;
  int32_t ofs;
  if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
    idx = ra_allock(as, (int32_t)(ir->op2<<2) + (int32_t)J2GG(as->J), RSET_GPR);
    ofs = 0;
  } else {
    idx = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->op2 == IRFL_TAB_ARRAY) {
      ofs = asm_fuseabase(as, ir->op1);
      if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
        emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
        return;
      }
    }
    ofs = field_ofs[ir->op2];
  }
  if ((ai & 0x04000000))
    emit_lso(as, ai, dest, idx, ofs);
  else
    emit_lsox(as, ai, dest, idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    ARMIns ai = asm_fxstoreins(as, ir);
    if ((ai & 0x04000000))
      emit_lso(as, ai, src, idx, ofs);
    else
      emit_lsox(as, ai, src, idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir,
                     (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
  lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
  asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
}

static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2,
                        (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src), ofs);
  }
}

#define asm_xstore(as, ir) asm_xstore_(as, ir, 0)

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, idx;
  RegSet allow = RSET_GPR;
  int32_t ofs = 0;
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
  if (ra_used(ir)) {
    lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t),
               "bad load type %d", irt_type(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
  }
  idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
                       (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
  if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
  if (!hiop || type == RID_NONE) {
    rset_clear(allow, idx);
    if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
        rset_test((as->freeset & allow), dest+1)) {
      type = dest+1;
      ra_modified(as, type);
    } else {
      type = RID_TMP;
    }
  }
  asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
  emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM)
      emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
    else
#endif
    emit_lso(as, ARMI_LDR, dest, idx, ofs);
  }
  emit_lso(as, ARMI_LDR, type, idx, ofs+4);
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, type = RID_NONE;
    int32_t ofs = 0;
#if !LJ_SOFTFP
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
      emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
    } else
#endif
    {
      int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
      }
      if (hiop)
        type = ra_alloc1(as, (ir+1)->op2, allow);
      else
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
      if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
      emit_lso(as, ARMI_STR, type, idx, ofs+4);
    }
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
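  /* Stack slots are 8-byte TValues: slot n lives at BASE + 8*(n-1), and
  ** the +4 for frame slots selects the high word holding the frame link.
  */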
  int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
  int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
  IRType t = hiop ? IRT_NUM : irt_type(ir->t);
  Reg dest = RID_NONE, type = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
             "bad parent SLOAD");  /* Handled by asm_head_side(). */
  lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
             "inconsistent SLOAD variant");
#if LJ_SOFTFP
  lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
             "unsplit SLOAD convert");  /* Handled by LJ_SOFTFP SPLIT. */
  if (hiop && ra_used(ir+1)) {
    type = ra_dest(as, ir+1, allow);
    rset_clear(allow, type);
  }
#else
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t = IRT_NUM;  /* Continue with a regular number type check. */
  } else
#endif
  if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
    lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
               irt_isint(ir->t) || irt_isaddr(ir->t),
               "bad SLOAD type %d", irt_type(ir->t));
    dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
    rset_clear(allow, dest);
    base = ra_alloc1(as, REF_BASE, allow);
    if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (t == IRT_INT) {
        emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
        emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
        t = IRT_NUM;  /* Check for original type. */
      } else {
        emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
        emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
        t = IRT_INT;  /* Check for original type. */
      }
      dest = tmp;
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    if (ra_noreg(type)) {
      if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
          rset_test((as->freeset & allow), dest+1)) {
        type = dest+1;
        ra_modified(as, type);
      } else {
        type = RID_TMP;
      }
    }
    asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
    emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
  }
  if (ra_hasreg(dest)) {
#if !LJ_SOFTFP
    if (t == IRT_NUM) {
      if (ofs < 1024) {
        emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
      } else {
        if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
        emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
        emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
        return;
      }
    } else
#endif
    emit_lso(as, ARMI_LDR, dest, base, ofs);
  }
  if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  RegSet drop = RSET_SCRATCH;
  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
             "bad CNEW/CNEWI operands");

  as->gcsteps++;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);
  if (ra_used(ir))
    ra_destreg(as, ir, RID_RET);  /* GCcdata * */

  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
    if (sz == 8) {
      ofs += 4; ir++;
      lj_assertA(ir->o == IR_HIOP, "expected HIOP for CNEWI");
    }
    for (;;) {
      Reg r = ra_alloc1(as, ir->op2, allow);
      emit_lso(as, ARMI_STR, r, RID_RET, ofs);
      rset_clear(allow, r);
      if (ofs == sizeof(GCcdata)) break;
      ofs -= 4; ir--;
    }
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id */
    args[2] = ir->op2;      /* CTSize sz */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    uint32_t k = emit_isk12(ARMI_MOV, id);
    Reg r = k ? RID_R1 : ra_allock(as, id, allow);
    emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
    if (k) emit_d(as, ARMI_MOV^k, RID_R1);
  }
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
                     rset_exclude(rset_exclude(RSET_GPR, tab), link));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_lso(as, ARMI_STR, tab, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
  emit_lso(as, ARMI_LDR, link, gr,
           (int32_t)offsetof(global_State, gc.grayagain));
  emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
  emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}

static void asm_obar(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
  IRRef args[2];
  MCLabel l_end;
  Reg obj, val, tmp;
  /* No need for other object barriers (yet). */
  lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ir->op1;      /* TValue *tv */
  asm_gencall(as, ci, args);
  if ((l_end[-1] >> 28) == CC_AL)
    l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
  else
    emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
  ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
  obj = IR(ir->op1)->r;
  tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
  emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
  emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
  val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
  emit_lso(as, ARMI_LDRB, tmp, obj,
           (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
  emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
}

/* -- Arithmetic and logic operations ------------------------------------- */

#if !LJ_SOFTFP
static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = (left >> 8); left &= 255;
  emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
}

static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_FPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
  emit_dm(as, ai, (dest & 15), (left & 15));
}

static void asm_callround(ASMState *as, IRIns *ir, int id)
{
  /* The modified regs must match with the *.dasc implementation. */
  RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
                RID2RSET(RID_R3)|RID2RSET(RID_R12);
  RegSet of;
  Reg dest, src;
  ra_evictset(as, drop);
  dest = ra_dest(as, ir, RSET_FPR);
  emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
  emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
                id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
                                   (void *)lj_vm_trunc_sf);
  /* Workaround to protect argument GPRs from being used for remat. */
  of = as->freeset;
  as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
  as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
  src = ra_alloc1(as, ir->op1, RSET_FPR);  /* May alloc GPR to remat FPR. */
  as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
  emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
}

static void asm_fpmath(ASMState *as, IRIns *ir)
{
  if (ir->op2 <= IRFPM_TRUNC)
    asm_callround(as, ir, ir->op2);
  else if (ir->op2 == IRFPM_SQRT)
    asm_fpunary(as, ir, ARMI_VSQRT_D);
  else
    asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
}
#endif

static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
{
  IRIns *ir;
  if (irref_isk(rref))
    return 0;  /* Don't swap constants to the left. */
  if (irref_isk(lref))
    return 1;  /* But swap constants to the right. */
  ir = IR(rref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 0;  /* Don't swap fusable operands to the left. */
  ir = IR(lref);
  if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
      (ir->o == IR_ADD && ir->op1 == ir->op2))
    return 1;  /* But swap fusable operands to the right. */
  return 0;  /* Otherwise don't swap. */
}

static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
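    /* SUB/SBC are not commutative: after the swap, switch to the reverse
    ** forms RSB/RSC instead (the opcodes differ in a single bit).
    */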
    if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
      ai ^= (ARMI_SUB^ARMI_RSB);
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= ARMI_S;
  }
  emit_dn(as, ai^m, dest, left);
}

/* Try to drop cmp r, #0. */
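/* If the preceding instruction is cmp <result>, #0 (tracked via flagmcp),
** setting the S bit on the ALU op that produced <result> makes the compare
** redundant. EQ/NE test only the Z flag and carry over directly; GE/LT
** would test N^V, so they are remapped to PL/MI, which test the N flag the
** S-suffixed op actually sets.
*/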
static ARMIns asm_drop_cmp0(ASMState *as, ARMIns ai)
{
  if (as->flagmcp == as->mcp) {
    uint32_t cc = (as->mcp[1] >> 28);
    as->flagmcp = NULL;
    if (cc <= CC_NE) {
      as->mcp++;
      ai |= ARMI_S;
    } else if (cc == CC_GE) {
      *++as->mcp ^= ((CC_GE^CC_PL) << 28);
      ai |= ARMI_S;
    } else if (cc == CC_LT) {
      *++as->mcp ^= ((CC_LT^CC_MI) << 28);
      ai |= ARMI_S;
    }  /* else: other conds don't work in general. */
  }
  return ai;
}

static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
{
  asm_intop(as, ir, asm_drop_cmp0(as, ai));
}

static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dn(as, ai|ARMI_K12|0, dest, left);
}

/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  Reg tmp = RID_NONE;
  /* ARMv5 restriction: dest != left and dest_hi != left. */
  if (dest == left && left != right) { left = right; right = dest; }
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    if (!(as->flags & JIT_F_ARMV6) && dest == left)
      tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
    asm_guardcc(as, CC_NE);
    emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
    emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
  } else {
    if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
    emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
  }
  /* Only need this for the dest == left == right case. */
  if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
}

static void asm_add(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
      asm_fparith(as, ir, ARMI_VADD_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_ADD);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
      asm_fparith(as, ir, ARMI_VSUB_D);
    return;
  }
#endif
  asm_intop_s(as, ir, ARMI_SUB);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, ARMI_VMUL_D);
    return;
  }
#endif
  asm_intmul(as, ir);
}

#define asm_addov(as, ir) asm_add(as, ir)
#define asm_subov(as, ir) asm_sub(as, ir)
#define asm_mulov(as, ir) asm_mul(as, ir)

#if !LJ_SOFTFP
#define asm_fpdiv(as, ir) asm_fparith(as, ir, ARMI_VDIV_D)
#define asm_abs(as, ir) asm_fpunary(as, ir, ARMI_VABS_D)
#endif

static void asm_neg(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, ARMI_VNEG_D);
    return;
  }
#endif
  asm_intneg(as, ir, ARMI_RSB);
}

static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
{
  ai = asm_drop_cmp0(as, ai);
  if (ir->op2 == 0) {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
    emit_d(as, ai^m, dest);
  } else {
    /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
    asm_intop(as, ir, ai);
  }
}

#define asm_bnot(as, ir) asm_bitop(as, ir, ARMI_MVN)

static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  if ((as->flags & JIT_F_ARMV6)) {
    emit_dm(as, ARMI_REV, dest, left);
  } else {
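    /* No REV before ARMv6. In execution order (emitted in reverse below)
    ** this is the classic four-instruction byte swap:
    **   eor tmp, left, left, ror #16   ; tmp = x ^ rot16(x)
    **   bic tmp, tmp, #0x00ff0000      ; clear the byte that must not mix
    **   mov tmp2, left, ror #8         ; tmp2 = rot8(x)
    **   eor dest, tmp2, tmp, lsr #8    ; merge into the swapped result
    */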
1609 Reg tmp2 = dest;
1610 if (tmp2 == left)
1611 tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
1612 emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
1613 emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
1614 emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
1615 emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
1616 }
1617 }
1618
1619 #define asm_band(as, ir) asm_bitop(as, ir, ARMI_AND)
1620 #define asm_bor(as, ir) asm_bitop(as, ir, ARMI_ORR)
1621 #define asm_bxor(as, ir) asm_bitop(as, ir, ARMI_EOR)
1622
asm_bitshift(ASMState * as,IRIns * ir,ARMShift sh)1623 static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
1624 {
1625 if (irref_isk(ir->op2)) { /* Constant shifts. */
1626 /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
1627 /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
1628 Reg dest = ra_dest(as, ir, RSET_GPR);
1629 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1630 int32_t shift = (IR(ir->op2)->i & 31);
1631 emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
1632 } else {
1633 Reg dest = ra_dest(as, ir, RSET_GPR);
1634 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1635 Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1636 emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
1637 }
1638 }
1639
1640 #define asm_bshl(as, ir) asm_bitshift(as, ir, ARMSH_LSL)
1641 #define asm_bshr(as, ir) asm_bitshift(as, ir, ARMSH_LSR)
1642 #define asm_bsar(as, ir) asm_bitshift(as, ir, ARMSH_ASR)
1643 #define asm_bror(as, ir) asm_bitshift(as, ir, ARMSH_ROR)
1644 #define asm_brol(as, ir) lj_assertA(0, "unexpected BROL")
1645
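/*
** Integer min/max is a compare plus one or two conditional moves.
** Roughly, for MIN (cc = CC_GT) with all-distinct registers:
**
**   cmp   left, right
**   movle dest, left
**   movgt dest, right
**
** The moves are elided or flipped when dest coalesces with an operand.
*/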
static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
{
  uint32_t kcmp = 0, kmov = 0;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = 0;
  if (irref_isk(ir->op2)) {
    kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
    if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
  }
  if (!kmov) {
    kcmp = 0;
    right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  }
  if (kmov || dest != right) {
    emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
    cc ^= 1;  /* Must use opposite conditions for paired moves. */
  } else {
    cc ^= (CC_LT^CC_GT);  /* Otherwise may swap CC_LT <-> CC_GT. */
  }
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
  emit_nm(as, ARMI_CMP^kcmp, left, right);
}

#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0] = ir->op1; args[1] = (ir+1)->op1;
  args[2] = ir->op2; args[3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. */
  if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
  if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
  if (!rset_test(as->freeset, RID_R2) &&
      regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
  if (!rset_test(as->freeset, RID_R3) &&
      regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
  ra_evictset(as, drop);
  ra_destpair(as, ir);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
  emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
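/*
** VFP min/max: compare, fetch the flags, then conditionally move either
** operand into dest. Roughly, for MIN (cc = CC_PL):
**
**   vcmp.f64 left, right
**   vmrs
**   vmovpl.f64 dest, right
**   vmovmi.f64 dest, left
*/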
static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 15); left &= 15;
  if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
  if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ARMI_VCMP_D, left, right);
}
#endif

static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
{
#if LJ_SOFTFP
  UNUSED(fcc);
#else
  if (irt_isnum(ir->t))
    asm_fpmin_max(as, ir, fcc);
  else
#endif
    asm_intmin_max(as, ir, cc);
}

#define asm_min(as, ir) asm_min_max(as, ir, CC_GT, CC_PL)
#define asm_max(as, ir) asm_min_max(as, ir, CC_LT, CC_LE)

/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};
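/* The low nibble is the negated integer condition used for the guard
** branch, i.e. the exit is taken if the comparison does NOT hold. The
** high nibble is the guard condition after an FP compare; the ops marked
** 'x' above have their FP operands swapped first.
*/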

#if LJ_SOFTFP
/* FP comparisons. */
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
  args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
  args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
  /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
  for (r = RID_R0; r <= RID_R3; r++)
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
  ra_evictset(as, drop);
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_call(as, (void *)ci->func);
  for (r = RID_R0; r <= RID_R3; r++)
    ra_leftov(as, r, args[r-RID_R0]);
}
#else
/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  ARMIns ai;
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
    right = 0;
    ai = ARMI_VCMPZ_D;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 15); left = ((left >> 8) & 15);
    } else {
      right = ((left >> 8) & 15); left &= 15;
    }
    ai = ARMI_VCMP_D;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_d(as, ARMI_VMRS, 0);
  emit_dm(as, ai, left, right);
}
#endif

/* Integer comparisons. */
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  ARMCC cc = (asm_compmap[ir->o] & 15);
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
             "bad comparison data type %d", irt_type(ir->t));
  if (asm_swapops(as, lref, rref)) {
    Reg tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  if (irref_isk(rref) && IR(rref)->i == 0) {
    IRIns *irl = IR(lref);
    cmpprev0 = (irl+1 == ir);
    /* Combine comp(BAND(left, right), 0) into tst left, right. */
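    /* E.g. EQ(BAND(x, 7), 0) becomes tst x, #7 with a guard on NE. */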
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
        Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
        m2 = emit_isk12(ARMI_AND, IR(brref)->i);
        if ((m2 & (ARMI_AND^ARMI_BIC)))
          goto notst;  /* Not beneficial if we miss a constant operand. */
      }
      if (cc == CC_GE) cc = CC_PL;
      else if (cc == CC_LT) cc = CC_MI;
      else if (cc > CC_NE) goto notst;  /* Other conds don't work with tst. */
      bleft = ra_alloc1(as, blref, RSET_GPR);
      if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ARMI_TST^m2, bleft);
      return;
    }
  }
notst:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ARMI_CMP^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}

static void asm_comp(ASMState *as, IRIns *ir)
{
#if !LJ_SOFTFP
  if (irt_isnum(ir->t))
    asm_fpcomp(as, ir);
  else
#endif
    asm_intcomp(as, ir);
}

#define asm_equal(as, ir) asm_comp(as, ir)

#if LJ_HASFFI
/* 64 bit integer comparisons. */
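/*
** A sketch for a signed IR_LT guard (in execution order):
**
**   cmp xhi, yhi
**   bgt ->exit      ; hiword decides: definitely not less
**   bne ->around    ; hiwords differ: definitely less
**   cmp xlo, ylo
**   bhs ->exit      ; hiwords equal: unsigned compare of lowords
** around:
*/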
static void asm_int64comp(ASMState *as, IRIns *ir)
{
  int signedcomp = (ir->o <= IR_GT);
  ARMCC cclo, cchi;
  Reg leftlo, lefthi;
  uint32_t mlo, mhi;
  RegSet allow = RSET_GPR, oldfree;

  /* Always use unsigned comparison for loword. */
  cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
  leftlo = ra_alloc1(as, ir->op1, allow);
  oldfree = as->freeset;
  mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
  allow &= ~(oldfree & ~as->freeset);  /* Update for allocs of asm_fuseopm. */

  /* Use signed or unsigned comparison for hiword. */
  cchi = asm_compmap[ir->o] & 15;
  lefthi = ra_alloc1(as, (ir+1)->op1, allow);
  mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));

  /* All register allocations must be performed _before_ this point. */
  if (signedcomp) {
    MCLabel l_around = emit_label(as);
    asm_guardcc(as, cclo);
    emit_n(as, ARMI_CMP^mlo, leftlo);
    emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
    if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6;  /* GE -> GT, LE -> LT */
    asm_guardcc(as, cchi);
  } else {
    asm_guardcc(as, cclo);
    emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
  }
  emit_n(as, ARMI_CMP^mhi, lefthi);
}
#endif

/* -- Split register ops -------------------------------------------------- */

/* Hiword op of a split 32/32 bit op. Previous op is the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
#if LJ_HASFFI || LJ_SOFTFP
  if ((ir-1)->o <= IR_NE) {  /* 64 bit integer or FP comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_int64comp(as, ir-1);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_PL : CC_LE);
    return;
#elif LJ_HASFFI
  } else if ((ir-1)->o == IR_CONV) {
    as->curins--;  /* Always skip the CONV. */
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    if ((ir-1)->r != RID_SINK)
      asm_xstore_(as, ir, 4);
    return;
  }
#endif
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
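  /* 64 bit add/sub/neg pairs: the loword op sets the flags (ADDS/SUBS/
  ** RSBS) and the hiword op consumes the carry (ADC/SBC/RSC). Since
  ** machine code is emitted backwards, the hiword op is generated first.
  */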
  case IR_ADD:
    as->curins--;
    asm_intop(as, ir, ARMI_ADC);
    asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
    break;
  case IR_SUB:
    as->curins--;
    asm_intop(as, ir, ARMI_SBC);
    asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
    break;
  case IR_NEG:
    as->curins--;
    asm_intneg(as, ir, ARMI_RSC);
    asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
    break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: case IR_TMPREF:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
  case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
  }
}

/* -- Profiling ----------------------------------------------------------- */

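/*
** Profiling check, roughly:
**
**   ldrb tmp, [&g->hookmask]
**   tst  tmp, #HOOK_PROFILE
**   blne ->exit
*/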
static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_n(as, ARMI_TST|ARMI_K12|HOOK_PROFILE, RID_TMP);
  emit_lsptr(as, ARMI_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
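/*
** A sketch of the generated check (in execution order):
**
**   ldr  tmp, [g->cur_L]
**   ldr  tmp, [tmp, #offsetof(lua_State, maxstack)]
**   sub  tmp, tmp, base
**   cmp  tmp, #8*topslot
**   blls ->exit
*/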
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lj_assertA(ra_hasreg(pbase), "base reg lost");
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0);  /* Restore temp. register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
  k = emit_isk12(0, (int32_t)(8*topslot));
  lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
  emit_n(as, ARMI_CMP^k, RID_TMP);
  emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
  emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
           (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    int32_t i = i32ptr(&J2G(as->J)->cur_L);
    if (ra_hasspill(irp->s))
      emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0);  /* Save temp. register. */
    emit_loadi(as, RID_TMP, (i & ~4095));
  } else {
    emit_getgl(as, RID_TMP, cur_L);
  }
}

/* Restore Lua stack from on-trace state. */
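/* Note: values go to even GPRs and type tags to the following odd GPRs
** where possible, so the assembler can merge adjacent str pairs to strd.
*/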
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg tmp;
      /* LJ_SOFTFP: must be a number constant. */
      lj_assertA(irref_isk(ref), "unsplit FP op");
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
                      rset_exclude(RSET_GPREVEN, RID_BASE));
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
      if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
      emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
#endif
    } else {
      RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
      Reg type;
      lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
                 "restore of IR type %d", irt_type(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
        emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
        if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), odd);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
#endif
      } else if ((sn & SNAP_KEYINDEX)) {
        type = ra_allock(as, (int32_t)LJ_KEYINDEX, odd);
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
      }
      emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define ARM_NOPATCH_GC_CHECK (ARMI_BIC|ARMI_K12)

/* Check GC threshold and do one or more GC steps. */
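/*
** A sketch of the emitted code (in execution order):
**
**   mov  tmp1, #g
**   ldr  tmp, [tmp1, #offsetof(global_State, gc.total)]
**   ldr  tmp2, [tmp1, #offsetof(global_State, gc.threshold)]
**   cmp  tmp, tmp2
**   bls  ->l_end                ; skip the GC step if below threshold
**   mov  tmp2, #steps
**   bl   lj_gc_step_jit
**   cmp  r0, #0
**   bic  r0, r0, #0             ; ARM_NOPATCH_GC_CHECK marker
**   blne ->exit
** l_end:
*/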
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp1, tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  *--as->mcp = ARM_NOPATCH_GC_CHECK;
  emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp1 = ra_releasetmp(as, ASMREF_TMP1);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
  emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
  emit_lso(as, ARMI_LDR, tmp2, tmp1,
           (int32_t)offsetof(global_State, gc.threshold));
  emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
           (int32_t)offsetof(global_State, gc.total));
  ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
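/* B/BL displacements are signed 24 bit word offsets relative to the
** branch address plus 8 (two words of PC bias), hence the word
** adjustments below and in asm_tail_fixup.
*/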
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the bcc and patched the final bl. */
    p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
  } else {
    p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
  }
}

/* Fixup the tail of the loop. */
static void asm_loop_tail_fixup(ASMState *as)
{
  UNUSED(as);  /* Nothing to do. */
}

/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->cur_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, cur_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    rset_clear(allow, ra_dest(as, ir, allow));
  } else {
    Reg r = irp->r;
    lj_assertA(ra_hasreg(r), "base reg lost");
    rset_clear(allow, r);
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
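/* The tail is patched to roughly:
**
**   add sp, sp, #spadj   ; omitted if spadj == 0
**   b   ->target         ; linked trace or interpreter exit
*/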
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    as->mctop = --p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(ARMI_ADD, spadj);
    lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
    p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
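/* A sketch of the AAPCS rules assumed by the counting below: hardfp
** doubles take d0-d7 and floats may back-fill an odd single-precision
** slot of a partly used d register (fprodd); soft-float and vararg calls
** pass doubles in even/odd GPR pairs instead. Anything left over gets an
** 8-byte-aligned pair of stack slots for doubles or one slot otherwise.
*/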
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
      if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
        if (irt_isnum(IR(args[i])->t)) {
          if (nfpr > 0) nfpr--;
          else fprodd = 0, nslots = (nslots + 3) & ~1;
        } else {
          if (fprodd) fprodd--;
          else if (nfpr > 0) fprodd = 1, nfpr--;
          else nslots++;
        }
      } else if (irt_isnum(IR(args[i])->t)) {
        ngpr &= ~1;
        if (ngpr > 0) ngpr -= 2; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots++;
      }
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
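/* Scan for conditional bl instructions targeting this exit stub and
** rewrite them in place to b_cc target: the condition bits are kept, the
** link bit is dropped. The preceding word is checked against
** ARM_NOPATCH_GC_CHECK, which keeps the GC check exit unpatched.
*/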
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL, *cend = p;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_addr(J, exitno) - 2;
  for (; p < pe; p++) {
    /* Look for bl_cc exitstub, replace with b_cc target. */
    uint32_t ins = *p;
    if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
        ((ins ^ (px-p)) & 0x00ffffffu) == 0 &&
        p[-1] != ARM_NOPATCH_GC_CHECK) {
      *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
      cend = p+1;
      if (!cstart) cstart = p;
    }
  }
  lj_assertJ(cstart != NULL, "exit stub %d not found", exitno);
  lj_mcode_sync(cstart, cend);
  lj_mcode_patch(J, mcarea, 1);
}