1 /*
2 ** PPC IR assembler (SSA IR -> machine code).
3 ** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
4 */
5
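/* Note: like the other LuaJIT backends, this assembler generates machine
** code backwards: as->mcp is decremented for every instruction, so within
** each function the emit_*() calls appear in reverse execution order.
*/
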
6 /* -- Register allocator extensions --------------------------------------- */
7
8 /* Allocate a register with a hint. */
9 static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
10 {
11 Reg r = IR(ref)->r;
12 if (ra_noreg(r)) {
13 if (!ra_hashint(r) && !iscrossref(as, ref))
14 ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
15 r = ra_allocref(as, ref, allow);
16 }
17 ra_noweak(as, r);
18 return r;
19 }
20
21 /* Allocate two source registers for three-operand instructions. */
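/* The two registers are packed into a single Reg: left in the low byte,
** right in bits 8-15. Callers unpack with (r >> 8) and (r & 255).
*/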
22 static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
23 {
24 IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
25 Reg left = irl->r, right = irr->r;
26 if (ra_hasreg(left)) {
27 ra_noweak(as, left);
28 if (ra_noreg(right))
29 right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
30 else
31 ra_noweak(as, right);
32 } else if (ra_hasreg(right)) {
33 ra_noweak(as, right);
34 left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
35 } else if (ra_hashint(right)) {
36 right = ra_allocref(as, ir->op2, allow);
37 left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
38 } else {
39 left = ra_allocref(as, ir->op1, allow);
40 right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
41 }
42 return left | (right << 8);
43 }
44
45 /* -- Guard handling ------------------------------------------------------ */
46
47 /* Setup exit stubs after the end of each trace. */
48 static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
49 {
50 ExitNo i;
51 MCode *mxp = as->mctop;
52 if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
53 asm_mclimit(as);
54 /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */
55 for (i = nexits-1; (int32_t)i >= 0; i--)
56 *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2);
57 *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */
58 mxp--;
59 *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2);
60 *--mxp = PPCI_MFLR|PPCF_T(RID_TMP);
61 as->mctop = mxp;
62 }
63
64 static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
65 {
66 /* Keep this in-sync with exitstub_trace_addr(). */
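/* The stub area laid out by asm_exitstub_setup() is mflr, bl, li followed
** by one bl per exit, so the bl for exitno is 3+exitno instructions past
** the new mctop.
*/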
67 return as->mctop + exitno + 3;
68 }
69
70 /* Emit conditional branch to exit for guard. */
71 static void asm_guardcc(ASMState *as, PPCCC cc)
72 {
73 MCode *target = asm_exitstub_addr(as, as->snapno);
74 MCode *p = as->mcp;
75 if (LJ_UNLIKELY(p == as->invmcp)) {
76 as->loopinv = 1;
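/* Combine the guard with the loop branch: redirect the final b to the
** exit stub and emit an inverted conditional branch in its place. Its
** target is patched to the loop start by asm_loop_fixup().
*/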
77 *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2);
78 emit_condbranch(as, PPCI_BC, cc^4, p);
79 return;
80 }
81 emit_condbranch(as, PPCI_BC, cc, target);
82 }
83
84 /* -- Operand fusion ------------------------------------------------------ */
85
86 /* Limit linear search to this distance. Avoids O(n^2) behavior. */
87 #define CONFLICT_SEARCH_LIM 31
88
89 /* Check if there's no conflicting instruction between curins and ref. */
90 static int noconflict(ASMState *as, IRRef ref, IROp conflict)
91 {
92 IRIns *ir = as->ir;
93 IRRef i = as->curins;
94 if (i > ref + CONFLICT_SEARCH_LIM)
95 return 0; /* Give up, ref is too far away. */
96 while (--i > ref)
97 if (ir[i].o == conflict)
98 return 0; /* Conflict found. */
99 return 1; /* Ok, no conflict. */
100 }
101
102 /* Fuse the array base of colocated arrays. */
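/* A colocated array is allocated right behind the GCtab, so its base is
** simply tab+sizeof(GCtab). This must not be fused across an IR_NEWREF,
** which may resize the table and move the array part out of colocation.
*/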
103 static int32_t asm_fuseabase(ASMState *as, IRRef ref)
104 {
105 IRIns *ir = IR(ref);
106 if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
107 !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
108 return (int32_t)sizeof(GCtab);
109 return 0;
110 }
111
112 /* Indicates load/store indexed is ok. */
113 #define AHUREF_LSX ((int32_t)0x80000000)
114
115 /* Fuse array/hash/upvalue reference into register+offset operand. */
116 static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
117 {
118 IRIns *ir = IR(ref);
119 if (ra_noreg(ir->r)) {
120 if (ir->o == IR_AREF) {
121 if (mayfuse(as, ref)) {
122 if (irref_isk(ir->op2)) {
123 IRRef tab = IR(ir->op1)->op1;
124 int32_t ofs = asm_fuseabase(as, tab);
125 IRRef refa = ofs ? tab : ir->op1;
126 ofs += 8*IR(ir->op2)->i;
127 if (checki16(ofs)) {
128 *ofsp = ofs;
129 return ra_alloc1(as, refa, allow);
130 }
131 }
132 if (*ofsp == AHUREF_LSX) {
133 Reg base = ra_alloc1(as, ir->op1, allow);
134 Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
135 return base | (idx << 8);
136 }
137 }
138 } else if (ir->o == IR_HREFK) {
139 if (mayfuse(as, ref)) {
140 int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
141 if (checki16(ofs)) {
142 *ofsp = ofs;
143 return ra_alloc1(as, ir->op1, allow);
144 }
145 }
146 } else if (ir->o == IR_UREFC) {
147 if (irref_isk(ir->op1)) {
148 GCfunc *fn = ir_kfunc(IR(ir->op1));
149 int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
150 int32_t jgl = (intptr_t)J2G(as->J);
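/* RID_JGL holds J2G(J)+32768, so the 64K region starting at the global
** state is reachable with a signed 16 bit displacement off RID_JGL.
*/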
151 if ((uint32_t)(ofs-jgl) < 65536) {
152 *ofsp = ofs-jgl-32768;
153 return RID_JGL;
154 } else {
155 *ofsp = (int16_t)ofs;
156 return ra_allock(as, ofs-(int16_t)ofs, allow);
157 }
158 }
159 }
160 }
161 *ofsp = 0;
162 return ra_alloc1(as, ref, allow);
163 }
164
165 /* Fuse XLOAD/XSTORE reference into load/store operand. */
166 static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
167 RegSet allow, int32_t ofs)
168 {
169 IRIns *ir = IR(ref);
170 Reg base;
171 if (ra_noreg(ir->r) && canfuse(as, ir)) {
172 if (ir->o == IR_ADD) {
173 int32_t ofs2;
174 if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
175 ofs = ofs2;
176 ref = ir->op1;
177 } else if (ofs == 0) {
178 Reg right, left = ra_alloc2(as, ir, allow);
179 right = (left >> 8); left &= 255;
180 emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
181 return;
182 }
183 } else if (ir->o == IR_STRREF) {
184 lj_assertA(ofs == 0, "bad usage");
185 ofs = (int32_t)sizeof(GCstr);
186 if (irref_isk(ir->op2)) {
187 ofs += IR(ir->op2)->i;
188 ref = ir->op1;
189 } else if (irref_isk(ir->op1)) {
190 ofs += IR(ir->op1)->i;
191 ref = ir->op2;
192 } else {
193 /* NYI: Fuse ADD with constant. */
194 Reg tmp, right, left = ra_alloc2(as, ir, allow);
195 right = (left >> 8); left &= 255;
196 tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right));
197 emit_fai(as, pi, rt, tmp, ofs);
198 emit_tab(as, PPCI_ADD, tmp, left, right);
199 return;
200 }
201 if (!checki16(ofs)) {
202 Reg left = ra_alloc1(as, ref, allow);
203 Reg right = ra_allock(as, ofs, rset_exclude(allow, left));
204 emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
205 return;
206 }
207 }
208 }
209 base = ra_alloc1(as, ref, allow);
210 emit_fai(as, pi, rt, base, ofs);
211 }
212
213 /* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */
214 static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
215 RegSet allow)
216 {
217 IRIns *ira = IR(ref);
218 Reg right, left;
219 if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) {
220 left = ra_alloc2(as, ira, allow);
221 right = (left >> 8); left &= 255;
222 } else {
223 right = ra_alloc1(as, ref, allow);
224 left = RID_R0;
225 }
226 emit_tab(as, pi, rt, left, right);
227 }
228
229 #if !LJ_SOFTFP
230 /* Fuse to multiply-add/sub instruction. */
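/* pir is the opcode used when the multiply is the right operand, e.g.
** FNMSUB for x - y*z, which equals -(y*z - x).
*/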
231 static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
232 {
233 IRRef lref = ir->op1, rref = ir->op2;
234 IRIns *irm;
235 if (lref != rref &&
236 ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
237 ra_noreg(irm->r)) ||
238 (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
239 (rref = lref, pi = pir, ra_noreg(irm->r))))) {
240 Reg dest = ra_dest(as, ir, RSET_FPR);
241 Reg add = ra_alloc1(as, rref, RSET_FPR);
242 Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add));
243 right = (left >> 8); left &= 255;
244 emit_facb(as, pi, dest, left, right, add);
245 return 1;
246 }
247 return 0;
248 }
249 #endif
250
251 /* -- Calls --------------------------------------------------------------- */
252
253 /* Generate a call to a C function. */
254 static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
255 {
256 uint32_t n, nargs = CCI_XNARGS(ci);
257 int32_t ofs = 8;
258 Reg gpr = REGARG_FIRSTGPR;
259 #if !LJ_SOFTFP
260 Reg fpr = REGARG_FIRSTFPR;
261 #endif
262 if ((void *)ci->func)
263 emit_call(as, (void *)ci->func);
264 for (n = 0; n < nargs; n++) { /* Setup args. */
265 IRRef ref = args[n];
266 if (ref) {
267 IRIns *ir = IR(ref);
268 #if !LJ_SOFTFP
269 if (irt_isfp(ir->t)) {
270 if (fpr <= REGARG_LASTFPR) {
271 lj_assertA(rset_test(as->freeset, fpr),
272 "reg %d not free", fpr); /* Already evicted. */
273 ra_leftov(as, fpr, ref);
274 fpr++;
275 } else {
276 Reg r = ra_alloc1(as, ref, RSET_FPR);
277 if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
278 emit_spstore(as, ir, r, ofs);
279 ofs += irt_isnum(ir->t) ? 8 : 4;
280 }
281 } else
282 #endif
283 {
284 if (gpr <= REGARG_LASTGPR) {
285 lj_assertA(rset_test(as->freeset, gpr),
286 "reg %d not free", gpr); /* Already evicted. */
287 ra_leftov(as, gpr, ref);
288 gpr++;
289 } else {
290 Reg r = ra_alloc1(as, ref, RSET_GPR);
291 emit_spstore(as, ir, r, ofs);
292 ofs += 4;
293 }
294 }
295 } else {
296 if (gpr <= REGARG_LASTGPR)
297 gpr++;
298 else
299 ofs += 4;
300 }
301 checkmclim(as);
302 }
303 #if !LJ_SOFTFP
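/* Vararg callees learn about FPR usage via CR bit 6 (SVR4 ABI):
** creqv 6,6,6 sets it (FPR args present), crxor 6,6,6 clears it.
*/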
304 if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
305 emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
306 #endif
307 }
308
309 /* Setup result reg/sp for call. Evict scratch regs. */
310 static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
311 {
312 RegSet drop = RSET_SCRATCH;
313 int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
314 #if !LJ_SOFTFP
315 if ((ci->flags & CCI_NOFPRCLOBBER))
316 drop &= ~RSET_FPR;
317 #endif
318 if (ra_hasreg(ir->r))
319 rset_clear(drop, ir->r); /* Dest reg handled below. */
320 if (hiop && ra_hasreg((ir+1)->r))
321 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
322 ra_evictset(as, drop); /* Evictions must be performed first. */
323 if (ra_used(ir)) {
324 lj_assertA(!irt_ispri(ir->t), "PRI dest");
325 if (!LJ_SOFTFP && irt_isfp(ir->t)) {
326 if ((ci->flags & CCI_CASTU64)) {
327 /* Use spill slot or temp slots. */
328 int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
329 Reg dest = ir->r;
330 if (ra_hasreg(dest)) {
331 ra_free(as, dest);
332 ra_modified(as, dest);
333 emit_fai(as, PPCI_LFD, dest, RID_SP, ofs);
334 }
335 emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs);
336 emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4);
337 } else {
338 ra_destreg(as, ir, RID_FPRET);
339 }
340 #if LJ_32
341 } else if (hiop) {
342 ra_destpair(as, ir);
343 #endif
344 } else {
345 ra_destreg(as, ir, RID_RET);
346 }
347 }
348 }
349
350 static void asm_callx(ASMState *as, IRIns *ir)
351 {
352 IRRef args[CCI_NARGS_MAX*2];
353 CCallInfo ci;
354 IRRef func;
355 IRIns *irf;
356 ci.flags = asm_callx_flags(as, ir);
357 asm_collectargs(as, ir, &ci, args);
358 asm_setupresult(as, ir, &ci);
359 func = ir->op2; irf = IR(func);
360 if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
361 if (irref_isk(func)) { /* Call to constant address. */
362 ci.func = (ASMFunction)(void *)(intptr_t)(irf->i);
363 } else { /* Need a non-argument register for indirect calls. */
364 RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
365 Reg freg = ra_alloc1(as, func, allow);
366 *--as->mcp = PPCI_BCTRL;
367 *--as->mcp = PPCI_MTCTR | PPCF_T(freg);
368 ci.func = (ASMFunction)(void *)0;
369 }
370 asm_gencall(as, &ci, args);
371 }
372
373 /* -- Returns ------------------------------------------------------------- */
374
375 /* Return to lower frame. Guard that it goes to the right spot. */
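/* In execution order: load the frame link PC from the slot below BASE,
** guard that it matches the expected return pc, then move BASE down by
** delta slots and store it back to g->jit_base.
*/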
376 static void asm_retf(ASMState *as, IRIns *ir)
377 {
378 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
379 void *pc = ir_kptr(IR(ir->op2));
380 int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
381 as->topslot -= (BCReg)delta;
382 if ((int32_t)as->topslot < 0) as->topslot = 0;
383 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
384 emit_setgl(as, base, jit_base);
385 emit_addptr(as, base, -8*delta);
386 asm_guardcc(as, CC_NE);
387 emit_ab(as, PPCI_CMPW, RID_TMP,
388 ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
389 emit_tai(as, PPCI_LWZ, RID_TMP, base, -8);
390 }
391
392 /* -- Type conversions ---------------------------------------------------- */
393
394 #if !LJ_SOFTFP
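/* Checked conversion from number to int32: truncate with fctiwz, rebuild
** a double from the integer result via the 2^52+2^31 bias trick and guard
** that it still compares equal to the original, i.e. the conversion was
** exact.
*/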
395 static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
396 {
397 RegSet allow = RSET_FPR;
398 Reg tmp = ra_scratch(as, rset_clear(allow, left));
399 Reg fbias = ra_scratch(as, rset_clear(allow, tmp));
400 Reg dest = ra_dest(as, ir, RSET_GPR);
401 Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest));
402 asm_guardcc(as, CC_NE);
403 emit_fab(as, PPCI_FCMPU, 0, tmp, left);
404 emit_fab(as, PPCI_FSUB, tmp, tmp, fbias);
405 emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP);
406 emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO);
407 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
408 emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
409 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
410 emit_lsptr(as, PPCI_LFS, (fbias & 31),
411 (void *)&as->J->k32[LJ_K32_2P52_2P31], RSET_GPR);
412 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
413 emit_fb(as, PPCI_FCTIWZ, tmp, left);
414 }
415
416 static void asm_tobit(ASMState *as, IRIns *ir)
417 {
418 RegSet allow = RSET_FPR;
419 Reg dest = ra_dest(as, ir, RSET_GPR);
420 Reg left = ra_alloc1(as, ir->op1, allow);
421 Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
422 Reg tmp = ra_scratch(as, rset_clear(allow, right));
423 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
424 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
425 emit_fab(as, PPCI_FADD, tmp, left, right);
426 }
427 #endif
428
429 static void asm_conv(ASMState *as, IRIns *ir)
430 {
431 IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
432 #if !LJ_SOFTFP
433 int stfp = (st == IRT_NUM || st == IRT_FLOAT);
434 #endif
435 IRRef lref = ir->op1;
436 /* 64 bit integer conversions are handled by SPLIT. */
437 lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
438 "IR %04d has unsplit 64 bit type",
439 (int)(ir - as->ir) - REF_BIAS);
440 #if LJ_SOFTFP
441 /* FP conversions are handled by SPLIT. */
442 lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
443 "IR %04d has FP type",
444 (int)(ir - as->ir) - REF_BIAS);
445 /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
446 #else
447 lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
448 if (irt_isfp(ir->t)) {
449 Reg dest = ra_dest(as, ir, RSET_FPR);
450 if (stfp) { /* FP to FP conversion. */
451 if (st == IRT_NUM) /* double -> float conversion. */
452 emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR));
453 else /* float -> double conversion is a no-op on PPC. */
454 ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
455 } else { /* Integer to FP conversion. */
456 /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */
457 /* IRT_U32: Bias with 2^52, subtract 2^52. */
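/* Storing 0x43300000 in the high word and the (hibit-flipped) integer in
** the low word forms the double 2^52+2^31+x, so subtracting the constant
** 2^52+2^31 yields (double)x exactly (analogously with 2^52 for IRT_U32).
*/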
458 RegSet allow = RSET_GPR;
459 Reg left = ra_alloc1(as, lref, allow);
460 Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
461 Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
462 if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
463 emit_fab(as, PPCI_FSUB, dest, dest, fbias);
464 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
465 emit_lsptr(as, PPCI_LFS, (fbias & 31),
466 &as->J->k32[st == IRT_U32 ? LJ_K32_2P52 : LJ_K32_2P52_2P31],
467 rset_clear(allow, hibias));
468 emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
469 RID_SP, SPOFS_TMPLO);
470 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
471 if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000);
472 }
473 } else if (stfp) { /* FP to integer conversion. */
474 if (irt_isguard(ir->t)) {
475 /* Checked conversions are only supported from number to int. */
476 lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
477 "bad type for checked CONV");
478 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
479 } else {
480 Reg dest = ra_dest(as, ir, RSET_GPR);
481 Reg left = ra_alloc1(as, lref, RSET_FPR);
482 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
483 if (irt_isu32(ir->t)) {
484 /* Convert both x and x-2^31 to int and merge results. */
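/* In execution order: compute (int)(x-2^31)+2^31 and (int)x, then select
** the former when x >= 2^31 (the subtracted value is non-negative) and
** the latter otherwise, using the AND/ANDC mask built from the sign bit.
*/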
485 Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest));
486 emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */
487 emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP);
488 emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP);
489 emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */
490 emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */
491 emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */
492 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
493 emit_tai(as, PPCI_LWZ, dest,
494 RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */
495 emit_fb(as, PPCI_FCTIWZ, tmp, left);
496 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
497 emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
498 emit_fab(as, PPCI_FSUB, tmp, left, tmp);
499 emit_lsptr(as, PPCI_LFS, (tmp & 31),
500 (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
501 } else {
502 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
503 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
504 emit_fb(as, PPCI_FCTIWZ, tmp, left);
505 }
506 }
507 } else
508 #endif
509 {
510 Reg dest = ra_dest(as, ir, RSET_GPR);
511 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
512 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
513 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
514 if ((ir->op2 & IRCONV_SEXT))
515 emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
516 else
517 emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31);
518 } else { /* 32/64 bit integer conversions. */
519 /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
520 ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
521 }
522 }
523 }
524
525 static void asm_strto(ASMState *as, IRIns *ir)
526 {
527 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
528 IRRef args[2];
529 int32_t ofs = SPOFS_TMP;
530 #if LJ_SOFTFP
531 ra_evictset(as, RSET_SCRATCH);
532 if (ra_used(ir)) {
533 if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
534 (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
535 int i;
536 for (i = 0; i < 2; i++) {
537 Reg r = (ir+i)->r;
538 if (ra_hasreg(r)) {
539 ra_free(as, r);
540 ra_modified(as, r);
541 emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
542 }
543 }
544 ofs = sps_scale(ir->s & ~1);
545 } else {
546 Reg rhi = ra_dest(as, ir+1, RSET_GPR);
547 Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
548 emit_tai(as, PPCI_LWZ, rhi, RID_SP, ofs);
549 emit_tai(as, PPCI_LWZ, rlo, RID_SP, ofs+4);
550 }
551 }
552 #else
553 RegSet drop = RSET_SCRATCH;
554 if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
555 ra_evictset(as, drop);
556 if (ir->s) ofs = sps_scale(ir->s);
557 #endif
558 asm_guardcc(as, CC_EQ);
559 emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
560 args[0] = ir->op1; /* GCstr *str */
561 args[1] = ASMREF_TMP1; /* TValue *n */
562 asm_gencall(as, ci, args);
563 /* Store the result to the spill slot or temp slots. */
564 emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
565 }
566
567 /* -- Memory references --------------------------------------------------- */
568
569 /* Get pointer to TValue. */
570 static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
571 {
572 IRIns *ir = IR(ref);
573 if (irt_isnum(ir->t)) {
574 if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
575 ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
576 else /* Otherwise force a spill and use the spill slot. */
577 emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir));
578 } else {
579 /* Otherwise use g->tmptv to hold the TValue. */
580 RegSet allow = rset_exclude(RSET_GPR, dest);
581 Reg type;
582 emit_tai(as, PPCI_ADDI, dest, RID_JGL, (int32_t)offsetof(global_State, tmptv)-32768);
583 if (!irt_ispri(ir->t)) {
584 Reg src = ra_alloc1(as, ref, allow);
585 emit_setgl(as, src, tmptv.gcr);
586 }
587 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
588 type = ra_alloc1(as, ref+1, allow);
589 else
590 type = ra_allock(as, irt_toitype(ir->t), allow);
591 emit_setgl(as, type, tmptv.it);
592 }
593 }
594
595 static void asm_aref(ASMState *as, IRIns *ir)
596 {
597 Reg dest = ra_dest(as, ir, RSET_GPR);
598 Reg idx, base;
599 if (irref_isk(ir->op2)) {
600 IRRef tab = IR(ir->op1)->op1;
601 int32_t ofs = asm_fuseabase(as, tab);
602 IRRef refa = ofs ? tab : ir->op1;
603 ofs += 8*IR(ir->op2)->i;
604 if (checki16(ofs)) {
605 base = ra_alloc1(as, refa, RSET_GPR);
606 emit_tai(as, PPCI_ADDI, dest, base, ofs);
607 return;
608 }
609 }
610 base = ra_alloc1(as, ir->op1, RSET_GPR);
611 idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
612 emit_tab(as, PPCI_ADD, dest, RID_TMP, base);
613 emit_slwi(as, RID_TMP, idx, 3);
614 }
615
616 /* Inlined hash lookup. Specialized for key type and for const keys.
617 ** The equivalent C code is:
618 ** Node *n = hashkey(t, key);
619 ** do {
620 ** if (lj_obj_equal(&n->key, key)) return &n->val;
621 ** } while ((n = nextnode(n)));
622 ** return niltv(L);
623 */
624 static void asm_href(ASMState *as, IRIns *ir, IROp merge)
625 {
626 RegSet allow = RSET_GPR;
627 int destused = ra_used(ir);
628 Reg dest = ra_dest(as, ir, allow);
629 Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
630 Reg key = RID_NONE, tmp1 = RID_TMP, tmp2;
631 Reg tisnum = RID_NONE, tmpnum = RID_NONE;
632 IRRef refkey = ir->op2;
633 IRIns *irkey = IR(refkey);
634 int isk = irref_isk(refkey);
635 IRType1 kt = irkey->t;
636 uint32_t khash;
637 MCLabel l_end, l_loop, l_next;
638
639 rset_clear(allow, tab);
640 #if LJ_SOFTFP
641 if (!isk) {
642 key = ra_alloc1(as, refkey, allow);
643 rset_clear(allow, key);
644 if (irkey[1].o == IR_HIOP) {
645 if (ra_hasreg((irkey+1)->r)) {
646 tmpnum = (irkey+1)->r;
647 ra_noweak(as, tmpnum);
648 } else {
649 tmpnum = ra_allocref(as, refkey+1, allow);
650 }
651 rset_clear(allow, tmpnum);
652 }
653 }
654 #else
655 if (irt_isnum(kt)) {
656 key = ra_alloc1(as, refkey, RSET_FPR);
657 tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
658 tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
659 rset_clear(allow, tisnum);
660 } else if (!irt_ispri(kt)) {
661 key = ra_alloc1(as, refkey, allow);
662 rset_clear(allow, key);
663 }
664 #endif
665 tmp2 = ra_scratch(as, allow);
666 rset_clear(allow, tmp2);
667
668 /* Key not found in chain: jump to exit (if merged) or load niltv. */
669 l_end = emit_label(as);
670 as->invmcp = NULL;
671 if (merge == IR_NE)
672 asm_guardcc(as, CC_EQ);
673 else if (destused)
674 emit_loada(as, dest, niltvg(J2G(as->J)));
675
676 /* Follow hash chain until the end. */
677 l_loop = --as->mcp;
678 emit_ai(as, PPCI_CMPWI, dest, 0);
679 emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next));
680 l_next = emit_label(as);
681
682 /* Type and value comparison. */
683 if (merge == IR_EQ)
684 asm_guardcc(as, CC_EQ);
685 else
686 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
687 if (!LJ_SOFTFP && irt_isnum(kt)) {
688 emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
689 emit_condbranch(as, PPCI_BC, CC_GE, l_next);
690 emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
691 emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n));
692 } else {
693 if (!irt_ispri(kt)) {
694 emit_ab(as, PPCI_CMPW, tmp2, key);
695 emit_condbranch(as, PPCI_BC, CC_NE, l_next);
696 }
697 if (LJ_SOFTFP && ra_hasreg(tmpnum))
698 emit_ab(as, PPCI_CMPW, tmp1, tmpnum);
699 else
700 emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
701 if (!irt_ispri(kt))
702 emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
703 }
704 emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it));
705 *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) |
706 (((char *)as->mcp-(char *)l_loop) & 0xffffu);
707
708 /* Load main position relative to tab->node into dest. */
709 khash = isk ? ir_khash(as, irkey) : 1;
710 if (khash == 0) {
711 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
712 } else {
713 Reg tmphash = tmp1;
714 if (isk)
715 tmphash = ra_allock(as, khash, allow);
716 emit_tab(as, PPCI_ADD, dest, dest, tmp1);
717 emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
718 emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
719 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
720 emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
721 if (isk) {
722 /* Nothing to do. */
723 } else if (irt_isstr(kt)) {
724 emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, sid));
725 } else { /* Must match with hash*() in lj_tab.c. */
726 emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
727 emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
728 emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
729 emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
730 emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
731 if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
732 #if LJ_SOFTFP
733 emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
734 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
735 emit_tab(as, PPCI_ADD, tmp1, tmpnum, tmpnum);
736 #else
737 int32_t ofs = ra_spill(as, irkey);
738 emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
739 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
740 emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
741 emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
742 emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
743 #endif
744 } else {
745 emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
746 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
747 emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS);
748 emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16);
749 }
750 }
751 }
752 }
753
754 static void asm_hrefk(ASMState *as, IRIns *ir)
755 {
756 IRIns *kslot = IR(ir->op2);
757 IRIns *irkey = IR(kslot->op1);
758 int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
759 int32_t kofs = ofs + (int32_t)offsetof(Node, key);
760 Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
761 Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
762 Reg key = RID_NONE, type = RID_TMP, idx = node;
763 RegSet allow = rset_exclude(RSET_GPR, node);
764 lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
765 if (ofs > 32736) {
766 idx = dest;
767 rset_clear(allow, dest);
768 kofs = (int32_t)offsetof(Node, key);
769 } else if (ra_hasreg(dest)) {
770 emit_tai(as, PPCI_ADDI, dest, node, ofs);
771 }
772 asm_guardcc(as, CC_NE);
773 if (!irt_ispri(irkey->t)) {
774 key = ra_scratch(as, allow);
775 rset_clear(allow, key);
776 }
777 rset_clear(allow, type);
778 if (irt_isnum(irkey->t)) {
779 emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo);
780 asm_guardcc(as, CC_NE);
781 emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi);
782 } else {
783 if (ra_hasreg(key)) {
784 emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. */
785 asm_guardcc(as, CC_NE);
786 }
787 emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t));
788 }
789 if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4);
790 emit_tai(as, PPCI_LWZ, type, idx, kofs);
791 if (ofs > 32736) {
792 emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16);
793 emit_tai(as, PPCI_ADDI, dest, node, ofs);
794 }
795 }
796
797 static void asm_uref(ASMState *as, IRIns *ir)
798 {
799 Reg dest = ra_dest(as, ir, RSET_GPR);
800 if (irref_isk(ir->op1)) {
801 GCfunc *fn = ir_kfunc(IR(ir->op1));
802 MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
803 emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR);
804 } else {
805 Reg uv = ra_scratch(as, RSET_GPR);
806 Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
807 if (ir->o == IR_UREFC) {
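/* IR_UREFC expects a closed upvalue: guard uv->closed == 1 and return a
** pointer to its embedded TValue.
*/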
808 asm_guardcc(as, CC_NE);
809 emit_ai(as, PPCI_CMPWI, RID_TMP, 1);
810 emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv));
811 emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
812 } else {
813 emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v));
814 }
815 emit_tai(as, PPCI_LWZ, uv, func,
816 (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
817 }
818 }
819
820 static void asm_fref(ASMState *as, IRIns *ir)
821 {
822 UNUSED(as); UNUSED(ir);
823 lj_assertA(!ra_used(ir), "unfused FREF");
824 }
825
826 static void asm_strref(ASMState *as, IRIns *ir)
827 {
828 Reg dest = ra_dest(as, ir, RSET_GPR);
829 IRRef ref = ir->op2, refk = ir->op1;
830 int32_t ofs = (int32_t)sizeof(GCstr);
831 Reg r;
832 if (irref_isk(ref)) {
833 IRRef tmp = refk; refk = ref; ref = tmp;
834 } else if (!irref_isk(refk)) {
835 Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
836 IRIns *irr = IR(ir->op2);
837 if (ra_hasreg(irr->r)) {
838 ra_noweak(as, irr->r);
839 right = irr->r;
840 } else if (mayfuse(as, irr->op2) &&
841 irr->o == IR_ADD && irref_isk(irr->op2) &&
842 checki16(ofs + IR(irr->op2)->i)) {
843 ofs += IR(irr->op2)->i;
844 right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
845 } else {
846 right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
847 }
848 emit_tai(as, PPCI_ADDI, dest, dest, ofs);
849 emit_tab(as, PPCI_ADD, dest, left, right);
850 return;
851 }
852 r = ra_alloc1(as, ref, RSET_GPR);
853 ofs += IR(refk)->i;
854 if (checki16(ofs))
855 emit_tai(as, PPCI_ADDI, dest, r, ofs);
856 else
857 emit_tab(as, PPCI_ADD, dest, r,
858 ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
859 }
860
861 /* -- Loads and stores ---------------------------------------------------- */
862
863 static PPCIns asm_fxloadins(ASMState *as, IRIns *ir)
864 {
865 UNUSED(as);
866 switch (irt_type(ir->t)) {
867 case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
868 case IRT_U8: return PPCI_LBZ;
869 case IRT_I16: return PPCI_LHA;
870 case IRT_U16: return PPCI_LHZ;
871 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_LFD;
872 case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_LFS;
873 default: return PPCI_LWZ;
874 }
875 }
876
877 static PPCIns asm_fxstoreins(ASMState *as, IRIns *ir)
878 {
879 UNUSED(as);
880 switch (irt_type(ir->t)) {
881 case IRT_I8: case IRT_U8: return PPCI_STB;
882 case IRT_I16: case IRT_U16: return PPCI_STH;
883 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_STFD;
884 case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_STFS;
885 default: return PPCI_STW;
886 }
887 }
888
889 static void asm_fload(ASMState *as, IRIns *ir)
890 {
891 Reg dest = ra_dest(as, ir, RSET_GPR);
892 PPCIns pi = asm_fxloadins(as, ir);
893 Reg idx;
894 int32_t ofs;
895 if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
896 idx = RID_JGL;
897 ofs = (ir->op2 << 2) - 32768;
898 } else {
899 idx = ra_alloc1(as, ir->op1, RSET_GPR);
900 if (ir->op2 == IRFL_TAB_ARRAY) {
901 ofs = asm_fuseabase(as, ir->op1);
902 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
903 emit_tai(as, PPCI_ADDI, dest, idx, ofs);
904 return;
905 }
906 }
907 ofs = field_ofs[ir->op2];
908 }
909 lj_assertA(!irt_isi8(ir->t), "unsupported FLOAD I8");
910 emit_tai(as, pi, dest, idx, ofs);
911 }
912
913 static void asm_fstore(ASMState *as, IRIns *ir)
914 {
915 if (ir->r != RID_SINK) {
916 Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
917 IRIns *irf = IR(ir->op1);
918 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
919 int32_t ofs = field_ofs[irf->op2];
920 PPCIns pi = asm_fxstoreins(as, ir);
921 emit_tai(as, pi, src, idx, ofs);
922 }
923 }
924
925 static void asm_xload(ASMState *as, IRIns *ir)
926 {
927 Reg dest = ra_dest(as, ir,
928 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
929 lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
930 if (irt_isi8(ir->t))
931 emit_as(as, PPCI_EXTSB, dest, dest);
932 asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
933 }
934
935 static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
936 {
937 IRIns *irb;
938 if (ir->r == RID_SINK)
939 return;
940 if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP &&
941 ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) {
942 /* Fuse BSWAP with XSTORE to stwbrx. */
943 Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
944 asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
945 } else {
946 Reg src = ra_alloc1(as, ir->op2,
947 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
948 asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
949 rset_exclude(RSET_GPR, src), ofs);
950 }
951 }
952
953 #define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
954
955 static void asm_ahuvload(ASMState *as, IRIns *ir)
956 {
957 IRType1 t = ir->t;
958 Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
959 RegSet allow = RSET_GPR;
960 int32_t ofs = AHUREF_LSX;
961 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) {
962 t.irt = IRT_NUM;
963 if (ra_used(ir+1)) {
964 type = ra_dest(as, ir+1, allow);
965 rset_clear(allow, type);
966 }
967 ofs = 0;
968 }
969 if (ra_used(ir)) {
970 lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
971 irt_isint(ir->t) || irt_isaddr(ir->t),
972 "bad load type %d", irt_type(ir->t));
973 if (LJ_SOFTFP || !irt_isnum(t)) ofs = 0;
974 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
975 rset_clear(allow, dest);
976 }
977 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
978 if (irt_isnum(t)) {
979 Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx));
980 asm_guardcc(as, CC_GE);
981 emit_ab(as, PPCI_CMPLW, type, tisnum);
982 if (ra_hasreg(dest)) {
983 if (!LJ_SOFTFP && ofs == AHUREF_LSX) {
984 tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
985 (idx&255)), (idx>>8)));
986 emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
987 } else {
988 emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest, idx,
989 ofs+4*LJ_SOFTFP);
990 }
991 }
992 } else {
993 asm_guardcc(as, CC_NE);
994 emit_ai(as, PPCI_CMPWI, type, irt_toitype(t));
995 if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4);
996 }
997 if (ofs == AHUREF_LSX) {
998 emit_tab(as, PPCI_LWZX, type, (idx&255), tmp);
999 emit_slwi(as, tmp, (idx>>8), 3);
1000 } else {
1001 emit_tai(as, PPCI_LWZ, type, idx, ofs);
1002 }
1003 }
1004
1005 static void asm_ahustore(ASMState *as, IRIns *ir)
1006 {
1007 RegSet allow = RSET_GPR;
1008 Reg idx, src = RID_NONE, type = RID_NONE;
1009 int32_t ofs = AHUREF_LSX;
1010 if (ir->r == RID_SINK)
1011 return;
1012 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1013 src = ra_alloc1(as, ir->op2, RSET_FPR);
1014 } else {
1015 if (!irt_ispri(ir->t)) {
1016 src = ra_alloc1(as, ir->op2, allow);
1017 rset_clear(allow, src);
1018 ofs = 0;
1019 }
1020 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
1021 type = ra_alloc1(as, (ir+1)->op2, allow);
1022 else
1023 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
1024 rset_clear(allow, type);
1025 }
1026 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1027 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1028 if (ofs == AHUREF_LSX) {
1029 emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
1030 emit_slwi(as, RID_TMP, (idx>>8), 3);
1031 } else {
1032 emit_fai(as, PPCI_STFD, src, idx, ofs);
1033 }
1034 } else {
1035 if (ra_hasreg(src))
1036 emit_tai(as, PPCI_STW, src, idx, ofs+4);
1037 if (ofs == AHUREF_LSX) {
1038 emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP);
1039 emit_slwi(as, RID_TMP, (idx>>8), 3);
1040 } else {
1041 emit_tai(as, PPCI_STW, type, idx, ofs);
1042 }
1043 }
1044 }
1045
1046 static void asm_sload(ASMState *as, IRIns *ir)
1047 {
1048 int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4);
1049 IRType1 t = ir->t;
1050 Reg dest = RID_NONE, type = RID_NONE, base;
1051 RegSet allow = RSET_GPR;
1052 int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
1053 if (hiop)
1054 t.irt = IRT_NUM;
1055 lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
1056 "bad parent SLOAD"); /* Handled by asm_head_side(). */
1057 lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
1058 "inconsistent SLOAD variant");
1059 lj_assertA(LJ_DUALNUM ||
1060 !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)),
1061 "bad SLOAD type");
1062 #if LJ_SOFTFP
1063 lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
1064 "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
1065 if (hiop && ra_used(ir+1)) {
1066 type = ra_dest(as, ir+1, allow);
1067 rset_clear(allow, type);
1068 }
1069 #else
1070 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
1071 dest = ra_scratch(as, RSET_FPR);
1072 asm_tointg(as, ir, dest);
1073 t.irt = IRT_NUM; /* Continue with a regular number type check. */
1074 } else
1075 #endif
1076 if (ra_used(ir)) {
1077 lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t),
1078 "bad SLOAD type %d", irt_type(ir->t));
1079 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
1080 rset_clear(allow, dest);
1081 base = ra_alloc1(as, REF_BASE, allow);
1082 rset_clear(allow, base);
1083 if (!LJ_SOFTFP && (ir->op2 & IRSLOAD_CONVERT)) {
1084 if (irt_isint(t)) {
1085 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
1086 dest = ra_scratch(as, RSET_FPR);
1087 emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP);
1088 emit_fb(as, PPCI_FCTIWZ, dest, dest);
1089 t.irt = IRT_NUM; /* Check for original type. */
1090 } else {
1091 Reg tmp = ra_scratch(as, allow);
1092 Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp));
1093 Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
1094 emit_fab(as, PPCI_FSUB, dest, dest, fbias);
1095 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
1096 emit_lsptr(as, PPCI_LFS, (fbias & 31),
1097 (void *)&as->J->k32[LJ_K32_2P52_2P31],
1098 rset_clear(allow, hibias));
1099 emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
1100 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
1101 emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000);
1102 dest = tmp;
1103 t.irt = IRT_INT; /* Check for original type. */
1104 }
1105 }
1106 goto dotypecheck;
1107 }
1108 base = ra_alloc1(as, REF_BASE, allow);
1109 rset_clear(allow, base);
1110 dotypecheck:
1111 if (irt_isnum(t)) {
1112 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1113 Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
1114 asm_guardcc(as, CC_GE);
1115 #if !LJ_SOFTFP
1116 type = RID_TMP;
1117 #endif
1118 emit_ab(as, PPCI_CMPLW, type, tisnum);
1119 }
1120 if (ra_hasreg(dest)) emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest,
1121 base, ofs-(LJ_SOFTFP?0:4));
1122 } else {
1123 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1124 asm_guardcc(as, CC_NE);
1125 emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t));
1126 type = RID_TMP;
1127 }
1128 if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs);
1129 }
1130 if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4);
1131 }
1132
1133 /* -- Allocations --------------------------------------------------------- */
1134
1135 #if LJ_HASFFI
1136 static void asm_cnew(ASMState *as, IRIns *ir)
1137 {
1138 CTState *cts = ctype_ctsG(J2G(as->J));
1139 CTypeID id = (CTypeID)IR(ir->op1)->i;
1140 CTSize sz;
1141 CTInfo info = lj_ctype_info(cts, id, &sz);
1142 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1143 IRRef args[4];
1144 RegSet drop = RSET_SCRATCH;
1145 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
1146 "bad CNEW/CNEWI operands");
1147
1148 as->gcsteps++;
1149 if (ra_hasreg(ir->r))
1150 rset_clear(drop, ir->r); /* Dest reg handled below. */
1151 ra_evictset(as, drop);
1152 if (ra_used(ir))
1153 ra_destreg(as, ir, RID_RET); /* GCcdata * */
1154
1155 /* Initialize immutable cdata object. */
1156 if (ir->o == IR_CNEWI) {
1157 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1158 int32_t ofs = sizeof(GCcdata);
1159 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
1160 if (sz == 8) {
1161 ofs += 4;
1162 lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
1163 }
1164 for (;;) {
1165 Reg r = ra_alloc1(as, ir->op2, allow);
1166 emit_tai(as, PPCI_STW, r, RID_RET, ofs);
1167 rset_clear(allow, r);
1168 if (ofs == sizeof(GCcdata)) break;
1169 ofs -= 4; ir++;
1170 }
1171 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
1172 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
1173 args[0] = ASMREF_L; /* lua_State *L */
1174 args[1] = ir->op1; /* CTypeID id */
1175 args[2] = ir->op2; /* CTSize sz */
1176 args[3] = ASMREF_TMP1; /* CTSize align */
1177 asm_gencall(as, ci, args);
1178 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
1179 return;
1180 }
1181
1182 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
1183 emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
1184 emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
1185 emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
1186 emit_ti(as, PPCI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */
1187 args[0] = ASMREF_L; /* lua_State *L */
1188 args[1] = ASMREF_TMP1; /* MSize size */
1189 asm_gencall(as, ci, args);
1190 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
1191 ra_releasetmp(as, ASMREF_TMP1));
1192 }
1193 #endif
1194
1195 /* -- Write barriers ------------------------------------------------------ */
1196
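/* Table write barrier: if the table is black, clear its black bit (turning
** it gray again) and push it onto g->gc.grayagain for re-traversal.
*/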
1197 static void asm_tbar(ASMState *as, IRIns *ir)
1198 {
1199 Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
1200 Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
1201 Reg link = RID_TMP;
1202 MCLabel l_end = emit_label(as);
1203 emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
1204 emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
1205 emit_setgl(as, tab, gc.grayagain);
1206 lj_assertA(LJ_GC_BLACK == 0x04, "bad LJ_GC_BLACK");
1207 emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
1208 emit_getgl(as, link, gc.grayagain);
1209 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
1210 emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK);
1211 emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked));
1212 }
1213
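/* Upvalue write barrier: the call to lj_gc_barrieruv() is only taken when
** a white value is stored into a black (closed) upvalue.
*/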
1214 static void asm_obar(ASMState *as, IRIns *ir)
1215 {
1216 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
1217 IRRef args[2];
1218 MCLabel l_end;
1219 Reg obj, val, tmp;
1220 /* No need for other object barriers (yet). */
1221 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
1222 ra_evictset(as, RSET_SCRATCH);
1223 l_end = emit_label(as);
1224 args[0] = ASMREF_TMP1; /* global_State *g */
1225 args[1] = ir->op1; /* TValue *tv */
1226 asm_gencall(as, ci, args);
1227 emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
1228 obj = IR(ir->op1)->r;
1229 tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
1230 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
1231 emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK);
1232 emit_condbranch(as, PPCI_BC, CC_EQ, l_end);
1233 emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES);
1234 val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
1235 emit_tai(as, PPCI_LBZ, tmp, obj,
1236 (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
1237 emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked));
1238 }
1239
1240 /* -- Arithmetic and logic operations ------------------------------------- */
1241
1242 #if !LJ_SOFTFP
1243 static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
1244 {
1245 Reg dest = ra_dest(as, ir, RSET_FPR);
1246 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1247 right = (left >> 8); left &= 255;
1248 if (pi == PPCI_FMUL)
1249 emit_fac(as, pi, dest, left, right);
1250 else
1251 emit_fab(as, pi, dest, left, right);
1252 }
1253
1254 static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
1255 {
1256 Reg dest = ra_dest(as, ir, RSET_FPR);
1257 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
1258 emit_fb(as, pi, dest, left);
1259 }
1260
1261 static void asm_fpmath(ASMState *as, IRIns *ir)
1262 {
1263 if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
1264 asm_fpunary(as, ir, PPCI_FSQRT);
1265 else
1266 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
1267 }
1268 #endif
1269
1270 static void asm_add(ASMState *as, IRIns *ir)
1271 {
1272 #if !LJ_SOFTFP
1273 if (irt_isnum(ir->t)) {
1274 if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
1275 asm_fparith(as, ir, PPCI_FADD);
1276 } else
1277 #endif
1278 {
1279 Reg dest = ra_dest(as, ir, RSET_GPR);
1280 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1281 PPCIns pi;
1282 if (irref_isk(ir->op2)) {
1283 int32_t k = IR(ir->op2)->i;
1284 if (checki16(k)) {
1285 pi = PPCI_ADDI;
1286 /* May fail due to spills/restores above, but simplifies the logic. */
1287 if (as->flagmcp == as->mcp) {
1288 as->flagmcp = NULL;
1289 as->mcp++;
1290 pi = PPCI_ADDICDOT;
1291 }
1292 emit_tai(as, pi, dest, left, k);
1293 return;
1294 } else if ((k & 0xffff) == 0) {
1295 emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16));
1296 return;
1297 } else if (!as->sectref) {
1298 emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16);
1299 emit_tai(as, PPCI_ADDI, dest, left, k);
1300 return;
1301 }
1302 }
1303 pi = PPCI_ADD;
1304 /* May fail due to spills/restores above, but simplifies the logic. */
1305 if (as->flagmcp == as->mcp) {
1306 as->flagmcp = NULL;
1307 as->mcp++;
1308 pi |= PPCF_DOT;
1309 }
1310 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1311 emit_tab(as, pi, dest, left, right);
1312 }
1313 }
1314
1315 static void asm_sub(ASMState *as, IRIns *ir)
1316 {
1317 #if !LJ_SOFTFP
1318 if (irt_isnum(ir->t)) {
1319 if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
1320 asm_fparith(as, ir, PPCI_FSUB);
1321 } else
1322 #endif
1323 {
1324 PPCIns pi = PPCI_SUBF;
1325 Reg dest = ra_dest(as, ir, RSET_GPR);
1326 Reg left, right;
1327 if (irref_isk(ir->op1)) {
1328 int32_t k = IR(ir->op1)->i;
1329 if (checki16(k)) {
1330 right = ra_alloc1(as, ir->op2, RSET_GPR);
1331 emit_tai(as, PPCI_SUBFIC, dest, right, k);
1332 return;
1333 }
1334 }
1335 /* May fail due to spills/restores above, but simplifies the logic. */
1336 if (as->flagmcp == as->mcp) {
1337 as->flagmcp = NULL;
1338 as->mcp++;
1339 pi |= PPCF_DOT;
1340 }
1341 left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1342 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1343 emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
1344 }
1345 }
1346
1347 static void asm_mul(ASMState *as, IRIns *ir)
1348 {
1349 #if !LJ_SOFTFP
1350 if (irt_isnum(ir->t)) {
1351 asm_fparith(as, ir, PPCI_FMUL);
1352 } else
1353 #endif
1354 {
1355 PPCIns pi = PPCI_MULLW;
1356 Reg dest = ra_dest(as, ir, RSET_GPR);
1357 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1358 if (irref_isk(ir->op2)) {
1359 int32_t k = IR(ir->op2)->i;
1360 if (checki16(k)) {
1361 emit_tai(as, PPCI_MULLI, dest, left, k);
1362 return;
1363 }
1364 }
1365 /* May fail due to spills/restores above, but simplifies the logic. */
1366 if (as->flagmcp == as->mcp) {
1367 as->flagmcp = NULL;
1368 as->mcp++;
1369 pi |= PPCF_DOT;
1370 }
1371 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1372 emit_tab(as, pi, dest, left, right);
1373 }
1374 }
1375
1376 #define asm_fpdiv(as, ir) asm_fparith(as, ir, PPCI_FDIV)
1377
1378 static void asm_neg(ASMState *as, IRIns *ir)
1379 {
1380 #if !LJ_SOFTFP
1381 if (irt_isnum(ir->t)) {
1382 asm_fpunary(as, ir, PPCI_FNEG);
1383 } else
1384 #endif
1385 {
1386 Reg dest, left;
1387 PPCIns pi = PPCI_NEG;
1388 if (as->flagmcp == as->mcp) {
1389 as->flagmcp = NULL;
1390 as->mcp++;
1391 pi |= PPCF_DOT;
1392 }
1393 dest = ra_dest(as, ir, RSET_GPR);
1394 left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1395 emit_tab(as, pi, dest, left, 0);
1396 }
1397 }
1398
1399 #define asm_abs(as, ir) asm_fpunary(as, ir, PPCI_FABS)
1400
1401 static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
1402 {
1403 Reg dest, left, right;
1404 if (as->flagmcp == as->mcp) {
1405 as->flagmcp = NULL;
1406 as->mcp++;
1407 }
1408 asm_guardcc(as, CC_SO);
1409 dest = ra_dest(as, ir, RSET_GPR);
1410 left = ra_alloc2(as, ir, RSET_GPR);
1411 right = (left >> 8); left &= 255;
1412 if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; }
1413 emit_tab(as, pi|PPCF_DOT, dest, left, right);
1414 }
1415
1416 #define asm_addov(as, ir) asm_arithov(as, ir, PPCI_ADDO)
1417 #define asm_subov(as, ir) asm_arithov(as, ir, PPCI_SUBFO)
1418 #define asm_mulov(as, ir) asm_arithov(as, ir, PPCI_MULLWO)
1419
1420 #if LJ_HASFFI
1421 static void asm_add64(ASMState *as, IRIns *ir)
1422 {
1423 Reg dest = ra_dest(as, ir, RSET_GPR);
1424 Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
1425 PPCIns pi = PPCI_ADDE;
1426 if (irref_isk(ir->op2)) {
1427 int32_t k = IR(ir->op2)->i;
1428 if (k == 0)
1429 pi = PPCI_ADDZE;
1430 else if (k == -1)
1431 pi = PPCI_ADDME;
1432 else
1433 goto needright;
1434 right = 0;
1435 } else {
1436 needright:
1437 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1438 }
1439 emit_tab(as, pi, dest, left, right);
1440 ir--;
1441 dest = ra_dest(as, ir, RSET_GPR);
1442 left = ra_alloc1(as, ir->op1, RSET_GPR);
1443 if (irref_isk(ir->op2)) {
1444 int32_t k = IR(ir->op2)->i;
1445 if (checki16(k)) {
1446 emit_tai(as, PPCI_ADDIC, dest, left, k);
1447 return;
1448 }
1449 }
1450 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1451 emit_tab(as, PPCI_ADDC, dest, left, right);
1452 }
1453
1454 static void asm_sub64(ASMState *as, IRIns *ir)
1455 {
1456 Reg dest = ra_dest(as, ir, RSET_GPR);
1457 Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR);
1458 PPCIns pi = PPCI_SUBFE;
1459 if (irref_isk(ir->op1)) {
1460 int32_t k = IR(ir->op1)->i;
1461 if (k == 0)
1462 pi = PPCI_SUBFZE;
1463 else if (k == -1)
1464 pi = PPCI_SUBFME;
1465 else
1466 goto needleft;
1467 left = 0;
1468 } else {
1469 needleft:
1470 left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
1471 }
1472 emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
1473 ir--;
1474 dest = ra_dest(as, ir, RSET_GPR);
1475 right = ra_alloc1(as, ir->op2, RSET_GPR);
1476 if (irref_isk(ir->op1)) {
1477 int32_t k = IR(ir->op1)->i;
1478 if (checki16(k)) {
1479 emit_tai(as, PPCI_SUBFIC, dest, right, k);
1480 return;
1481 }
1482 }
1483 left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
1484 emit_tab(as, PPCI_SUBFC, dest, right, left);
1485 }
1486
1487 static void asm_neg64(ASMState *as, IRIns *ir)
1488 {
1489 Reg dest = ra_dest(as, ir, RSET_GPR);
1490 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1491 emit_tab(as, PPCI_SUBFZE, dest, left, 0);
1492 ir--;
1493 dest = ra_dest(as, ir, RSET_GPR);
1494 left = ra_alloc1(as, ir->op1, RSET_GPR);
1495 emit_tai(as, PPCI_SUBFIC, dest, left, 0);
1496 }
1497 #endif
1498
1499 static void asm_bnot(ASMState *as, IRIns *ir)
1500 {
1501 Reg dest, left, right;
1502 PPCIns pi = PPCI_NOR;
1503 if (as->flagmcp == as->mcp) {
1504 as->flagmcp = NULL;
1505 as->mcp++;
1506 pi |= PPCF_DOT;
1507 }
1508 dest = ra_dest(as, ir, RSET_GPR);
1509 if (mayfuse(as, ir->op1)) {
1510 IRIns *irl = IR(ir->op1);
1511 if (irl->o == IR_BAND)
1512 pi ^= (PPCI_NOR ^ PPCI_NAND);
1513 else if (irl->o == IR_BXOR)
1514 pi ^= (PPCI_NOR ^ PPCI_EQV);
1515 else if (irl->o != IR_BOR)
1516 goto nofuse;
1517 left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
1518 right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
1519 } else {
1520 nofuse:
1521 left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1522 }
1523 emit_asb(as, pi, dest, left, right);
1524 }
1525
1526 static void asm_bswap(ASMState *as, IRIns *ir)
1527 {
1528 Reg dest = ra_dest(as, ir, RSET_GPR);
1529 IRIns *irx;
1530 if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
1531 ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
1532 /* Fuse BSWAP with XLOAD to lwbrx. */
1533 asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
1534 } else {
1535 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1536 Reg tmp = dest;
1537 if (tmp == left) {
1538 tmp = RID_TMP;
1539 emit_mr(as, dest, RID_TMP);
1540 }
1541 emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
1542 emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
1543 emit_rotlwi(as, tmp, left, 8);
1544 }
1545 }
1546
1547 /* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
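/* The shift can only be folded into the rlwinm rotate if none of the mask
** bits fall into the positions that the original shift would have zeroed.
*/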
1548 static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
1549 {
1550 IRIns *ir;
1551 Reg left;
1552 if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
1553 irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
1554 int32_t sh = (IR(ir->op2)->i & 31);
1555 switch (ir->o) {
1556 case IR_BSHL:
1557 if ((mask & ((1u<<sh)-1))) goto nofuse;
1558 break;
1559 case IR_BSHR:
1560 if ((mask & ~((~0u)>>sh))) goto nofuse;
1561 sh = ((32-sh)&31);
1562 break;
1563 case IR_BROL:
1564 break;
1565 default:
1566 goto nofuse;
1567 }
1568 left = ra_alloc1(as, ir->op1, RSET_GPR);
1569 *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
1570 return;
1571 }
1572 nofuse:
1573 left = ra_alloc1(as, ref, RSET_GPR);
1574 *--as->mcp = pi | PPCF_T(left);
1575 }
1576
1577 static void asm_band(ASMState *as, IRIns *ir)
1578 {
1579 Reg dest, left, right;
1580 IRRef lref = ir->op1;
1581 PPCIns dot = 0;
1582 IRRef op2;
1583 if (as->flagmcp == as->mcp) {
1584 as->flagmcp = NULL;
1585 as->mcp++;
1586 dot = PPCF_DOT;
1587 }
1588 dest = ra_dest(as, ir, RSET_GPR);
1589 if (irref_isk(ir->op2)) {
1590 int32_t k = IR(ir->op2)->i;
1591 if (k) {
1592 /* First check for a contiguous bitmask as used by rlwinm. */
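/* k is a contiguous bitmask iff, after shifting out its trailing zeroes,
** the remainder has the form 2^n-1, i.e. (k1 & (k1+1)) == 0.
*/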
1593 uint32_t s1 = lj_ffs((uint32_t)k);
1594 uint32_t k1 = ((uint32_t)k >> s1);
1595 if ((k1 & (k1+1)) == 0) {
1596 asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
1597 PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1),
1598 k, lref);
1599 return;
1600 }
1601 if (~(uint32_t)k) {
1602 uint32_t s2 = lj_ffs(~(uint32_t)k);
1603 uint32_t k2 = (~(uint32_t)k >> s2);
1604 if ((k2 & (k2+1)) == 0) {
1605 asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
1606 PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)),
1607 k, lref);
1608 return;
1609 }
1610 }
1611 }
1612 if (checku16(k)) {
1613 left = ra_alloc1(as, lref, RSET_GPR);
1614 emit_asi(as, PPCI_ANDIDOT, dest, left, k);
1615 return;
1616 } else if ((k & 0xffff) == 0) {
1617 left = ra_alloc1(as, lref, RSET_GPR);
1618 emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16));
1619 return;
1620 }
1621 }
1622 op2 = ir->op2;
1623 if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) {
1624 dot ^= (PPCI_AND ^ PPCI_ANDC);
1625 op2 = IR(op2)->op1;
1626 }
1627 left = ra_hintalloc(as, lref, dest, RSET_GPR);
1628 right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left));
1629 emit_asb(as, PPCI_AND ^ dot, dest, left, right);
1630 }
1631
1632 static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
1633 {
1634 Reg dest = ra_dest(as, ir, RSET_GPR);
1635 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1636 if (irref_isk(ir->op2)) {
1637 int32_t k = IR(ir->op2)->i;
1638 Reg tmp = left;
1639 if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
1640 if (!checku16(k)) {
1641 emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
1642 if ((k & 0xffff) == 0) return;
1643 }
1644 emit_asi(as, pik, dest, left, k);
1645 return;
1646 }
1647 }
1648 /* May fail due to spills/restores above, but simplifies the logic. */
1649 if (as->flagmcp == as->mcp) {
1650 as->flagmcp = NULL;
1651 as->mcp++;
1652 pi |= PPCF_DOT;
1653 }
1654 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1655 emit_asb(as, pi, dest, left, right);
1656 }
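/*
** Worked example (annotation): a constant that fits neither the 16 bit
** immediate of ORI nor that of ORIS is split into two immediates. For
** y = BOR x, 0x12345678 the executed sequence is:
**   ori  dest,x,0x5678
**   oris dest,dest,0x1234
** The pik ^ (PPCI_ORI ^ PPCI_ORIS) trick flips ORI to ORIS (and XORI to
** XORIS) without a lookup table.
*/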
1657
1658 #define asm_bor(as, ir) asm_bitop(as, ir, PPCI_OR, PPCI_ORI)
1659 #define asm_bxor(as, ir) asm_bitop(as, ir, PPCI_XOR, PPCI_XORI)
1660
1661 static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
1662 {
1663 Reg dest, left;
1664 Reg dot = 0;
1665 if (as->flagmcp == as->mcp) {
1666 as->flagmcp = NULL;
1667 as->mcp++;
1668 dot = PPCF_DOT;
1669 }
1670 dest = ra_dest(as, ir, RSET_GPR);
1671 left = ra_alloc1(as, ir->op1, RSET_GPR);
1672 if (irref_isk(ir->op2)) { /* Constant shifts. */
1673 int32_t shift = (IR(ir->op2)->i & 31);
1674 if (pik == 0) /* SLWI */
1675 emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift);
1676 else if (pik == 1) /* SRWI */
1677 emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31);
1678 else
1679 emit_asb(as, pik|dot, dest, left, shift);
1680 } else {
1681 Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1682 emit_asb(as, pi|dot, dest, left, right);
1683 }
1684 }
1685
1686 #define asm_bshl(as, ir) asm_bitshift(as, ir, PPCI_SLW, 0)
1687 #define asm_bshr(as, ir) asm_bitshift(as, ir, PPCI_SRW, 1)
1688 #define asm_bsar(as, ir) asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI)
1689 #define asm_brol(as, ir) \
1690 asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \
1691 PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31))
1692 #define asm_bror(as, ir) lj_assertA(0, "unexpected BROR")
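/*
** Note (annotation): the pik == 0 and pik == 1 cases above encode constant
** shifts as rotate-and-mask, the usual PPC idiom:
**   slwi dest,left,n  ==  rlwinm dest,left,n,0,31-n
**   srwi dest,left,n  ==  rlwinm dest,left,32-n,n,31
** Only the arithmetic right shift has a real immediate form (SRAWI).
*/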
1693
1694 #if LJ_SOFTFP
1695 static void asm_sfpmin_max(ASMState *as, IRIns *ir)
1696 {
1697 CCallInfo ci = lj_ir_callinfo[IRCALL_softfp_cmp];
1698 IRRef args[4];
1699 MCLabel l_right, l_end;
1700 Reg desthi = ra_dest(as, ir, RSET_GPR), destlo = ra_dest(as, ir+1, RSET_GPR);
1701 Reg righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
1702 Reg rightlo, leftlo = ra_alloc2(as, ir+1, RSET_GPR);
1703 PPCCC cond = (IROp)ir->o == IR_MIN ? CC_EQ : CC_NE;
1704 righthi = (lefthi >> 8); lefthi &= 255;
1705 rightlo = (leftlo >> 8); leftlo &= 255;
1706 args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
1707 args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;
1708 l_end = emit_label(as);
1709 if (desthi != righthi) emit_mr(as, desthi, righthi);
1710 if (destlo != rightlo) emit_mr(as, destlo, rightlo);
1711 l_right = emit_label(as);
1712 if (l_end != l_right) emit_jmp(as, l_end);
1713 if (desthi != lefthi) emit_mr(as, desthi, lefthi);
1714 if (destlo != leftlo) emit_mr(as, destlo, leftlo);
1715 if (l_right == as->mcp+1) {
1716 cond ^= 4; l_right = l_end; ++as->mcp;
1717 }
1718 emit_condbranch(as, PPCI_BC, cond, l_right);
1719 ra_evictset(as, RSET_SCRATCH);
1720 emit_cmpi(as, RID_RET, 1);
1721 asm_gencall(as, &ci, args);
1722 }
1723 #endif
1724
1725 static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
1726 {
1727 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1728 Reg dest = ra_dest(as, ir, RSET_FPR);
1729 Reg tmp = dest;
1730 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1731 right = (left >> 8); left &= 255;
1732 if (tmp == left || tmp == right)
1733 tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
1734 dest), left), right));
1735 emit_facb(as, PPCI_FSEL, dest, tmp, left, right);
1736 emit_fab(as, PPCI_FSUB, tmp, ismax ? left : right, ismax ? right : left);
1737 } else {
1738 Reg dest = ra_dest(as, ir, RSET_GPR);
1739 Reg tmp1 = RID_TMP, tmp2 = dest;
1740 Reg right, left = ra_alloc2(as, ir, RSET_GPR);
1741 right = (left >> 8); left &= 255;
1742 if (tmp2 == left || tmp2 == right)
1743 tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR,
1744 dest), left), right));
1745 emit_tab(as, PPCI_ADD, dest, tmp2, right);
1746 emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1);
1747 emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1);
1748 emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1);
1749 emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000);
1750 emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000);
1751 }
1752 }
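/*
** The FP path computes a difference and lets FSEL pick an operand by its
** sign; the integer path is branchless. A minimal C sketch of the integer
** sequence, guarded out (hypothetical function, not used by the assembler):
*/
#if 0
static int32_t sketch_int_min_max(int32_t left, int32_t right, int ismax)
{
  uint32_t bl = (uint32_t)left ^ 0x80000000u;   /* xoris: bias to unsigned. */
  uint32_t br = (uint32_t)right ^ 0x80000000u;
  uint32_t diff = bl - br;                      /* subfc: left-right mod 2^32. */
  uint32_t mask = (bl < br) ? ~0u : 0u;         /* subfe: borrow -> all ones. */
  uint32_t sel = ismax ? (diff & ~mask) : (diff & mask);  /* andc / and. */
  return (int32_t)((uint32_t)right + sel);      /* add: yields left or right. */
}
#endif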
1753
1754 #define asm_min(as, ir) asm_min_max(as, ir, 0)
1755 #define asm_max(as, ir) asm_min_max(as, ir, 1)
1756
1757 /* -- Comparisons --------------------------------------------------------- */
1758
1759 #define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
1760 #define CC_TWO 0x80 /* Check two flags for FP comparison. */
1761
1762 /* Map of comparisons to flags. ORDER IR. */
1763 static const uint8_t asm_compmap[IR_ABC+1] = {
1764 /* op int cc FP cc */
1765 /* LT */ CC_GE + (CC_GE<<4),
1766 /* GE */ CC_LT + (CC_LE<<4) + CC_TWO,
1767 /* LE */ CC_GT + (CC_GE<<4) + CC_TWO,
1768 /* GT */ CC_LE + (CC_LE<<4),
1769 /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO,
1770 /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4),
1771 /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4),
1772 /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO,
1773 /* EQ */ CC_NE + (CC_NE<<4),
1774 /* NE */ CC_EQ + (CC_EQ<<4),
1775 /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO /* Same as UGT. */
1776 };
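/*
** Encoding of the entries above (annotation): the low nibble is the negated
** integer condition used to branch to the exit when the comparison fails,
** the high nibble is the corresponding FP condition. CC_UNSIGNED selects
** cmplw(i) instead of cmpw(i). CC_TWO marks FP comparisons whose exit
** condition needs two CR bits; asm_comp() merges the EQ bit into the LT/GT
** bit with a CROR before the branch. E.g. IR_LT exits on "greater or equal"
** for both integer and FP operands.
*/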
1777
1778 static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
1779 {
1780 Reg right, left = ra_alloc1(as, lref, RSET_GPR);
1781 if (irref_isk(rref)) {
1782 int32_t k = IR(rref)->i;
1783 if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */
1784 if (checki16(k)) {
1785 emit_tai(as, PPCI_CMPWI, cr, left, k);
1786 /* Signed comparison with zero and referencing previous ins? */
1787 if (k == 0 && lref == as->curins-1)
1788 as->flagmcp = as->mcp; /* Allow elimination of the compare. */
1789 return;
1790 } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */
1791 if (checku16(k)) {
1792 emit_tai(as, PPCI_CMPLWI, cr, left, k);
1793 return;
1794 } else if (!as->sectref && ra_noreg(IR(rref)->r)) {
1795 emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k);
1796 emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16));
1797 return;
1798 }
1799 }
1800 } else { /* Unsigned comparison with constant. */
1801 if (checku16(k)) {
1802 emit_tai(as, PPCI_CMPLWI, cr, left, k);
1803 return;
1804 }
1805 }
1806 }
1807 right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
1808 emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right);
1809 }
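/*
** Worked example (annotation): the XORIS+CMPLWI pair above tests EQ/NE
** against a full 32 bit constant without materializing it. For k = 0x12345678:
**   xoris  TMP,left,0x1234    ; upper halfword of TMP is zero iff it matched
**   cmplwi cr,TMP,0x5678      ; CR equal bit set iff left == k
*/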
1810
1811 static void asm_comp(ASMState *as, IRIns *ir)
1812 {
1813 PPCCC cc = asm_compmap[ir->o];
1814 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1815 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1816 right = (left >> 8); left &= 255;
1817 asm_guardcc(as, (cc >> 4));
1818 if ((cc & CC_TWO))
1819 emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3));
1820 emit_fab(as, PPCI_FCMPU, 0, left, right);
1821 } else {
1822 IRRef lref = ir->op1, rref = ir->op2;
1823 if (irref_isk(lref) && !irref_isk(rref)) {
1824 /* Swap constants to the right (only for ABC). */
1825 IRRef tmp = lref; lref = rref; rref = tmp;
1826 if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */
1827 }
1828 asm_guardcc(as, cc);
1829 asm_intcomp_(as, lref, rref, 0, cc);
1830 }
1831 }
1832
1833 #define asm_equal(as, ir) asm_comp(as, ir)
1834
1835 #if LJ_SOFTFP
1836 /* SFP comparisons. */
1837 static void asm_sfpcomp(ASMState *as, IRIns *ir)
1838 {
1839 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
1840 RegSet drop = RSET_SCRATCH;
1841 Reg r;
1842 IRRef args[4];
1843 args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
1844 args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;
1845
1846 for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+3; r++) {
1847 if (!rset_test(as->freeset, r) &&
1848 regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
1849 rset_clear(drop, r);
1850 }
1851 ra_evictset(as, drop);
1852 asm_setupresult(as, ir, ci);
1853 switch ((IROp)ir->o) {
1854 case IR_ULT:
1855 asm_guardcc(as, CC_EQ);
1856 emit_ai(as, PPCI_CMPWI, RID_RET, 0);
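/* Intentional fallthrough: ULT also needs the ULE guard below. */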
1857 case IR_ULE:
1858 asm_guardcc(as, CC_EQ);
1859 emit_ai(as, PPCI_CMPWI, RID_RET, 1);
1860 break;
1861 case IR_GE: case IR_GT:
1862 asm_guardcc(as, CC_EQ);
1863 emit_ai(as, PPCI_CMPWI, RID_RET, 2);
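/* Intentional fallthrough: GE/GT need the extra == 2 guard plus the generic guard below. */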
1864 default:
1865 asm_guardcc(as, (asm_compmap[ir->o] & 0xf));
1866 emit_ai(as, PPCI_CMPWI, RID_RET, 0);
1867 break;
1868 }
1869 asm_gencall(as, ci, args);
1870 }
1871 #endif
1872
1873 #if LJ_HASFFI
1874 /* 64 bit integer comparisons. */
1875 static void asm_comp64(ASMState *as, IRIns *ir)
1876 {
1877 PPCCC cc = asm_compmap[(ir-1)->o];
1878 if ((cc&3) == (CC_EQ&3)) {
1879 asm_guardcc(as, cc);
1880 emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR,
1881 (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3));
1882 } else {
1883 asm_guardcc(as, CC_EQ);
1884 emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1));
1885 emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC,
1886 (CC_EQ&3), (CC_EQ&3), 4+(cc&3));
1887 }
1888 /* Loword comparison sets cr1 and is unsigned, except for equality. */
1889 asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4,
1890 cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED));
1891 /* Hiword comparison sets cr0. */
1892 asm_intcomp_(as, ir->op1, ir->op2, 0, cc);
1893 as->flagmcp = NULL; /* Doesn't work here. */
1894 }
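/*
** Note (annotation): the 64 bit compare evaluates both halves. The hiword
** compare (cr0) decides, unless the hiwords are equal, in which case the
** unsigned loword compare (cr1) decides; for EQ/NE both halves must match.
** The CR logic ops above merge cr1 into cr0 so that a single conditional
** guard suffices.
*/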
1895 #endif
1896
1897 /* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
1898
1899 /* Hiword op of a split 64 bit op. Previous op must be the loword op. */
1900 static void asm_hiop(ASMState *as, IRIns *ir)
1901 {
1902 #if LJ_HASFFI || LJ_SOFTFP
1903 /* HIOP is marked as a store because it needs its own DCE logic. */
1904 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
1905 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
1906 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
1907 as->curins--; /* Always skip the CONV. */
1908 #if LJ_HASFFI && !LJ_SOFTFP
1909 if (usehi || uselo)
1910 asm_conv64(as, ir);
1911 return;
1912 #endif
1913 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
1914 as->curins--; /* Always skip the loword comparison. */
1915 #if LJ_SOFTFP
1916 if (!irt_isint(ir->t)) {
1917 asm_sfpcomp(as, ir-1);
1918 return;
1919 }
1920 #endif
1921 #if LJ_HASFFI
1922 asm_comp64(as, ir);
1923 #endif
1924 return;
1925 #if LJ_SOFTFP
1926 } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
1927 as->curins--; /* Always skip the loword min/max. */
1928 if (uselo || usehi)
1929 asm_sfpmin_max(as, ir-1);
1930 return;
1931 #endif
1932 } else if ((ir-1)->o == IR_XSTORE) {
1933 as->curins--; /* Handle both stores here. */
1934 if ((ir-1)->r != RID_SINK) {
1935 asm_xstore_(as, ir, 0);
1936 asm_xstore_(as, ir-1, 4);
1937 }
1938 return;
1939 }
1940 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
1941 switch ((ir-1)->o) {
1942 #if LJ_HASFFI
1943 case IR_ADD: as->curins--; asm_add64(as, ir); break;
1944 case IR_SUB: as->curins--; asm_sub64(as, ir); break;
1945 case IR_NEG: as->curins--; asm_neg64(as, ir); break;
1946 #endif
1947 #if LJ_SOFTFP
1948 case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
1949 case IR_STRTO:
1950 if (!uselo)
1951 ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
1952 break;
1953 #endif
1954 case IR_CALLN:
1955 case IR_CALLS:
1956 case IR_CALLXS:
1957 if (!uselo)
1958 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
1959 break;
1960 #if LJ_SOFTFP
1961 case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
1962 #endif
1963 case IR_CNEWI:
1964 /* Nothing to do here. Handled by lo op itself. */
1965 break;
1966 default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
1967 }
1968 #else
1969 /* Unused without SOFTFP or FFI. */
1970 UNUSED(as); UNUSED(ir); lj_assertA(0, "unexpected HIOP");
1971 #endif
1972 }
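/*
** Note (annotation): on this 32 bit target a 64 bit IR operation is
** represented as the loword op immediately followed by a HIOP carrying the
** hiword operands. The dispatch above emits code for both halves at once and
** decrements as->curins, so the main assembler loop does not visit the
** loword op again.
*/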
1973
1974 /* -- Profiling ----------------------------------------------------------- */
1975
1976 static void asm_prof(ASMState *as, IRIns *ir)
1977 {
1978 UNUSED(ir);
1979 asm_guardcc(as, CC_NE);
1980 emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, HOOK_PROFILE);
1981 emit_lsglptr(as, PPCI_LBZ, RID_TMP,
1982 (int32_t)offsetof(global_State, hookmask));
1983 }
1984
1985 /* -- Stack handling ------------------------------------------------------ */
1986
1987 /* Check Lua stack size for overflow. Use exit handler as fallback. */
1988 static void asm_stack_check(ASMState *as, BCReg topslot,
1989 IRIns *irp, RegSet allow, ExitNo exitno)
1990 {
1991 /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
1992 Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
1993 rset_clear(allow, pbase);
1994 tmp = allow ? rset_pickbot(allow) :
1995 (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
1996 emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno));
1997 if (allow == RSET_EMPTY) /* Restore temp. register. */
1998 emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW);
1999 else
2000 ra_modified(as, tmp);
2001 emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot));
2002 emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp);
2003 emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
2004 if (pbase == RID_TMP)
2005 emit_getgl(as, RID_TMP, jit_base);
2006 emit_getgl(as, tmp, cur_L);
2007 if (allow == RSET_EMPTY) /* Spill temp. register. */
2008 emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
2009 }
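/*
** Worked example (annotation, executed order, ignoring the optional
** spill/restore of the temp. register):
**   lwz    tmp, cur_L            ; from the global state via RID_JGL
**   lwz    tmp, maxstack(tmp)
**   subf   TMP, pbase, tmp       ; bytes left on the Lua stack
**   cmplwi TMP, 8*topslot        ; each slot is an 8 byte TValue
**   blt    ->exit                ; not enough room: take the side exit
*/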
2010
2011 /* Restore Lua stack from on-trace state. */
2012 static void asm_stack_restore(ASMState *as, SnapShot *snap)
2013 {
2014 SnapEntry *map = &as->T->snapmap[snap->mapofs];
2015 SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
2016 MSize n, nent = snap->nent;
2017 /* Store the value of all modified slots to the Lua stack. */
2018 for (n = 0; n < nent; n++) {
2019 SnapEntry sn = map[n];
2020 BCReg s = snap_slot(sn);
2021 int32_t ofs = 8*((int32_t)s-1);
2022 IRRef ref = snap_ref(sn);
2023 IRIns *ir = IR(ref);
2024 if ((sn & SNAP_NORESTORE))
2025 continue;
2026 if (irt_isnum(ir->t)) {
2027 #if LJ_SOFTFP
2028 Reg tmp;
2029 RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
2030 /* LJ_SOFTFP: must be a number constant. */
2031 lj_assertA(irref_isk(ref), "unsplit FP op");
2032 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
2033 emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
2034 if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
2035 tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
2036 emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
2037 #else
2038 Reg src = ra_alloc1(as, ref, RSET_FPR);
2039 emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
2040 #endif
2041 } else {
2042 Reg type;
2043 RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
2044 lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
2045 "restore of IR type %d", irt_type(ir->t));
2046 if (!irt_ispri(ir->t)) {
2047 Reg src = ra_alloc1(as, ref, allow);
2048 rset_clear(allow, src);
2049 emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4);
2050 }
2051 if ((sn & (SNAP_CONT|SNAP_FRAME))) {
2052 if (s == 0) continue; /* Do not overwrite link to previous frame. */
2053 type = ra_allock(as, (int32_t)(*flinks--), allow);
2054 #if LJ_SOFTFP
2055 } else if ((sn & SNAP_SOFTFPNUM)) {
2056 type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
2057 #endif
2058 } else {
2059 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
2060 }
2061 emit_tai(as, PPCI_STW, type, RID_BASE, ofs);
2062 }
2063 checkmclim(as);
2064 }
2065 lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
2066 }
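/*
** Note (annotation): stack slots are 8 byte TValues at BASE + 8*(slot-1).
** FP numbers are stored as a whole double; everything else stores the type
** tag word at offset 0 and, for non-primitive types, the payload word at
** offset 4 (big-endian TValue layout). CONT/FRAME slots take their tag word
** from the frame links in the snapshot map, except slot 0, which keeps its
** link to the previous frame.
*/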
2067
2068 /* -- GC handling --------------------------------------------------------- */
2069
2070 /* Marker to prevent patching the GC check exit. */
2071 #define PPC_NOPATCH_GC_CHECK PPCI_ORIS
2072
2073 /* Check GC threshold and do one or more GC steps. */
2074 static void asm_gc_check(ASMState *as)
2075 {
2076 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
2077 IRRef args[2];
2078 MCLabel l_end;
2079 Reg tmp;
2080 ra_evictset(as, RSET_SCRATCH);
2081 l_end = emit_label(as);
2082 /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
2083 asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
2084 *--as->mcp = PPC_NOPATCH_GC_CHECK;
2085 emit_ai(as, PPCI_CMPWI, RID_RET, 0);
2086 args[0] = ASMREF_TMP1; /* global_State *g */
2087 args[1] = ASMREF_TMP2; /* MSize steps */
2088 asm_gencall(as, ci, args);
2089 emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
2090 tmp = ra_releasetmp(as, ASMREF_TMP2);
2091 emit_loadi(as, tmp, as->gcsteps);
2092 /* Jump around GC step if GC total < GC threshold. */
2093 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end);
2094 emit_ab(as, PPCI_CMPLW, RID_TMP, tmp);
2095 emit_getgl(as, tmp, gc.threshold);
2096 emit_getgl(as, RID_TMP, gc.total);
2097 as->gcsteps = 0;
2098 checkmclim(as);
2099 }
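/*
** Worked example (annotation, executed order, roughly):
**   lwz    TMP, gc.total         ; via RID_JGL
**   lwz    tmp, gc.threshold
**   cmplw  TMP, tmp
**   blt+   l_end                 ; total < threshold: skip the GC step
**   li     tmp, gcsteps
**   addi   TMP1, JGL, -32768     ; recover global_State *g from biased JGL
**   bl     lj_gc_step_jit
**   cmpwi  RET, 0
**   <PPC_NOPATCH_GC_CHECK marker>
**   bne    ->exit                ; nonzero: exit the trace to sync GC state
** l_end:
*/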
2100
2101 /* -- Loop handling ------------------------------------------------------- */
2102
2103 /* Fixup the loop branch. */
2104 static void asm_loop_fixup(ASMState *as)
2105 {
2106 MCode *p = as->mctop;
2107 MCode *target = as->mcp;
2108 if (as->loopinv) { /* Inverted loop branch? */
2109 /* asm_guardcc already inverted the cond branch and patched the final b. */
2110 p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2);
2111 } else {
2112 p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
2113 }
2114 }
2115
2116 /* -- Head of trace ------------------------------------------------------- */
2117
2118 /* Coalesce BASE register for a root trace. */
2119 static void asm_head_root_base(ASMState *as)
2120 {
2121 IRIns *ir = IR(REF_BASE);
2122 Reg r = ir->r;
2123 if (ra_hasreg(r)) {
2124 ra_free(as, r);
2125 if (rset_test(as->modset, r) || irt_ismarked(ir->t))
2126 ir->r = RID_INIT; /* No inheritance for modified BASE register. */
2127 if (r != RID_BASE)
2128 emit_mr(as, r, RID_BASE);
2129 }
2130 }
2131
2132 /* Coalesce BASE register for a side trace. */
2133 static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
2134 {
2135 IRIns *ir = IR(REF_BASE);
2136 Reg r = ir->r;
2137 if (ra_hasreg(r)) {
2138 ra_free(as, r);
2139 if (rset_test(as->modset, r) || irt_ismarked(ir->t))
2140 ir->r = RID_INIT; /* No inheritance for modified BASE register. */
2141 if (irp->r == r) {
2142 rset_clear(allow, r); /* Mark same BASE register as coalesced. */
2143 } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
2144 rset_clear(allow, irp->r);
2145 emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */
2146 } else {
2147 emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
2148 }
2149 }
2150 return allow;
2151 }
2152
2153 /* -- Tail of trace ------------------------------------------------------- */
2154
2155 /* Fixup the tail code. */
2156 static void asm_tail_fixup(ASMState *as, TraceNo lnk)
2157 {
2158 MCode *p = as->mctop;
2159 MCode *target;
2160 int32_t spadj = as->T->spadjust;
2161 if (spadj == 0) {
2162 *--p = PPCI_NOP;
2163 *--p = PPCI_NOP;
2164 as->mctop = p;
2165 } else {
2166 /* Patch stack adjustment. */
2167 lj_assertA(checki16(CFRAME_SIZE+spadj), "stack adjustment out of range");
2168 p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
2169 p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
2170 }
2171 /* Patch exit branch. */
2172 target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
2173 p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
2174 }
2175
2176 /* Prepare tail of code. */
2177 static void asm_tail_prep(ASMState *as)
2178 {
2179 MCode *p = as->mctop - 1; /* Leave room for exit branch. */
2180 if (as->loopref) {
2181 as->invmcp = as->mcp = p;
2182 } else {
2183 as->mcp = p-2; /* Leave room for stack pointer adjustment. */
2184 as->invmcp = NULL;
2185 }
2186 }
2187
2188 /* -- Trace setup --------------------------------------------------------- */
2189
2190 /* Ensure there are enough stack slots for call arguments. */
2191 static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
2192 {
2193 IRRef args[CCI_NARGS_MAX*2];
2194 uint32_t i, nargs = CCI_XNARGS(ci);
2195 int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
2196 asm_collectargs(as, ir, ci, args);
2197 for (i = 0; i < nargs; i++)
2198 if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
2199 if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
2200 } else {
2201 if (ngpr > 0) ngpr--; else nslots++;
2202 }
2203 if (nslots > as->evenspill) /* Leave room for args in stack slots. */
2204 as->evenspill = nslots;
2205 return (!LJ_SOFTFP && irt_isfp(ir->t)) ? REGSP_HINT(RID_FPRET) :
2206 REGSP_HINT(RID_RET);
2207 }
2208
2209 static void asm_setup_target(ASMState *as)
2210 {
2211 asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
2212 }
2213
2214 /* -- Trace patching ------------------------------------------------------ */
2215
2216 /* Patch exit jumps of existing machine code to a new target. */
2217 void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
2218 {
2219 MCode *p = T->mcode;
2220 MCode *pe = (MCode *)((char *)p + T->szmcode);
2221 MCode *px = exitstub_trace_addr(T, exitno);
2222 MCode *cstart = NULL;
2223 MCode *mcarea = lj_mcode_patch(J, p, 0);
2224 int clearso = 0, patchlong = 1;
2225 for (; p < pe; p++) {
2226 /* Look for exitstub branch, try to replace with branch to target. */
2227 uint32_t ins = *p;
2228 if ((ins & 0xfc000000u) == 0x40000000u &&
2229 ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) {
2230 ptrdiff_t delta = (char *)target - (char *)p;
2231 if (((ins >> 16) & 3) == (CC_SO&3)) {
2232 clearso = sizeof(MCode);
2233 delta -= sizeof(MCode);
2234 }
2235 /* Many, but not all, short-range branches can be patched directly. */
2236 if (p[-1] == PPC_NOPATCH_GC_CHECK) {
2237 patchlong = 0;
2238 } else if (((delta + 0x8000) >> 16) == 0) {
2239 *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) |
2240 ((delta & 0x8000) * (PPCF_Y/0x8000));
2241 if (!cstart) cstart = p;
2242 }
2243 } else if ((ins & 0xfc000000u) == PPCI_B &&
2244 ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
2245 ptrdiff_t delta = (char *)target - (char *)p;
2246 lj_assertJ(((delta + 0x02000000) >> 26) == 0,
2247 "branch target out of range");
2248 *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
2249 if (!cstart) cstart = p;
2250 }
2251 }
2252 /* Always patch the long-range branch in the exit stub itself, except when we can't. */
2253 if (patchlong) {
2254 ptrdiff_t delta = (char *)target - (char *)px - clearso;
2255 lj_assertJ(((delta + 0x02000000) >> 26) == 0,
2256 "branch target out of range");
2257 *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
2258 }
2259 if (!cstart) cstart = px;
2260 lj_mcode_sync(cstart, px+1);
2261 if (clearso) { /* Extend the current trace. Ugly workaround. */
2262 MCode *pp = J->cur.mcode;
2263 J->cur.szmcode += sizeof(MCode);
2264 *--pp = PPCI_MCRXR; /* Clear SO flag. */
2265 J->cur.mcode = pp;
2266 lj_mcode_sync(pp, pp+1);
2267 }
2268 lj_mcode_patch(J, mcarea, 1);
2269 }
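/*
** Note (annotation): two branch encodings are rewritten here. Conditional
** exit branches ('bc', 16 bit displacement) can only be redirected if the
** new target is within +-32K, which is what the (delta + 0x8000) >> 16 test
** checks; unconditional 'b' branches (26 bit displacement) are always
** redirected. The long-range 'b' inside the exit stub is patched as the
** fallback, unless the exit is protected by PPC_NOPATCH_GC_CHECK. Branches
** conditional on the SO bit are redirected one instruction early, to an
** mcrxr prepended to the target trace that clears SO first.
*/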
2270
2271