1 /*
2 ** PPC IR assembler (SSA IR -> machine code).
3 ** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
4 */
5
6 /* -- Register allocator extensions --------------------------------------- */
7
8 /* Allocate a register with a hint. */
9 static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
10 {
11 Reg r = IR(ref)->r;
12 if (ra_noreg(r)) {
13 if (!ra_hashint(r) && !iscrossref(as, ref))
14 ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
15 r = ra_allocref(as, ref, allow);
16 }
17 ra_noweak(as, r);
18 return r;
19 }
20
21 /* Allocate two source registers for three-operand instructions. */
22 static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
23 {
24 IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
25 Reg left = irl->r, right = irr->r;
26 if (ra_hasreg(left)) {
27 ra_noweak(as, left);
28 if (ra_noreg(right))
29 right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
30 else
31 ra_noweak(as, right);
32 } else if (ra_hasreg(right)) {
33 ra_noweak(as, right);
34 left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
35 } else if (ra_hashint(right)) {
36 right = ra_allocref(as, ir->op2, allow);
37 left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
38 } else {
39 left = ra_allocref(as, ir->op1, allow);
40 right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
41 }
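/* Both registers are packed into a single return value; callers unpack it
** with: right = (left >> 8); left &= 255;
*/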
42 return left | (right << 8);
43 }
44
45 /* -- Guard handling ------------------------------------------------------ */
46
47 /* Setup exit stubs after the end of each trace. */
48 static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
49 {
50 ExitNo i;
51 MCode *mxp = as->mctop;
52 if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
53 asm_mclimit(as);
54 /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */
55 for (i = nexits-1; (int32_t)i >= 0; i--)
56 *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2);
57 *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */
58 mxp--;
59 *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2);
60 *--mxp = PPCI_MFLR|PPCF_T(RID_TMP);
61 as->mctop = mxp;
62 }
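/* A minimal sketch of the resulting layout (as->mctop grows downwards),
** assuming nexits == 2:
**   mctop+0: mflr r0
**   mctop+1: bl ->vm_exit_handler
**   mctop+2: li r0, traceno
**   mctop+3: bl mctop+0   <- stub for exit #0
**   mctop+4: bl mctop+0   <- stub for exit #1
** Every per-exit stub is a single bl back to the common prologue, which is
** why asm_exitstub_addr() returns as->mctop + exitno + 3.
*/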
63
64 static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
65 {
66 /* Keep this in-sync with exitstub_trace_addr(). */
67 return as->mctop + exitno + 3;
68 }
69
70 /* Emit conditional branch to exit for guard. */
71 static void asm_guardcc(ASMState *as, PPCCC cc)
72 {
73 MCode *target = asm_exitstub_addr(as, as->snapno);
74 MCode *p = as->mcp;
75 if (LJ_UNLIKELY(p == as->invmcp)) {
76 as->loopinv = 1;
77 *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2);
78 emit_condbranch(as, PPCI_BC, cc^4, p);
79 return;
80 }
81 emit_condbranch(as, PPCI_BC, cc, target);
82 }
83
84 /* -- Operand fusion ------------------------------------------------------ */
85
86 /* Limit linear search to this distance. Avoids O(n^2) behavior. */
87 #define CONFLICT_SEARCH_LIM 31
88
89 /* Check if there's no conflicting instruction between curins and ref. */
90 static int noconflict(ASMState *as, IRRef ref, IROp conflict)
91 {
92 IRIns *ir = as->ir;
93 IRRef i = as->curins;
94 if (i > ref + CONFLICT_SEARCH_LIM)
95 return 0; /* Give up, ref is too far away. */
96 while (--i > ref)
97 if (ir[i].o == conflict)
98 return 0; /* Conflict found. */
99 return 1; /* Ok, no conflict. */
100 }
101
102 /* Fuse the array base of colocated arrays. */
103 static int32_t asm_fuseabase(ASMState *as, IRRef ref)
104 {
105 IRIns *ir = IR(ref);
106 if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
107 !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
108 return (int32_t)sizeof(GCtab);
109 return 0;
110 }
111
112 /* Indicates load/store indexed is ok. */
113 #define AHUREF_LSX ((int32_t)0x80000000)
114
115 /* Fuse array/hash/upvalue reference into register+offset operand. */
116 static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
117 {
118 IRIns *ir = IR(ref);
119 if (ra_noreg(ir->r)) {
120 if (ir->o == IR_AREF) {
121 if (mayfuse(as, ref)) {
122 if (irref_isk(ir->op2)) {
123 IRRef tab = IR(ir->op1)->op1;
124 int32_t ofs = asm_fuseabase(as, tab);
125 IRRef refa = ofs ? tab : ir->op1;
126 ofs += 8*IR(ir->op2)->i;
127 if (checki16(ofs)) {
128 *ofsp = ofs;
129 return ra_alloc1(as, refa, allow);
130 }
131 }
132 if (*ofsp == AHUREF_LSX) {
133 Reg base = ra_alloc1(as, ir->op1, allow);
134 Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
135 return base | (idx << 8);
136 }
137 }
138 } else if (ir->o == IR_HREFK) {
139 if (mayfuse(as, ref)) {
140 int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
141 if (checki16(ofs)) {
142 *ofsp = ofs;
143 return ra_alloc1(as, ir->op1, allow);
144 }
145 }
146 } else if (ir->o == IR_UREFC) {
147 if (irref_isk(ir->op1)) {
148 GCfunc *fn = ir_kfunc(IR(ir->op1));
149 int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
150 int32_t jgl = (intptr_t)J2G(as->J);
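/* RID_JGL holds (char *)J2G(as->J) + 32768, so if the upvalue's TValue lies
** within the first 64KB starting at J2G(as->J), it is reachable off RID_JGL
** with a signed 16 bit displacement after re-biasing by -32768.
*/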
151 if ((uint32_t)(ofs-jgl) < 65536) {
152 *ofsp = ofs-jgl-32768;
153 return RID_JGL;
154 } else {
155 *ofsp = (int16_t)ofs;
156 return ra_allock(as, ofs-(int16_t)ofs, allow);
157 }
158 }
159 } else if (ir->o == IR_TMPREF) {
160 *ofsp = (int32_t)(offsetof(global_State, tmptv)-32768);
161 return RID_JGL;
162 }
163 }
164 *ofsp = 0;
165 return ra_alloc1(as, ref, allow);
166 }
167
168 /* Fuse XLOAD/XSTORE reference into load/store operand. */
169 static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
170 RegSet allow, int32_t ofs)
171 {
172 IRIns *ir = IR(ref);
173 Reg base;
174 if (ra_noreg(ir->r) && canfuse(as, ir)) {
175 if (ir->o == IR_ADD) {
176 int32_t ofs2;
177 if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
178 ofs = ofs2;
179 ref = ir->op1;
180 } else if (ofs == 0) {
181 Reg right, left = ra_alloc2(as, ir, allow);
182 right = (left >> 8); left &= 255;
183 emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
184 return;
185 }
186 } else if (ir->o == IR_STRREF) {
187 lj_assertA(ofs == 0, "bad usage");
188 ofs = (int32_t)sizeof(GCstr);
189 if (irref_isk(ir->op2)) {
190 ofs += IR(ir->op2)->i;
191 ref = ir->op1;
192 } else if (irref_isk(ir->op1)) {
193 ofs += IR(ir->op1)->i;
194 ref = ir->op2;
195 } else {
196 /* NYI: Fuse ADD with constant. */
197 Reg tmp, right, left = ra_alloc2(as, ir, allow);
198 right = (left >> 8); left &= 255;
199 tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right));
200 emit_fai(as, pi, rt, tmp, ofs);
201 emit_tab(as, PPCI_ADD, tmp, left, right);
202 return;
203 }
204 if (!checki16(ofs)) {
205 Reg left = ra_alloc1(as, ref, allow);
206 Reg right = ra_allock(as, ofs, rset_exclude(allow, left));
207 emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
208 return;
209 }
210 }
211 }
212 base = ra_alloc1(as, ref, allow);
213 emit_fai(as, pi, rt, base, ofs);
214 }
215
216 /* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */
217 static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
218 RegSet allow)
219 {
220 IRIns *ira = IR(ref);
221 Reg right, left;
222 if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) {
223 left = ra_alloc2(as, ira, allow);
224 right = (left >> 8); left &= 255;
225 } else {
226 right = ra_alloc1(as, ref, allow);
227 left = RID_R0;
228 }
229 emit_tab(as, pi, rt, left, right);
230 }
231
232 #if !LJ_SOFTFP
233 /* Fuse to multiply-add/sub instruction. */
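/* pi is used when the MUL is the left operand and pir when it is the right
** one, e.g. asm_sub() passes FMSUB/FNMSUB since a-b*c == -((b*c)-a) maps to
** fnmsub with the operands swapped.
*/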
234 static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
235 {
236 IRRef lref = ir->op1, rref = ir->op2;
237 IRIns *irm;
238 if (lref != rref &&
239 ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
240 ra_noreg(irm->r)) ||
241 (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
242 (rref = lref, pi = pir, ra_noreg(irm->r))))) {
243 Reg dest = ra_dest(as, ir, RSET_FPR);
244 Reg add = ra_alloc1(as, rref, RSET_FPR);
245 Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add));
246 right = (left >> 8); left &= 255;
247 emit_facb(as, pi, dest, left, right, add);
248 return 1;
249 }
250 return 0;
251 }
252 #endif
253
254 /* -- Calls --------------------------------------------------------------- */
255
256 /* Generate a call to a C function. */
257 static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
258 {
259 uint32_t n, nargs = CCI_XNARGS(ci);
260 int32_t ofs = 8;
261 Reg gpr = REGARG_FIRSTGPR;
262 #if !LJ_SOFTFP
263 Reg fpr = REGARG_FIRSTFPR;
264 #endif
265 if ((void *)ci->func)
266 emit_call(as, (void *)ci->func);
267 for (n = 0; n < nargs; n++) { /* Setup args. */
268 IRRef ref = args[n];
269 if (ref) {
270 IRIns *ir = IR(ref);
271 #if !LJ_SOFTFP
272 if (irt_isfp(ir->t)) {
273 if (fpr <= REGARG_LASTFPR) {
274 lj_assertA(rset_test(as->freeset, fpr),
275 "reg %d not free", fpr); /* Already evicted. */
276 ra_leftov(as, fpr, ref);
277 fpr++;
278 } else {
279 Reg r = ra_alloc1(as, ref, RSET_FPR);
280 if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
281 emit_spstore(as, ir, r, ofs);
282 ofs += irt_isnum(ir->t) ? 8 : 4;
283 }
284 } else
285 #endif
286 {
287 if (gpr <= REGARG_LASTGPR) {
288 lj_assertA(rset_test(as->freeset, gpr),
289 "reg %d not free", gpr); /* Already evicted. */
290 ra_leftov(as, gpr, ref);
291 gpr++;
292 } else {
293 Reg r = ra_alloc1(as, ref, RSET_GPR);
294 emit_spstore(as, ir, r, ofs);
295 ofs += 4;
296 }
297 }
298 } else {
299 if (gpr <= REGARG_LASTGPR)
300 gpr++;
301 else
302 ofs += 4;
303 }
304 checkmclim(as);
305 }
306 #if !LJ_SOFTFP
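/* The 32 bit SysV ABI signals FPR use to vararg callees in CR bit 6:
** creqv 6,6,6 sets the bit (FP args passed in FPRs), crxor 6,6,6 clears it.
*/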
307 if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
308 emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
309 #endif
310 }
311
312 /* Setup result reg/sp for call. Evict scratch regs. */
313 static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
314 {
315 RegSet drop = RSET_SCRATCH;
316 int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
317 #if !LJ_SOFTFP
318 if ((ci->flags & CCI_NOFPRCLOBBER))
319 drop &= ~RSET_FPR;
320 #endif
321 if (ra_hasreg(ir->r))
322 rset_clear(drop, ir->r); /* Dest reg handled below. */
323 if (hiop && ra_hasreg((ir+1)->r))
324 rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
325 ra_evictset(as, drop); /* Evictions must be performed first. */
326 if (ra_used(ir)) {
327 lj_assertA(!irt_ispri(ir->t), "PRI dest");
328 if (!LJ_SOFTFP && irt_isfp(ir->t)) {
329 if ((ci->flags & CCI_CASTU64)) {
330 /* Use spill slot or temp slots. */
331 int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
332 Reg dest = ir->r;
333 if (ra_hasreg(dest)) {
334 ra_free(as, dest);
335 ra_modified(as, dest);
336 emit_fai(as, PPCI_LFD, dest, RID_SP, ofs);
337 }
338 emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs);
339 emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4);
340 } else {
341 ra_destreg(as, ir, RID_FPRET);
342 }
343 } else if (hiop) {
344 ra_destpair(as, ir);
345 } else {
346 ra_destreg(as, ir, RID_RET);
347 }
348 }
349 }
350
351 static void asm_callx(ASMState *as, IRIns *ir)
352 {
353 IRRef args[CCI_NARGS_MAX*2];
354 CCallInfo ci;
355 IRRef func;
356 IRIns *irf;
357 ci.flags = asm_callx_flags(as, ir);
358 asm_collectargs(as, ir, &ci, args);
359 asm_setupresult(as, ir, &ci);
360 func = ir->op2; irf = IR(func);
361 if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
362 if (irref_isk(func)) { /* Call to constant address. */
363 ci.func = (ASMFunction)(void *)(intptr_t)(irf->i);
364 } else { /* Need a non-argument register for indirect calls. */
365 RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
366 Reg freg = ra_alloc1(as, func, allow);
367 *--as->mcp = PPCI_BCTRL;
368 *--as->mcp = PPCI_MTCTR | PPCF_T(freg);
369 ci.func = (ASMFunction)(void *)0;
370 }
371 asm_gencall(as, &ci, args);
372 }
373
374 /* -- Returns ------------------------------------------------------------- */
375
376 /* Return to lower frame. Guard that it goes to the right spot. */
377 static void asm_retf(ASMState *as, IRIns *ir)
378 {
379 Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
380 void *pc = ir_kptr(IR(ir->op2));
381 int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
382 as->topslot -= (BCReg)delta;
383 if ((int32_t)as->topslot < 0) as->topslot = 0;
384 irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
385 emit_setgl(as, base, jit_base);
386 emit_addptr(as, base, -8*delta);
387 asm_guardcc(as, CC_NE);
388 emit_ab(as, PPCI_CMPW, RID_TMP,
389 ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
390 emit_tai(as, PPCI_LWZ, RID_TMP, base, -8);
391 }
392
393 /* -- Buffer operations --------------------------------------------------- */
394
395 #if LJ_HASBUFFER
396 static void asm_bufhdr_write(ASMState *as, Reg sb)
397 {
398 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
399 IRIns irgc;
400 irgc.ot = IRT(0, IRT_PGC); /* GC type. */
401 emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
402 emit_rot(as, PPCI_RLWIMI, RID_TMP, tmp, 0, 31-lj_fls(SBUF_MASK_FLAG), 31);
403 emit_getgl(as, RID_TMP, cur_L);
404 emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
405 }
406 #endif
407
408 /* -- Type conversions ---------------------------------------------------- */
409
410 #if !LJ_SOFTFP
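/* Checked double -> int conversion: truncate with fctiwz, then rebuild a
** double from the 32 bit result via the 2^52+2^31 bias trick and compare it
** with the original value. The guard exits if they differ (or the input is
** NaN), i.e. whenever the number is not an exact 32 bit integer.
*/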
411 static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
412 {
413 RegSet allow = RSET_FPR;
414 Reg tmp = ra_scratch(as, rset_clear(allow, left));
415 Reg fbias = ra_scratch(as, rset_clear(allow, tmp));
416 Reg dest = ra_dest(as, ir, RSET_GPR);
417 Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest));
418 asm_guardcc(as, CC_NE);
419 emit_fab(as, PPCI_FCMPU, 0, tmp, left);
420 emit_fab(as, PPCI_FSUB, tmp, tmp, fbias);
421 emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP);
422 emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO);
423 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
424 emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
425 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
426 emit_lsptr(as, PPCI_LFS, (fbias & 31),
427 (void *)&as->J->k32[LJ_K32_2P52_2P31], RSET_GPR);
428 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
429 emit_fb(as, PPCI_FCTIWZ, tmp, left);
430 }
431
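/* Lua BitOp conversion: ir->op2 is the bias constant 2^52+2^51, so after the
** fadd the low word of the double's mantissa holds the operand as a 32 bit
** integer, which is then reloaded from the stack temp.
*/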
432 static void asm_tobit(ASMState *as, IRIns *ir)
433 {
434 RegSet allow = RSET_FPR;
435 Reg dest = ra_dest(as, ir, RSET_GPR);
436 Reg left = ra_alloc1(as, ir->op1, allow);
437 Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
438 Reg tmp = ra_scratch(as, rset_clear(allow, right));
439 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
440 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
441 emit_fab(as, PPCI_FADD, tmp, left, right);
442 }
443 #endif
444
445 static void asm_conv(ASMState *as, IRIns *ir)
446 {
447 IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
448 #if !LJ_SOFTFP
449 int stfp = (st == IRT_NUM || st == IRT_FLOAT);
450 #endif
451 IRRef lref = ir->op1;
452 /* 64 bit integer conversions are handled by SPLIT. */
453 lj_assertA(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64)),
454 "IR %04d has unsplit 64 bit type",
455 (int)(ir - as->ir) - REF_BIAS);
456 #if LJ_SOFTFP
457 /* FP conversions are handled by SPLIT. */
458 lj_assertA(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT),
459 "IR %04d has FP type",
460 (int)(ir - as->ir) - REF_BIAS);
461 /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
462 #else
463 lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
464 if (irt_isfp(ir->t)) {
465 Reg dest = ra_dest(as, ir, RSET_FPR);
466 if (stfp) { /* FP to FP conversion. */
467 if (st == IRT_NUM) /* double -> float conversion. */
468 emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR));
469 else /* float -> double conversion is a no-op on PPC. */
470 ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
471 } else { /* Integer to FP conversion. */
472 /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */
473 /* IRT_U32: Bias with 2^52, subtract 2^52. */
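/* Worked example for IRT_INT with left = -5: the two stores build a double
** with hi = 0x43300000 and lo = (-5) ^ 0x80000000 = 0x7ffffffb, i.e. the
** value 2^52 + 2^31 - 5; subtracting the 2^52+2^31 constant leaves -5.0.
*/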
474 RegSet allow = RSET_GPR;
475 Reg left = ra_alloc1(as, lref, allow);
476 Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
477 Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
478 if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
479 emit_fab(as, PPCI_FSUB, dest, dest, fbias);
480 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
481 emit_lsptr(as, PPCI_LFS, (fbias & 31),
482 &as->J->k32[st == IRT_U32 ? LJ_K32_2P52 : LJ_K32_2P52_2P31],
483 rset_clear(allow, hibias));
484 emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
485 RID_SP, SPOFS_TMPLO);
486 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
487 if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000);
488 }
489 } else if (stfp) { /* FP to integer conversion. */
490 if (irt_isguard(ir->t)) {
491 /* Checked conversions are only supported from number to int. */
492 lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
493 "bad type for checked CONV");
494 asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
495 } else {
496 Reg dest = ra_dest(as, ir, RSET_GPR);
497 Reg left = ra_alloc1(as, lref, RSET_FPR);
498 Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
499 if (irt_isu32(ir->t)) {
500 /* Convert both x and x-2^31 to int and merge results. */
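/* In execution order (instructions are emitted backwards) this computes,
** with x = left:
**   i1 = (int32_t)fctiwz(x - 2^31);  i2 = (int32_t)fctiwz(x);
**   mask = i1 >> 31;                 (all ones iff x < 2^31)
**   dest = (i2 & mask) | ((i1 + 2^31) & ~mask);
*/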
501 Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest));
502 emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */
503 emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP);
504 emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP);
505 emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */
506 emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */
507 emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */
508 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
509 emit_tai(as, PPCI_LWZ, dest,
510 RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */
511 emit_fb(as, PPCI_FCTIWZ, tmp, left);
512 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
513 emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
514 emit_fab(as, PPCI_FSUB, tmp, left, tmp);
515 emit_lsptr(as, PPCI_LFS, (tmp & 31),
516 (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR);
517 } else {
518 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
519 emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
520 emit_fb(as, PPCI_FCTIWZ, tmp, left);
521 }
522 }
523 } else
524 #endif
525 {
526 Reg dest = ra_dest(as, ir, RSET_GPR);
527 if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
528 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
529 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
530 if ((ir->op2 & IRCONV_SEXT))
531 emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
532 else
533 emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31);
534 } else { /* 32/64 bit integer conversions. */
535 /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
536 ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
537 }
538 }
539 }
540
541 static void asm_strto(ASMState *as, IRIns *ir)
542 {
543 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
544 IRRef args[2];
545 int32_t ofs = SPOFS_TMP;
546 #if LJ_SOFTFP
547 ra_evictset(as, RSET_SCRATCH);
548 if (ra_used(ir)) {
549 if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
550 (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) {
551 int i;
552 for (i = 0; i < 2; i++) {
553 Reg r = (ir+i)->r;
554 if (ra_hasreg(r)) {
555 ra_free(as, r);
556 ra_modified(as, r);
557 emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
558 }
559 }
560 ofs = sps_scale(ir->s & ~1);
561 } else {
562 Reg rhi = ra_dest(as, ir+1, RSET_GPR);
563 Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
564 emit_tai(as, PPCI_LWZ, rhi, RID_SP, ofs);
565 emit_tai(as, PPCI_LWZ, rlo, RID_SP, ofs+4);
566 }
567 }
568 #else
569 RegSet drop = RSET_SCRATCH;
570 if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
571 ra_evictset(as, drop);
572 if (ir->s) ofs = sps_scale(ir->s);
573 #endif
574 asm_guardcc(as, CC_EQ);
575 emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
576 args[0] = ir->op1; /* GCstr *str */
577 args[1] = ASMREF_TMP1; /* TValue *n */
578 asm_gencall(as, ci, args);
579 /* Store the result to the spill slot or temp slots. */
580 emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
581 }
582
583 /* -- Memory references --------------------------------------------------- */
584
585 /* Get pointer to TValue. */
586 static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
587 {
588 int32_t tmpofs = (int32_t)(offsetof(global_State, tmptv)-32768);
589 if ((mode & IRTMPREF_IN1)) {
590 IRIns *ir = IR(ref);
591 if (irt_isnum(ir->t)) {
592 if ((mode & IRTMPREF_OUT1)) {
593 #if LJ_SOFTFP
594 lj_assertA(irref_isk(ref), "unsplit FP op");
595 emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
596 emit_setgl(as,
597 ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, RSET_GPR),
598 tmptv.u32.lo);
599 emit_setgl(as,
600 ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, RSET_GPR),
601 tmptv.u32.hi);
602 #else
603 Reg src = ra_alloc1(as, ref, RSET_FPR);
604 emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
605 emit_fai(as, PPCI_STFD, src, RID_JGL, tmpofs);
606 #endif
607 } else if (irref_isk(ref)) {
608 /* Use the number constant itself as a TValue. */
609 ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
610 } else {
611 #if LJ_SOFTFP
612 lj_assertA(0, "unsplit FP op");
613 #else
614 /* Otherwise force a spill and use the spill slot. */
615 emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir));
616 #endif
617 }
618 } else {
619 /* Otherwise use g->tmptv to hold the TValue. */
620 Reg type;
621 emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
622 if (!irt_ispri(ir->t)) {
623 Reg src = ra_alloc1(as, ref, RSET_GPR);
624 emit_setgl(as, src, tmptv.gcr);
625 }
626 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t))
627 type = ra_alloc1(as, ref+1, RSET_GPR);
628 else
629 type = ra_allock(as, irt_toitype(ir->t), RSET_GPR);
630 emit_setgl(as, type, tmptv.it);
631 }
632 } else {
633 emit_tai(as, PPCI_ADDI, dest, RID_JGL, tmpofs);
634 }
635 }
636
637 static void asm_aref(ASMState *as, IRIns *ir)
638 {
639 Reg dest = ra_dest(as, ir, RSET_GPR);
640 Reg idx, base;
641 if (irref_isk(ir->op2)) {
642 IRRef tab = IR(ir->op1)->op1;
643 int32_t ofs = asm_fuseabase(as, tab);
644 IRRef refa = ofs ? tab : ir->op1;
645 ofs += 8*IR(ir->op2)->i;
646 if (checki16(ofs)) {
647 base = ra_alloc1(as, refa, RSET_GPR);
648 emit_tai(as, PPCI_ADDI, dest, base, ofs);
649 return;
650 }
651 }
652 base = ra_alloc1(as, ir->op1, RSET_GPR);
653 idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
654 emit_tab(as, PPCI_ADD, dest, RID_TMP, base);
655 emit_slwi(as, RID_TMP, idx, 3);
656 }
657
658 /* Inlined hash lookup. Specialized for key type and for const keys.
659 ** The equivalent C code is:
660 ** Node *n = hashkey(t, key);
661 ** do {
662 ** if (lj_obj_equal(&n->key, key)) return &n->val;
663 ** } while ((n = nextnode(n)));
664 ** return niltv(L);
665 */
666 static void asm_href(ASMState *as, IRIns *ir, IROp merge)
667 {
668 RegSet allow = RSET_GPR;
669 int destused = ra_used(ir);
670 Reg dest = ra_dest(as, ir, allow);
671 Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
672 Reg key = RID_NONE, tmp1 = RID_TMP, tmp2;
673 Reg tisnum = RID_NONE, tmpnum = RID_NONE;
674 IRRef refkey = ir->op2;
675 IRIns *irkey = IR(refkey);
676 int isk = irref_isk(refkey);
677 IRType1 kt = irkey->t;
678 uint32_t khash;
679 MCLabel l_end, l_loop, l_next;
680
681 rset_clear(allow, tab);
682 #if LJ_SOFTFP
683 if (!isk) {
684 key = ra_alloc1(as, refkey, allow);
685 rset_clear(allow, key);
686 if (irkey[1].o == IR_HIOP) {
687 if (ra_hasreg((irkey+1)->r)) {
688 tmpnum = (irkey+1)->r;
689 ra_noweak(as, tmpnum);
690 } else {
691 tmpnum = ra_allocref(as, refkey+1, allow);
692 }
693 rset_clear(allow, tmpnum);
694 }
695 }
696 #else
697 if (irt_isnum(kt)) {
698 key = ra_alloc1(as, refkey, RSET_FPR);
699 tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
700 tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
701 rset_clear(allow, tisnum);
702 } else if (!irt_ispri(kt)) {
703 key = ra_alloc1(as, refkey, allow);
704 rset_clear(allow, key);
705 }
706 #endif
707 tmp2 = ra_scratch(as, allow);
708 rset_clear(allow, tmp2);
709
710 /* Key not found in chain: jump to exit (if merged) or load niltv. */
711 l_end = emit_label(as);
712 as->invmcp = NULL;
713 if (merge == IR_NE)
714 asm_guardcc(as, CC_EQ);
715 else if (destused)
716 emit_loada(as, dest, niltvg(J2G(as->J)));
717
718 /* Follow hash chain until the end. */
719 l_loop = --as->mcp;
720 emit_ai(as, PPCI_CMPWI, dest, 0);
721 emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next));
722 l_next = emit_label(as);
723
724 /* Type and value comparison. */
725 if (merge == IR_EQ)
726 asm_guardcc(as, CC_EQ);
727 else
728 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
729 if (!LJ_SOFTFP && irt_isnum(kt)) {
730 emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
731 emit_condbranch(as, PPCI_BC, CC_GE, l_next);
732 emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
733 emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n));
734 } else {
735 if (!irt_ispri(kt)) {
736 emit_ab(as, PPCI_CMPW, tmp2, key);
737 emit_condbranch(as, PPCI_BC, CC_NE, l_next);
738 }
739 if (LJ_SOFTFP && ra_hasreg(tmpnum))
740 emit_ab(as, PPCI_CMPW, tmp1, tmpnum);
741 else
742 emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
743 if (!irt_ispri(kt))
744 emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
745 }
746 emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it));
747 *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) |
748 (((char *)as->mcp-(char *)l_loop) & 0xffffu);
749
750 /* Load main position relative to tab->node into dest. */
751 khash = isk ? ir_khash(as, irkey) : 1;
752 if (khash == 0) {
753 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
754 } else {
755 Reg tmphash = tmp1;
756 if (isk)
757 tmphash = ra_allock(as, khash, allow);
758 emit_tab(as, PPCI_ADD, dest, dest, tmp1);
759 emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
760 emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
761 emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
762 emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
763 if (isk) {
764 /* Nothing to do. */
765 } else if (irt_isstr(kt)) {
766 emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, sid));
767 } else { /* Must match with hash*() in lj_tab.c. */
768 emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
769 emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
770 emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
771 emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
772 emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
773 if (LJ_SOFTFP ? (irkey[1].o == IR_HIOP) : irt_isnum(kt)) {
774 #if LJ_SOFTFP
775 emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
776 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
777 emit_tab(as, PPCI_ADD, tmp1, tmpnum, tmpnum);
778 #else
779 int32_t ofs = ra_spill(as, irkey);
780 emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
781 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
782 emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
783 emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
784 emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
785 #endif
786 } else {
787 emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
788 emit_rotlwi(as, dest, tmp1, HASH_ROT1);
789 emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS);
790 emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16);
791 }
792 }
793 }
794 }
795
796 static void asm_hrefk(ASMState *as, IRIns *ir)
797 {
798 IRIns *kslot = IR(ir->op2);
799 IRIns *irkey = IR(kslot->op1);
800 int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
801 int32_t kofs = ofs + (int32_t)offsetof(Node, key);
802 Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
803 Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
804 Reg key = RID_NONE, type = RID_TMP, idx = node;
805 RegSet allow = rset_exclude(RSET_GPR, node);
806 lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
807 if (ofs > 32736) {
808 idx = dest;
809 rset_clear(allow, dest);
810 kofs = (int32_t)offsetof(Node, key);
811 } else if (ra_hasreg(dest)) {
812 emit_tai(as, PPCI_ADDI, dest, node, ofs);
813 }
814 asm_guardcc(as, CC_NE);
815 if (!irt_ispri(irkey->t)) {
816 key = ra_scratch(as, allow);
817 rset_clear(allow, key);
818 }
819 rset_clear(allow, type);
820 if (irt_isnum(irkey->t)) {
821 emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo);
822 asm_guardcc(as, CC_NE);
823 emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi);
824 } else {
825 if (ra_hasreg(key)) {
826 emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. */
827 asm_guardcc(as, CC_NE);
828 }
829 emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t));
830 }
831 if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4);
832 emit_tai(as, PPCI_LWZ, type, idx, kofs);
833 if (ofs > 32736) {
834 emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16);
835 emit_tai(as, PPCI_ADDI, dest, node, ofs);
836 }
837 }
838
839 static void asm_uref(ASMState *as, IRIns *ir)
840 {
841 Reg dest = ra_dest(as, ir, RSET_GPR);
842 if (irref_isk(ir->op1)) {
843 GCfunc *fn = ir_kfunc(IR(ir->op1));
844 MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
845 emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR);
846 } else {
847 Reg uv = ra_scratch(as, RSET_GPR);
848 Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
849 if (ir->o == IR_UREFC) {
850 asm_guardcc(as, CC_NE);
851 emit_ai(as, PPCI_CMPWI, RID_TMP, 1);
852 emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv));
853 emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
854 } else {
855 emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v));
856 }
857 emit_tai(as, PPCI_LWZ, uv, func,
858 (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
859 }
860 }
861
862 static void asm_fref(ASMState *as, IRIns *ir)
863 {
864 UNUSED(as); UNUSED(ir);
865 lj_assertA(!ra_used(ir), "unfused FREF");
866 }
867
868 static void asm_strref(ASMState *as, IRIns *ir)
869 {
870 Reg dest = ra_dest(as, ir, RSET_GPR);
871 IRRef ref = ir->op2, refk = ir->op1;
872 int32_t ofs = (int32_t)sizeof(GCstr);
873 Reg r;
874 if (irref_isk(ref)) {
875 IRRef tmp = refk; refk = ref; ref = tmp;
876 } else if (!irref_isk(refk)) {
877 Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
878 IRIns *irr = IR(ir->op2);
879 if (ra_hasreg(irr->r)) {
880 ra_noweak(as, irr->r);
881 right = irr->r;
882 } else if (mayfuse(as, irr->op2) &&
883 irr->o == IR_ADD && irref_isk(irr->op2) &&
884 checki16(ofs + IR(irr->op2)->i)) {
885 ofs += IR(irr->op2)->i;
886 right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
887 } else {
888 right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
889 }
890 emit_tai(as, PPCI_ADDI, dest, dest, ofs);
891 emit_tab(as, PPCI_ADD, dest, left, right);
892 return;
893 }
894 r = ra_alloc1(as, ref, RSET_GPR);
895 ofs += IR(refk)->i;
896 if (checki16(ofs))
897 emit_tai(as, PPCI_ADDI, dest, r, ofs);
898 else
899 emit_tab(as, PPCI_ADD, dest, r,
900 ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
901 }
902
903 /* -- Loads and stores ---------------------------------------------------- */
904
905 static PPCIns asm_fxloadins(ASMState *as, IRIns *ir)
906 {
907 UNUSED(as);
908 switch (irt_type(ir->t)) {
909 case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
910 case IRT_U8: return PPCI_LBZ;
911 case IRT_I16: return PPCI_LHA;
912 case IRT_U16: return PPCI_LHZ;
913 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_LFD;
914 case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_LFS;
915 default: return PPCI_LWZ;
916 }
917 }
918
919 static PPCIns asm_fxstoreins(ASMState *as, IRIns *ir)
920 {
921 UNUSED(as);
922 switch (irt_type(ir->t)) {
923 case IRT_I8: case IRT_U8: return PPCI_STB;
924 case IRT_I16: case IRT_U16: return PPCI_STH;
925 case IRT_NUM: lj_assertA(!LJ_SOFTFP, "unsplit FP op"); return PPCI_STFD;
926 case IRT_FLOAT: if (!LJ_SOFTFP) return PPCI_STFS;
927 default: return PPCI_STW;
928 }
929 }
930
931 static void asm_fload(ASMState *as, IRIns *ir)
932 {
933 Reg dest = ra_dest(as, ir, RSET_GPR);
934 PPCIns pi = asm_fxloadins(as, ir);
935 Reg idx;
936 int32_t ofs;
937 if (ir->op1 == REF_NIL) { /* FLOAD from GG_State with offset. */
938 idx = RID_JGL;
939 ofs = (ir->op2 << 2) - 32768 - GG_OFS(g);
940 } else {
941 idx = ra_alloc1(as, ir->op1, RSET_GPR);
942 if (ir->op2 == IRFL_TAB_ARRAY) {
943 ofs = asm_fuseabase(as, ir->op1);
944 if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
945 emit_tai(as, PPCI_ADDI, dest, idx, ofs);
946 return;
947 }
948 }
949 ofs = field_ofs[ir->op2];
950 }
951 lj_assertA(!irt_isi8(ir->t), "unsupported FLOAD I8");
952 emit_tai(as, pi, dest, idx, ofs);
953 }
954
955 static void asm_fstore(ASMState *as, IRIns *ir)
956 {
957 if (ir->r != RID_SINK) {
958 Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
959 IRIns *irf = IR(ir->op1);
960 Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
961 int32_t ofs = field_ofs[irf->op2];
962 PPCIns pi = asm_fxstoreins(as, ir);
963 emit_tai(as, pi, src, idx, ofs);
964 }
965 }
966
967 static void asm_xload(ASMState *as, IRIns *ir)
968 {
969 Reg dest = ra_dest(as, ir,
970 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
971 lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
972 if (irt_isi8(ir->t))
973 emit_as(as, PPCI_EXTSB, dest, dest);
974 asm_fusexref(as, asm_fxloadins(as, ir), dest, ir->op1, RSET_GPR, 0);
975 }
976
977 static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs)
978 {
979 IRIns *irb;
980 if (ir->r == RID_SINK)
981 return;
982 if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP &&
983 ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) {
984 /* Fuse BSWAP with XSTORE to stwbrx. */
985 Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
986 asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
987 } else {
988 Reg src = ra_alloc1(as, ir->op2,
989 (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
990 asm_fusexref(as, asm_fxstoreins(as, ir), src, ir->op1,
991 rset_exclude(RSET_GPR, src), ofs);
992 }
993 }
994
995 #define asm_xstore(as, ir) asm_xstore_(as, ir, 0)
996
997 static void asm_ahuvload(ASMState *as, IRIns *ir)
998 {
999 IRType1 t = ir->t;
1000 Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
1001 RegSet allow = RSET_GPR;
1002 int32_t ofs = AHUREF_LSX;
1003 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) {
1004 t.irt = IRT_NUM;
1005 if (ra_used(ir+1)) {
1006 type = ra_dest(as, ir+1, allow);
1007 rset_clear(allow, type);
1008 }
1009 ofs = 0;
1010 }
1011 if (ra_used(ir)) {
1012 lj_assertA((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
1013 irt_isint(ir->t) || irt_isaddr(ir->t),
1014 "bad load type %d", irt_type(ir->t));
1015 if (LJ_SOFTFP || !irt_isnum(t)) ofs = 0;
1016 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
1017 rset_clear(allow, dest);
1018 }
1019 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1020 if (ir->o == IR_VLOAD) {
1021 ofs = ofs != AHUREF_LSX ? ofs + 8 * ir->op2 :
1022 ir->op2 ? 8 * ir->op2 : AHUREF_LSX;
1023 }
1024 if (irt_isnum(t)) {
1025 Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx));
1026 asm_guardcc(as, CC_GE);
1027 emit_ab(as, PPCI_CMPLW, type, tisnum);
1028 if (ra_hasreg(dest)) {
1029 if (!LJ_SOFTFP && ofs == AHUREF_LSX) {
1030 tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
1031 (idx&255)), (idx>>8)));
1032 emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
1033 } else {
1034 emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest, idx,
1035 ofs+4*LJ_SOFTFP);
1036 }
1037 }
1038 } else {
1039 asm_guardcc(as, CC_NE);
1040 emit_ai(as, PPCI_CMPWI, type, irt_toitype(t));
1041 if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4);
1042 }
1043 if (ofs == AHUREF_LSX) {
1044 emit_tab(as, PPCI_LWZX, type, (idx&255), tmp);
1045 emit_slwi(as, tmp, (idx>>8), 3);
1046 } else {
1047 emit_tai(as, PPCI_LWZ, type, idx, ofs);
1048 }
1049 }
1050
1051 static void asm_ahustore(ASMState *as, IRIns *ir)
1052 {
1053 RegSet allow = RSET_GPR;
1054 Reg idx, src = RID_NONE, type = RID_NONE;
1055 int32_t ofs = AHUREF_LSX;
1056 if (ir->r == RID_SINK)
1057 return;
1058 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1059 src = ra_alloc1(as, ir->op2, RSET_FPR);
1060 } else {
1061 if (!irt_ispri(ir->t)) {
1062 src = ra_alloc1(as, ir->op2, allow);
1063 rset_clear(allow, src);
1064 ofs = 0;
1065 }
1066 if (LJ_SOFTFP && (ir+1)->o == IR_HIOP)
1067 type = ra_alloc1(as, (ir+1)->op2, allow);
1068 else
1069 type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
1070 rset_clear(allow, type);
1071 }
1072 idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
1073 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
1074 if (ofs == AHUREF_LSX) {
1075 emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
1076 emit_slwi(as, RID_TMP, (idx>>8), 3);
1077 } else {
1078 emit_fai(as, PPCI_STFD, src, idx, ofs);
1079 }
1080 } else {
1081 if (ra_hasreg(src))
1082 emit_tai(as, PPCI_STW, src, idx, ofs+4);
1083 if (ofs == AHUREF_LSX) {
1084 emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP);
1085 emit_slwi(as, RID_TMP, (idx>>8), 3);
1086 } else {
1087 emit_tai(as, PPCI_STW, type, idx, ofs);
1088 }
1089 }
1090 }
1091
1092 static void asm_sload(ASMState *as, IRIns *ir)
1093 {
1094 int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4);
1095 IRType1 t = ir->t;
1096 Reg dest = RID_NONE, type = RID_NONE, base;
1097 RegSet allow = RSET_GPR;
1098 int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
1099 if (hiop)
1100 t.irt = IRT_NUM;
1101 lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
1102 "bad parent SLOAD"); /* Handled by asm_head_side(). */
1103 lj_assertA(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK),
1104 "inconsistent SLOAD variant");
1105 lj_assertA(LJ_DUALNUM ||
1106 !irt_isint(t) ||
1107 (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME|IRSLOAD_KEYINDEX)),
1108 "bad SLOAD type");
1109 #if LJ_SOFTFP
1110 lj_assertA(!(ir->op2 & IRSLOAD_CONVERT),
1111 "unsplit SLOAD convert"); /* Handled by LJ_SOFTFP SPLIT. */
1112 if (hiop && ra_used(ir+1)) {
1113 type = ra_dest(as, ir+1, allow);
1114 rset_clear(allow, type);
1115 }
1116 #else
1117 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
1118 dest = ra_scratch(as, RSET_FPR);
1119 asm_tointg(as, ir, dest);
1120 t.irt = IRT_NUM; /* Continue with a regular number type check. */
1121 } else
1122 #endif
1123 if (ra_used(ir)) {
1124 lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t),
1125 "bad SLOAD type %d", irt_type(ir->t));
1126 dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow);
1127 rset_clear(allow, dest);
1128 base = ra_alloc1(as, REF_BASE, allow);
1129 rset_clear(allow, base);
1130 if (!LJ_SOFTFP && (ir->op2 & IRSLOAD_CONVERT)) {
1131 if (irt_isint(t)) {
1132 emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
1133 dest = ra_scratch(as, RSET_FPR);
1134 emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP);
1135 emit_fb(as, PPCI_FCTIWZ, dest, dest);
1136 t.irt = IRT_NUM; /* Check for original type. */
1137 } else {
1138 Reg tmp = ra_scratch(as, allow);
1139 Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp));
1140 Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
1141 emit_fab(as, PPCI_FSUB, dest, dest, fbias);
1142 emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
1143 emit_lsptr(as, PPCI_LFS, (fbias & 31),
1144 (void *)&as->J->k32[LJ_K32_2P52_2P31],
1145 rset_clear(allow, hibias));
1146 emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
1147 emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
1148 emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000);
1149 dest = tmp;
1150 t.irt = IRT_INT; /* Check for original type. */
1151 }
1152 }
1153 goto dotypecheck;
1154 }
1155 base = ra_alloc1(as, REF_BASE, allow);
1156 rset_clear(allow, base);
1157 dotypecheck:
1158 if (irt_isnum(t)) {
1159 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1160 Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
1161 asm_guardcc(as, CC_GE);
1162 #if !LJ_SOFTFP
1163 type = RID_TMP;
1164 #endif
1165 emit_ab(as, PPCI_CMPLW, type, tisnum);
1166 }
1167 if (ra_hasreg(dest)) emit_fai(as, LJ_SOFTFP ? PPCI_LWZ : PPCI_LFD, dest,
1168 base, ofs-(LJ_SOFTFP?0:4));
1169 } else {
1170 if ((ir->op2 & IRSLOAD_TYPECHECK)) {
1171 asm_guardcc(as, CC_NE);
1172 emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t));
1173 type = RID_TMP;
1174 }
1175 if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs);
1176 }
1177 if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4);
1178 }
1179
1180 /* -- Allocations --------------------------------------------------------- */
1181
1182 #if LJ_HASFFI
1183 static void asm_cnew(ASMState *as, IRIns *ir)
1184 {
1185 CTState *cts = ctype_ctsG(J2G(as->J));
1186 CTypeID id = (CTypeID)IR(ir->op1)->i;
1187 CTSize sz;
1188 CTInfo info = lj_ctype_info(cts, id, &sz);
1189 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
1190 IRRef args[4];
1191 RegSet drop = RSET_SCRATCH;
1192 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
1193 "bad CNEW/CNEWI operands");
1194
1195 as->gcsteps++;
1196 if (ra_hasreg(ir->r))
1197 rset_clear(drop, ir->r); /* Dest reg handled below. */
1198 ra_evictset(as, drop);
1199 if (ra_used(ir))
1200 ra_destreg(as, ir, RID_RET); /* GCcdata * */
1201
1202 /* Initialize immutable cdata object. */
1203 if (ir->o == IR_CNEWI) {
1204 RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
1205 int32_t ofs = sizeof(GCcdata);
1206 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
1207 if (sz == 8) {
1208 ofs += 4;
1209 lj_assertA((ir+1)->o == IR_HIOP, "expected HIOP for CNEWI");
1210 }
1211 for (;;) {
1212 Reg r = ra_alloc1(as, ir->op2, allow);
1213 emit_tai(as, PPCI_STW, r, RID_RET, ofs);
1214 rset_clear(allow, r);
1215 if (ofs == sizeof(GCcdata)) break;
1216 ofs -= 4; ir++;
1217 }
1218 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */
1219 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
1220 args[0] = ASMREF_L; /* lua_State *L */
1221 args[1] = ir->op1; /* CTypeID id */
1222 args[2] = ir->op2; /* CTSize sz */
1223 args[3] = ASMREF_TMP1; /* CTSize align */
1224 asm_gencall(as, ci, args);
1225 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
1226 return;
1227 }
1228
1229 /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
1230 emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
1231 emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
1232 emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
1233 emit_ti(as, PPCI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */
1234 args[0] = ASMREF_L; /* lua_State *L */
1235 args[1] = ASMREF_TMP1; /* MSize size */
1236 asm_gencall(as, ci, args);
1237 ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
1238 ra_releasetmp(as, ASMREF_TMP1));
1239 }
1240 #endif
1241
1242 /* -- Write barriers ------------------------------------------------------ */
1243
1244 static void asm_tbar(ASMState *as, IRIns *ir)
1245 {
1246 Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
1247 Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
1248 Reg link = RID_TMP;
1249 MCLabel l_end = emit_label(as);
1250 emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
1251 emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
1252 emit_setgl(as, tab, gc.grayagain);
1253 lj_assertA(LJ_GC_BLACK == 0x04, "bad LJ_GC_BLACK");
1254 emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
1255 emit_getgl(as, link, gc.grayagain);
1256 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
1257 emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK);
1258 emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked));
1259 }
1260
1261 static void asm_obar(ASMState *as, IRIns *ir)
1262 {
1263 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
1264 IRRef args[2];
1265 MCLabel l_end;
1266 Reg obj, val, tmp;
1267 /* No need for other object barriers (yet). */
1268 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type");
1269 ra_evictset(as, RSET_SCRATCH);
1270 l_end = emit_label(as);
1271 args[0] = ASMREF_TMP1; /* global_State *g */
1272 args[1] = ir->op1; /* TValue *tv */
1273 asm_gencall(as, ci, args);
1274 emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
1275 obj = IR(ir->op1)->r;
1276 tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
1277 emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
1278 emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK);
1279 emit_condbranch(as, PPCI_BC, CC_EQ, l_end);
1280 emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES);
1281 val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
1282 emit_tai(as, PPCI_LBZ, tmp, obj,
1283 (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
1284 emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked));
1285 }
1286
1287 /* -- Arithmetic and logic operations ------------------------------------- */
1288
1289 #if !LJ_SOFTFP
1290 static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
1291 {
1292 Reg dest = ra_dest(as, ir, RSET_FPR);
1293 Reg right, left = ra_alloc2(as, ir, RSET_FPR);
1294 right = (left >> 8); left &= 255;
1295 if (pi == PPCI_FMUL)
1296 emit_fac(as, pi, dest, left, right);
1297 else
1298 emit_fab(as, pi, dest, left, right);
1299 }
1300
1301 static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
1302 {
1303 Reg dest = ra_dest(as, ir, RSET_FPR);
1304 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
1305 emit_fb(as, pi, dest, left);
1306 }
1307
1308 static void asm_fpmath(ASMState *as, IRIns *ir)
1309 {
1310 if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
1311 asm_fpunary(as, ir, PPCI_FSQRT);
1312 else
1313 asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
1314 }
1315 #endif
1316
1317 static void asm_add(ASMState *as, IRIns *ir)
1318 {
1319 #if !LJ_SOFTFP
1320 if (irt_isnum(ir->t)) {
1321 if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
1322 asm_fparith(as, ir, PPCI_FADD);
1323 } else
1324 #endif
1325 {
1326 Reg dest = ra_dest(as, ir, RSET_GPR);
1327 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1328 PPCIns pi;
1329 if (irref_isk(ir->op2)) {
1330 int32_t k = IR(ir->op2)->i;
1331 if (checki16(k)) {
1332 pi = PPCI_ADDI;
1333 /* May fail due to spills/restores above, but simplifies the logic. */
1334 if (as->flagmcp == as->mcp) {
1335 as->flagmcp = NULL;
1336 as->mcp++;
1337 pi = PPCI_ADDICDOT;
1338 }
1339 emit_tai(as, pi, dest, left, k);
1340 return;
1341 } else if ((k & 0xffff) == 0) {
1342 emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16));
1343 return;
1344 } else if (!as->sectref) {
1345 emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16);
1346 emit_tai(as, PPCI_ADDI, dest, left, k);
1347 return;
1348 }
1349 }
1350 pi = PPCI_ADD;
1351 /* May fail due to spills/restores above, but simplifies the logic. */
1352 if (as->flagmcp == as->mcp) {
1353 as->flagmcp = NULL;
1354 as->mcp++;
1355 pi |= PPCF_DOT;
1356 }
1357 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1358 emit_tab(as, pi, dest, left, right);
1359 }
1360 }
1361
1362 static void asm_sub(ASMState *as, IRIns *ir)
1363 {
1364 #if !LJ_SOFTFP
1365 if (irt_isnum(ir->t)) {
1366 if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
1367 asm_fparith(as, ir, PPCI_FSUB);
1368 } else
1369 #endif
1370 {
1371 PPCIns pi = PPCI_SUBF;
1372 Reg dest = ra_dest(as, ir, RSET_GPR);
1373 Reg left, right;
1374 if (irref_isk(ir->op1)) {
1375 int32_t k = IR(ir->op1)->i;
1376 if (checki16(k)) {
1377 right = ra_alloc1(as, ir->op2, RSET_GPR);
1378 emit_tai(as, PPCI_SUBFIC, dest, right, k);
1379 return;
1380 }
1381 }
1382 /* May fail due to spills/restores above, but simplifies the logic. */
1383 if (as->flagmcp == as->mcp) {
1384 as->flagmcp = NULL;
1385 as->mcp++;
1386 pi |= PPCF_DOT;
1387 }
1388 left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1389 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1390 emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
1391 }
1392 }
1393
1394 static void asm_mul(ASMState *as, IRIns *ir)
1395 {
1396 #if !LJ_SOFTFP
1397 if (irt_isnum(ir->t)) {
1398 asm_fparith(as, ir, PPCI_FMUL);
1399 } else
1400 #endif
1401 {
1402 PPCIns pi = PPCI_MULLW;
1403 Reg dest = ra_dest(as, ir, RSET_GPR);
1404 Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1405 if (irref_isk(ir->op2)) {
1406 int32_t k = IR(ir->op2)->i;
1407 if (checki16(k)) {
1408 emit_tai(as, PPCI_MULLI, dest, left, k);
1409 return;
1410 }
1411 }
1412 /* May fail due to spills/restores above, but simplifies the logic. */
1413 if (as->flagmcp == as->mcp) {
1414 as->flagmcp = NULL;
1415 as->mcp++;
1416 pi |= PPCF_DOT;
1417 }
1418 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1419 emit_tab(as, pi, dest, left, right);
1420 }
1421 }
1422
1423 #define asm_fpdiv(as, ir) asm_fparith(as, ir, PPCI_FDIV)
1424
1425 static void asm_neg(ASMState *as, IRIns *ir)
1426 {
1427 #if !LJ_SOFTFP
1428 if (irt_isnum(ir->t)) {
1429 asm_fpunary(as, ir, PPCI_FNEG);
1430 } else
1431 #endif
1432 {
1433 Reg dest, left;
1434 PPCIns pi = PPCI_NEG;
1435 if (as->flagmcp == as->mcp) {
1436 as->flagmcp = NULL;
1437 as->mcp++;
1438 pi |= PPCF_DOT;
1439 }
1440 dest = ra_dest(as, ir, RSET_GPR);
1441 left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1442 emit_tab(as, pi, dest, left, 0);
1443 }
1444 }
1445
1446 #define asm_abs(as, ir) asm_fpunary(as, ir, PPCI_FABS)
1447
1448 static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
1449 {
1450 Reg dest, left, right;
1451 if (as->flagmcp == as->mcp) {
1452 as->flagmcp = NULL;
1453 as->mcp++;
1454 }
1455 asm_guardcc(as, CC_SO);
1456 dest = ra_dest(as, ir, RSET_GPR);
1457 left = ra_alloc2(as, ir, RSET_GPR);
1458 right = (left >> 8); left &= 255;
1459 if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; }
1460 emit_tab(as, pi|PPCF_DOT, dest, left, right);
1461 }
1462
1463 #define asm_addov(as, ir) asm_arithov(as, ir, PPCI_ADDO)
1464 #define asm_subov(as, ir) asm_arithov(as, ir, PPCI_SUBFO)
1465 #define asm_mulov(as, ir) asm_arithov(as, ir, PPCI_MULLWO)
1466
1467 #if LJ_HASFFI
1468 static void asm_add64(ASMState *as, IRIns *ir)
1469 {
1470 Reg dest = ra_dest(as, ir, RSET_GPR);
1471 Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
1472 PPCIns pi = PPCI_ADDE;
1473 if (irref_isk(ir->op2)) {
1474 int32_t k = IR(ir->op2)->i;
1475 if (k == 0)
1476 pi = PPCI_ADDZE;
1477 else if (k == -1)
1478 pi = PPCI_ADDME;
1479 else
1480 goto needright;
1481 right = 0;
1482 } else {
1483 needright:
1484 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1485 }
1486 emit_tab(as, pi, dest, left, right);
1487 ir--;
1488 dest = ra_dest(as, ir, RSET_GPR);
1489 left = ra_alloc1(as, ir->op1, RSET_GPR);
1490 if (irref_isk(ir->op2)) {
1491 int32_t k = IR(ir->op2)->i;
1492 if (checki16(k)) {
1493 emit_tai(as, PPCI_ADDIC, dest, left, k);
1494 return;
1495 }
1496 }
1497 right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
1498 emit_tab(as, PPCI_ADDC, dest, left, right);
1499 }
1500
1501 static void asm_sub64(ASMState *as, IRIns *ir)
1502 {
1503 Reg dest = ra_dest(as, ir, RSET_GPR);
1504 Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR);
1505 PPCIns pi = PPCI_SUBFE;
1506 if (irref_isk(ir->op1)) {
1507 int32_t k = IR(ir->op1)->i;
1508 if (k == 0)
1509 pi = PPCI_SUBFZE;
1510 else if (k == -1)
1511 pi = PPCI_SUBFME;
1512 else
1513 goto needleft;
1514 left = 0;
1515 } else {
1516 needleft:
1517 left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
1518 }
1519 emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
1520 ir--;
1521 dest = ra_dest(as, ir, RSET_GPR);
1522 right = ra_alloc1(as, ir->op2, RSET_GPR);
1523 if (irref_isk(ir->op1)) {
1524 int32_t k = IR(ir->op1)->i;
1525 if (checki16(k)) {
1526 emit_tai(as, PPCI_SUBFIC, dest, right, k);
1527 return;
1528 }
1529 }
1530 left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
1531 emit_tab(as, PPCI_SUBFC, dest, right, left);
1532 }
1533
1534 static void asm_neg64(ASMState *as, IRIns *ir)
1535 {
1536 Reg dest = ra_dest(as, ir, RSET_GPR);
1537 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1538 emit_tab(as, PPCI_SUBFZE, dest, left, 0);
1539 ir--;
1540 dest = ra_dest(as, ir, RSET_GPR);
1541 left = ra_alloc1(as, ir->op1, RSET_GPR);
1542 emit_tai(as, PPCI_SUBFIC, dest, left, 0);
1543 }
1544 #endif
1545
1546 static void asm_bnot(ASMState *as, IRIns *ir)
1547 {
1548 Reg dest, left, right;
1549 PPCIns pi = PPCI_NOR;
1550 if (as->flagmcp == as->mcp) {
1551 as->flagmcp = NULL;
1552 as->mcp++;
1553 pi |= PPCF_DOT;
1554 }
1555 dest = ra_dest(as, ir, RSET_GPR);
1556 if (mayfuse(as, ir->op1)) {
1557 IRIns *irl = IR(ir->op1);
1558 if (irl->o == IR_BAND)
1559 pi ^= (PPCI_NOR ^ PPCI_NAND);
1560 else if (irl->o == IR_BXOR)
1561 pi ^= (PPCI_NOR ^ PPCI_EQV);
1562 else if (irl->o != IR_BOR)
1563 goto nofuse;
1564 left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
1565 right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
1566 } else {
1567 nofuse:
1568 left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
1569 }
1570 emit_asb(as, pi, dest, left, right);
1571 }
1572
1573 static void asm_bswap(ASMState *as, IRIns *ir)
1574 {
1575 Reg dest = ra_dest(as, ir, RSET_GPR);
1576 IRIns *irx;
1577 if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
1578 ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
1579 /* Fuse BSWAP with XLOAD to lwbrx. */
1580 asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
1581 } else {
1582 Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
1583 Reg tmp = dest;
1584 if (tmp == left) {
1585 tmp = RID_TMP;
1586 emit_mr(as, dest, RID_TMP);
1587 }
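/* Three-rotate byte swap, e.g. for left = 0x11223344: rotlwi by 8 gives
** 0x22334411, then the two rlwimi insert bytes of the rotate-by-24 value
** 0x44112233 into bits 0-7 and 16-23, yielding 0x44332211.
*/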
1588 emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
1589 emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
1590 emit_rotlwi(as, tmp, left, 8);
1591 }
1592 }
1593
1594 /* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
1595 static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
1596 {
1597 IRIns *ir;
1598 Reg left;
1599 if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
1600 irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
1601 int32_t sh = (IR(ir->op2)->i & 31);
1602 switch (ir->o) {
1603 case IR_BSHL:
1604 if ((mask & ((1u<<sh)-1))) goto nofuse;
1605 break;
1606 case IR_BSHR:
1607 if ((mask & ~((~0u)>>sh))) goto nofuse;
1608 sh = ((32-sh)&31);
1609 break;
1610 case IR_BROL:
1611 break;
1612 default:
1613 goto nofuse;
1614 }
1615 left = ra_alloc1(as, ir->op1, RSET_GPR);
1616 *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
1617 return;
1618 }
1619 nofuse:
1620 left = ra_alloc1(as, ref, RSET_GPR);
1621 *--as->mcp = pi | PPCF_T(left);
1622 }
1623
1624 static void asm_band(ASMState *as, IRIns *ir)
1625 {
1626 Reg dest, left, right;
1627 IRRef lref = ir->op1;
1628 PPCIns dot = 0;
1629 IRRef op2;
1630 if (as->flagmcp == as->mcp) {
1631 as->flagmcp = NULL;
1632 as->mcp++;
1633 dot = PPCF_DOT;
1634 }
1635 dest = ra_dest(as, ir, RSET_GPR);
1636 if (irref_isk(ir->op2)) {
1637 int32_t k = IR(ir->op2)->i;
1638 if (k) {
1639 /* First check for a contiguous bitmask as used by rlwinm. */
1640 uint32_t s1 = lj_ffs((uint32_t)k);
1641 uint32_t k1 = ((uint32_t)k >> s1);
1642 if ((k1 & (k1+1)) == 0) {
1643 asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
1644 PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1),
1645 k, lref);
1646 return;
1647 }
1648 if (~(uint32_t)k) {
1649 uint32_t s2 = lj_ffs(~(uint32_t)k);
1650 uint32_t k2 = (~(uint32_t)k >> s2);
1651 if ((k2 & (k2+1)) == 0) {
1652 asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
1653 PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)),
1654 k, lref);
1655 return;
1656 }
1657 }
1658 }
1659 if (checku16(k)) {
1660 left = ra_alloc1(as, lref, RSET_GPR);
1661 emit_asi(as, PPCI_ANDIDOT, dest, left, k);
1662 return;
1663 } else if ((k & 0xffff) == 0) {
1664 left = ra_alloc1(as, lref, RSET_GPR);
1665 emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16));
1666 return;
1667 }
1668 }
1669 op2 = ir->op2;
1670 if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) {
1671 dot ^= (PPCI_AND ^ PPCI_ANDC);
1672 op2 = IR(op2)->op1;
1673 }
1674 left = ra_hintalloc(as, lref, dest, RSET_GPR);
1675 right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left));
1676 emit_asb(as, PPCI_AND ^ dot, dest, left, right);
1677 }
1678
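/* Generic OR/XOR. A 32 bit constant that does not fit into one immediate is
** split into an ori/oris (or xori/xoris) pair over the low and high halves.
*/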
static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  if (irref_isk(ir->op2)) {
    int32_t k = IR(ir->op2)->i;
    Reg tmp = left;
    if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
      if (!checku16(k)) {
        emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
        if ((k & 0xffff) == 0) return;
      }
      emit_asi(as, pik, dest, left, k);
      return;
    }
  }
  /* May fail due to spills/restores above, but simplifies the logic. */
  if (as->flagmcp == as->mcp) {
    as->flagmcp = NULL;
    as->mcp++;
    pi |= PPCF_DOT;
  }
  right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_asb(as, pi, dest, left, right);
}

#define asm_bor(as, ir) asm_bitop(as, ir, PPCI_OR, PPCI_ORI)
#define asm_bxor(as, ir) asm_bitop(as, ir, PPCI_XOR, PPCI_XORI)

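/* Shifts and rotates. Constant shift counts use rlwinm directly: a shift
** left by n is rlwinm dest, left, n, 0, 31-n and a logical shift right by n
** is rlwinm dest, left, (32-n)&31, n, 31; only the arithmetic shift needs a
** separate immediate form (srawi).
*/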
static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
{
  Reg dest, left;
  Reg dot = 0;
  if (as->flagmcp == as->mcp) {
    as->flagmcp = NULL;
    as->mcp++;
    dot = PPCF_DOT;
  }
  dest = ra_dest(as, ir, RSET_GPR);
  left = ra_alloc1(as, ir->op1, RSET_GPR);
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    int32_t shift = (IR(ir->op2)->i & 31);
    if (pik == 0)  /* SLWI */
      emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift);
    else if (pik == 1)  /* SRWI */
      emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31);
    else
      emit_asb(as, pik|dot, dest, left, shift);
  } else {
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_asb(as, pi|dot, dest, left, right);
  }
}

#define asm_bshl(as, ir) asm_bitshift(as, ir, PPCI_SLW, 0)
#define asm_bshr(as, ir) asm_bitshift(as, ir, PPCI_SRW, 1)
#define asm_bsar(as, ir) asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI)
#define asm_brol(as, ir) \
  asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31), \
               PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31))
#define asm_bror(as, ir) lj_assertA(0, "unexpected BROR")

#if LJ_SOFTFP
static void asm_sfpmin_max(ASMState *as, IRIns *ir)
{
  CCallInfo ci = lj_ir_callinfo[IRCALL_softfp_cmp];
  IRRef args[4];
  MCLabel l_right, l_end;
  Reg desthi = ra_dest(as, ir, RSET_GPR), destlo = ra_dest(as, ir+1, RSET_GPR);
  Reg righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
  Reg rightlo, leftlo = ra_alloc2(as, ir+1, RSET_GPR);
  PPCCC cond = (IROp)ir->o == IR_MIN ? CC_EQ : CC_NE;
  righthi = (lefthi >> 8); lefthi &= 255;
  rightlo = (leftlo >> 8); leftlo &= 255;
  args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
  args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;
  l_end = emit_label(as);
  if (desthi != righthi) emit_mr(as, desthi, righthi);
  if (destlo != rightlo) emit_mr(as, destlo, rightlo);
  l_right = emit_label(as);
  if (l_end != l_right) emit_jmp(as, l_end);
  if (desthi != lefthi) emit_mr(as, desthi, lefthi);
  if (destlo != leftlo) emit_mr(as, destlo, leftlo);
  if (l_right == as->mcp+1) {
    cond ^= 4; l_right = l_end; ++as->mcp;
  }
  emit_condbranch(as, PPCI_BC, cond, l_right);
  ra_evictset(as, RSET_SCRATCH);
  emit_cmpi(as, RID_RET, 1);
  asm_gencall(as, &ci, args);
}
#endif

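/* MIN/MAX. The FP path computes the difference and uses fsel to pick one
** operand without branching. The integer path is also branchless: the
** operands get their sign bits flipped (xoris 0x8000) so a carrying subtract
** orders them as unsigned values, subfe turns the carry into an all-ones or
** all-zero mask, and masking the difference before adding back one operand
** yields the minimum or maximum.
*/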
static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
{
  if (!LJ_SOFTFP && irt_isnum(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg tmp = dest;
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    if (tmp == left || tmp == right)
      tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
                       dest), left), right));
    emit_facb(as, PPCI_FSEL, dest, tmp, left, right);
    emit_fab(as, PPCI_FSUB, tmp, ismax ? left : right, ismax ? right : left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg tmp1 = RID_TMP, tmp2 = dest;
    Reg right, left = ra_alloc2(as, ir, RSET_GPR);
    right = (left >> 8); left &= 255;
    if (tmp2 == left || tmp2 == right)
      tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR,
                        dest), left), right));
    emit_tab(as, PPCI_ADD, dest, tmp2, right);
    emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1);
    emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1);
    emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1);
    emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000);
    emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000);
  }
}

#define asm_min(as, ir) asm_min_max(as, ir, 0)
#define asm_max(as, ir) asm_min_max(as, ir, 1)

/* -- Comparisons --------------------------------------------------------- */

#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
#define CC_TWO 0x80 /* Check two flags for FP comparison. */

/* Map of comparisons to flags. ORDER IR. */
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op      int cc                 FP cc */
  /* LT  */ CC_GE               + (CC_GE<<4),
  /* GE  */ CC_LT               + (CC_LE<<4) + CC_TWO,
  /* LE  */ CC_GT               + (CC_GE<<4) + CC_TWO,
  /* GT  */ CC_LE               + (CC_LE<<4),
  /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO,
  /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4),
  /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4),
  /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO,
  /* EQ  */ CC_NE               + (CC_NE<<4),
  /* NE  */ CC_EQ               + (CC_EQ<<4),
  /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO  /* Same as UGT. */
};

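/* Each asm_compmap entry stores the condition on which the guard has to
** *exit*, i.e. the negated comparison: the low nibble holds the integer
** condition (plus the CC_UNSIGNED flag), the high nibble the FP condition.
** CC_TWO appears to mark comparisons where a second CR bit must be merged
** with cror before the branch (see asm_comp and asm_comp64 below).
*/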
static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
{
  Reg right, left = ra_alloc1(as, lref, RSET_GPR);
  if (irref_isk(rref)) {
    int32_t k = IR(rref)->i;
    if ((cc & CC_UNSIGNED) == 0) {  /* Signed comparison with constant. */
      if (checki16(k)) {
        emit_tai(as, PPCI_CMPWI, cr, left, k);
        /* Signed comparison with zero and referencing previous ins? */
        if (k == 0 && lref == as->curins-1)
          as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
        return;
      } else if ((cc & 3) == (CC_EQ & 3)) {  /* Use CMPLWI for EQ or NE. */
        if (checku16(k)) {
          emit_tai(as, PPCI_CMPLWI, cr, left, k);
          return;
        } else if (!as->sectref && ra_noreg(IR(rref)->r)) {
          emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k);
          emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16));
          return;
        }
      }
    } else {  /* Unsigned comparison with constant. */
      if (checku16(k)) {
        emit_tai(as, PPCI_CMPLWI, cr, left, k);
        return;
      }
    }
  }
  right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
  emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right);
}

static void asm_comp(ASMState *as, IRIns *ir)
{
  PPCCC cc = asm_compmap[ir->o];
  if (!LJ_SOFTFP && irt_isnum(ir->t)) {
    Reg right, left = ra_alloc2(as, ir, RSET_FPR);
    right = (left >> 8); left &= 255;
    asm_guardcc(as, (cc >> 4));
    if ((cc & CC_TWO))
      emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3));
    emit_fab(as, PPCI_FCMPU, 0, left, right);
  } else {
    IRRef lref = ir->op1, rref = ir->op2;
    if (irref_isk(lref) && !irref_isk(rref)) {
      /* Swap constants to the right (only for ABC). */
      IRRef tmp = lref; lref = rref; rref = tmp;
      if ((cc & 2) == 0) cc ^= 1;  /* LT <-> GT, LE <-> GE */
    }
    asm_guardcc(as, cc);
    asm_intcomp_(as, lref, rref, 0, cc);
  }
}

#define asm_equal(as, ir) asm_comp(as, ir)

#if LJ_SOFTFP
/* SFP comparisons. */
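/* The two 32 bit halves of both operands are passed to the softfp_cmp helper
** in the first four GPR argument registers; argument registers that already
** hold the right value are kept out of the evict set. The integer result in
** RID_RET is then compared against a small constant and guarded on.
*/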
static void asm_sfpcomp(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
  RegSet drop = RSET_SCRATCH;
  Reg r;
  IRRef args[4];
  args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1;
  args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2;

  for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+3; r++) {
    if (!rset_test(as->freeset, r) &&
        regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR])
      rset_clear(drop, r);
  }
  ra_evictset(as, drop);
  asm_setupresult(as, ir, ci);
  switch ((IROp)ir->o) {
  case IR_ULT:
    asm_guardcc(as, CC_EQ);
    emit_ai(as, PPCI_CMPWI, RID_RET, 0);
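    /* fallthrough */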
  case IR_ULE:
    asm_guardcc(as, CC_EQ);
    emit_ai(as, PPCI_CMPWI, RID_RET, 1);
    break;
  case IR_GE: case IR_GT:
    asm_guardcc(as, CC_EQ);
    emit_ai(as, PPCI_CMPWI, RID_RET, 2);
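    /* fallthrough */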
  default:
    asm_guardcc(as, (asm_compmap[ir->o] & 0xf));
    emit_ai(as, PPCI_CMPWI, RID_RET, 0);
    break;
  }
  asm_gencall(as, ci, args);
}
#endif

#if LJ_HASFFI
/* 64 bit integer comparisons. */
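/* The hiword comparison goes to cr0 and the loword comparison to cr1 (the
** loword half is compared unsigned, except for equality). The two CR fields
** are then combined with crand/crandc/cror into a single bit for one guard
** branch.
*/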
static void asm_comp64(ASMState *as, IRIns *ir)
{
  PPCCC cc = asm_compmap[(ir-1)->o];
  if ((cc&3) == (CC_EQ&3)) {
    asm_guardcc(as, cc);
    emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR,
             (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3));
  } else {
    asm_guardcc(as, CC_EQ);
    emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1));
    emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC,
             (CC_EQ&3), (CC_EQ&3), 4+(cc&3));
  }
  /* Loword comparison sets cr1 and is unsigned, except for equality. */
  asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4,
               cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED));
  /* Hiword comparison sets cr0. */
  asm_intcomp_(as, ir->op1, ir->op2, 0, cc);
  as->flagmcp = NULL;  /* Doesn't work here. */
}
#endif

/* -- Split register ops -------------------------------------------------- */

/* Hiword op of a split 32/32 bit op. Previous op must be the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
#if LJ_HASFFI || LJ_SOFTFP
  if ((ir-1)->o == IR_CONV) {  /* Conversions to/from 64 bit. */
    as->curins--;  /* Always skip the CONV. */
#if LJ_HASFFI && !LJ_SOFTFP
    if (usehi || uselo)
      asm_conv64(as, ir);
    return;
#endif
  } else if ((ir-1)->o <= IR_NE) {  /* 64 bit integer comparisons. ORDER IR. */
    as->curins--;  /* Always skip the loword comparison. */
#if LJ_SOFTFP
    if (!irt_isint(ir->t)) {
      asm_sfpcomp(as, ir-1);
      return;
    }
#endif
#if LJ_HASFFI
    asm_comp64(as, ir);
#endif
    return;
#if LJ_SOFTFP
  } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
    as->curins--;  /* Always skip the loword min/max. */
    if (uselo || usehi)
      asm_sfpmin_max(as, ir-1);
    return;
#endif
  } else if ((ir-1)->o == IR_XSTORE) {
    as->curins--;  /* Handle both stores here. */
    if ((ir-1)->r != RID_SINK) {
      asm_xstore_(as, ir, 0);
      asm_xstore_(as, ir-1, 4);
    }
    return;
  }
#endif
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
#if LJ_HASFFI
  case IR_ADD: as->curins--; asm_add64(as, ir); break;
  case IR_SUB: as->curins--; asm_sub64(as, ir); break;
  case IR_NEG: as->curins--; asm_neg64(as, ir); break;
  case IR_CNEWI:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
#if LJ_SOFTFP
  case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  case IR_STRTO:
    if (!uselo)
      ra_allocref(as, ir->op1, RSET_GPR);  /* Mark lo op as used. */
    break;
  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: case IR_TMPREF:
    /* Nothing to do here. Handled by lo op itself. */
    break;
#endif
  case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
  }
}

/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, HOOK_PROFILE);
  emit_lsglptr(as, PPCI_LBZ, RID_TMP,
               (int32_t)offsetof(global_State, hookmask));
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
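/* The emitted check loads cur_L and its maxstack, subtracts BASE (RID_BASE,
** or the parent's BASE register for side traces) into RID_TMP and compares
** the remaining space against 8*topslot (8 bytes per stack slot), branching
** to the exit stub on overflow. If no free register is available, one of
** RID_RETHI/RID_RETLO is spilled to SPOFS_TMPW around the check.
*/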
static void asm_stack_check(ASMState *as, BCReg topslot,
                            IRIns *irp, RegSet allow, ExitNo exitno)
{
  /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
  Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
  rset_clear(allow, pbase);
  tmp = allow ? rset_pickbot(allow) :
                (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
  emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno));
  if (allow == RSET_EMPTY)  /* Restore temp. register. */
    emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW);
  else
    ra_modified(as, tmp);
  emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot));
  emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp);
  emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
  if (pbase == RID_TMP)
    emit_getgl(as, RID_TMP, jit_base);
  emit_getgl(as, tmp, cur_L);
  if (allow == RSET_EMPTY)  /* Spill temp. register. */
    emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
}

/* Restore Lua stack from on-trace state. */
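/* Each Lua stack slot is an 8 byte TValue. On this big-endian target the
** type word lives at the slot base and the 32 bit payload at offset 4, which
** is why the stores below use ofs = 8*(slot-1) and ofs+4 relative to BASE.
*/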
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if (irt_isnum(ir->t)) {
#if LJ_SOFTFP
      Reg tmp;
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      /* LJ_SOFTFP: must be a number constant. */
      lj_assertA(irref_isk(ref), "unsplit FP op");
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow);
      emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?4:0));
      if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1);
      tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow);
      emit_tai(as, PPCI_STW, tmp, RID_BASE, ofs+(LJ_BE?0:4));
#else
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
#endif
    } else {
      Reg type;
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
                 "restore of IR type %d", irt_type(ir->t));
      if (!irt_ispri(ir->t)) {
        Reg src = ra_alloc1(as, ref, allow);
        rset_clear(allow, src);
        emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4);
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
        if (s == 0) continue;  /* Do not overwrite link to previous frame. */
        type = ra_allock(as, (int32_t)(*flinks--), allow);
#if LJ_SOFTFP
      } else if ((sn & SNAP_SOFTFPNUM)) {
        type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE));
#endif
      } else if ((sn & SNAP_KEYINDEX)) {
        type = ra_allock(as, (int32_t)LJ_KEYINDEX, allow);
      } else {
        type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      }
      emit_tai(as, PPCI_STW, type, RID_BASE, ofs);
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define PPC_NOPATCH_GC_CHECK PPCI_ORIS

/* Check GC threshold and do one or more GC steps. */
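/* In program order this emits: load gc.total and gc.threshold, skip the call
** when total < threshold, otherwise call lj_gc_step_jit and exit the trace if
** it returns nonzero (GCSatomic or GCSfinalize). The PPC_NOPATCH_GC_CHECK
** word (a no-op oris) placed right before the guard branch makes
** lj_asm_patchexit leave this particular exit unpatched.
*/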
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  *--as->mcp = PPC_NOPATCH_GC_CHECK;
  emit_ai(as, PPCI_CMPWI, RID_RET, 0);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps */
  asm_gencall(as, ci, args);
  emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
  tmp = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end);
  emit_ab(as, PPCI_CMPLW, RID_TMP, tmp);
  emit_getgl(as, tmp, gc.threshold);
  emit_getgl(as, RID_TMP, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    /* asm_guardcc already inverted the cond branch and patched the final b. */
    p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2);
  } else {
    p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
  }
}

/* Fixup the tail of the loop. */
static void asm_loop_tail_fixup(ASMState *as)
{
  UNUSED(as);  /* Nothing to do. */
}

/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_mr(as, r, RID_BASE);
  }
}

/* Coalesce BASE register for a side trace. */
static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      rset_clear(allow, r);  /* Mark same BASE register as coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      rset_clear(allow, irp->r);
      emit_mr(as, r, irp->r);  /* Move from coalesced parent reg. */
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return allow;
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    *--p = PPCI_NOP;
    *--p = PPCI_NOP;
    as->mctop = p;
  } else {
    /* Patch stack adjustment. */
    lj_assertA(checki16(CFRAME_SIZE+spadj), "stack adjustment out of range");
    p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
    p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-2;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++)
    if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
      if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return (!LJ_SOFTFP && irt_isfp(ir->t)) ? REGSP_HINT(RID_FPRET) :
                                           REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
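/* Exit branches inside the trace are bc instructions (primary opcode 16)
** whose displacement points at the exit stub. When the new target is within
** the +-32K short-branch range they are redirected in place; otherwise they
** keep going through the exit stub, whose long b (26 bit displacement) is
** patched instead. A guard that tests the SO bit is redirected one
** instruction early, to an mcrxr prepended to the freshly assembled trace
** that clears SO first (see the clearso workaround below).
*/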
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *px = exitstub_trace_addr(T, exitno);
  MCode *cstart = NULL;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  int clearso = 0, patchlong = 1;
  for (; p < pe; p++) {
    /* Look for exitstub branch, try to replace with branch to target. */
    uint32_t ins = *p;
    if ((ins & 0xfc000000u) == 0x40000000u &&
        ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) {
      ptrdiff_t delta = (char *)target - (char *)p;
      if (((ins >> 16) & 3) == (CC_SO&3)) {
        clearso = sizeof(MCode);
        delta -= sizeof(MCode);
      }
      /* Many, but not all short-range branches can be patched directly. */
      if (p[-1] == PPC_NOPATCH_GC_CHECK) {
        patchlong = 0;
      } else if (((delta + 0x8000) >> 16) == 0) {
        *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) |
             ((delta & 0x8000) * (PPCF_Y/0x8000));
        if (!cstart) cstart = p;
      }
    } else if ((ins & 0xfc000000u) == PPCI_B &&
               ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
      ptrdiff_t delta = (char *)target - (char *)p;
      lj_assertJ(((delta + 0x02000000) >> 26) == 0,
                 "branch target out of range");
      *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
      if (!cstart) cstart = p;
    }
  }
  /* Always patch long-range branch in exit stub itself. Except, if we can't. */
  if (patchlong) {
    ptrdiff_t delta = (char *)target - (char *)px - clearso;
    lj_assertJ(((delta + 0x02000000) >> 26) == 0,
               "branch target out of range");
    *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
  }
  if (!cstart) cstart = px;
  lj_mcode_sync(cstart, px+1);
  if (clearso) {  /* Extend the current trace. Ugly workaround. */
    MCode *pp = J->cur.mcode;
    J->cur.szmcode += sizeof(MCode);
    *--pp = PPCI_MCRXR;  /* Clear SO flag. */
    J->cur.mcode = pp;
    lj_mcode_sync(pp, pp+1);
  }
  lj_mcode_patch(J, mcarea, 1);
}
