/*
** Memory access optimizations.
** AA: Alias Analysis using high-level semantic disambiguation.
** FWD: Load Forwarding (L2L) + Store Forwarding (S2L).
** DSE: Dead-Store Elimination.
** Copyright (C) 2005-2021 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_opt_mem_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_tab.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_ircall.h"
#include "lj_dispatch.h"

/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)		(&J->cur.ir[(ref)])
#define fins		(&J->fold.ins)
#define fleft		(J->fold.left)
#define fright		(J->fold.right)

/*
** Caveat #1: return value is not always a TRef -- only use with tref_ref().
** Caveat #2: FWD relies on active CSE for xREF operands -- see lj_opt_fold().
*/

/* Return values from alias analysis. */
typedef enum {
  ALIAS_NO,	/* The two refs CANNOT alias (exact). */
  ALIAS_MAY,	/* The two refs MAY alias (inexact). */
  ALIAS_MUST	/* The two refs MUST alias (exact). */
} AliasRet;
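
/* Illustrative outcomes for plain table accesses (not exhaustive; see the
** analyses below):
**
**   local a, b = {}, {}   -- refs into a vs. b: ALIAS_NO (two new allocations)
**   t[i]  vs. t[i]        -- identical xREF: ALIAS_MUST
**   t[i]  vs. u[j]        -- unrelated tables and keys: ALIAS_MAY
*/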

/* -- ALOAD/HLOAD forwarding and ASTORE/HSTORE elimination ---------------- */

/* Simplified escape analysis: check for intervening stores. */
static AliasRet aa_escape(jit_State *J, IRIns *ir, IRIns *stop)
{
  IRRef ref = (IRRef)(ir - J->cur.ir);  /* The ref that might be stored. */
  for (ir++; ir < stop; ir++)
    if (ir->op2 == ref &&
	(ir->o == IR_ASTORE || ir->o == IR_HSTORE ||
	 ir->o == IR_USTORE || ir->o == IR_FSTORE))
      return ALIAS_MAY;  /* Reference was stored and might alias. */
  return ALIAS_NO;  /* Reference was not stored. */
}
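
/* Example for aa_escape(): a new allocation only escapes if it is stored
** somewhere between its creation and the other reference, e.g.
**
**   local t = {}   -- TNEW
**   u[k] = t       -- xSTORE whose value operand is the TNEW ref
**
** A later table reference may now reach t again, so only ALIAS_MAY can be
** returned; without such a store the allocation stays private and ALIAS_NO
** holds.
*/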

/* Alias analysis for two different table references. */
static AliasRet aa_table(jit_State *J, IRRef ta, IRRef tb)
{
  IRIns *taba = IR(ta), *tabb = IR(tb);
  int newa, newb;
  lj_assertJ(ta != tb, "bad usage");
  lj_assertJ(irt_istab(taba->t) && irt_istab(tabb->t), "bad usage");
  /* Disambiguate new allocations. */
  newa = (taba->o == IR_TNEW || taba->o == IR_TDUP);
  newb = (tabb->o == IR_TNEW || tabb->o == IR_TDUP);
  if (newa && newb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (newb) {  /* At least one allocation? */
    IRIns *tmp = taba; taba = tabb; tabb = tmp;
  } else if (!newa) {
    return ALIAS_MAY;  /* Anything else: we just don't know. */
  }
  return aa_escape(J, taba, tabb);
}

/* Alias analysis for array and hash access using key-based disambiguation. */
static AliasRet aa_ahref(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRRef ka = refa->op2;
  IRRef kb = refb->op2;
  IRIns *keya, *keyb;
  IRRef ta, tb;
  if (refa == refb)
    return ALIAS_MUST;  /* Shortcut for same refs. */
  keya = IR(ka);
  if (keya->o == IR_KSLOT) { ka = keya->op1; keya = IR(ka); }
  keyb = IR(kb);
  if (keyb->o == IR_KSLOT) { kb = keyb->op1; keyb = IR(kb); }
  ta = (refa->o==IR_HREFK || refa->o==IR_AREF) ? IR(refa->op1)->op1 : refa->op1;
  tb = (refb->o==IR_HREFK || refb->o==IR_AREF) ? IR(refb->op1)->op1 : refb->op1;
  if (ka == kb) {
    /* Same key. Check for same table with different ref (NEWREF vs. HREF). */
    if (ta == tb)
      return ALIAS_MUST;  /* Same key, same table. */
    else
      return aa_table(J, ta, tb);  /* Same key, possibly different table. */
  }
  if (irref_isk(ka) && irref_isk(kb))
    return ALIAS_NO;  /* Different constant keys. */
  if (refa->o == IR_AREF) {
    /* Disambiguate array references based on index arithmetic. */
    int32_t ofsa = 0, ofsb = 0;
    IRRef basea = ka, baseb = kb;
    lj_assertJ(refb->o == IR_AREF, "expected AREF");
    /* Gather base and offset from t[base] or t[base+-ofs]. */
    if (keya->o == IR_ADD && irref_isk(keya->op2)) {
      basea = keya->op1;
      ofsa = IR(keya->op2)->i;
      if (basea == kb && ofsa != 0)
	return ALIAS_NO;  /* t[base+-ofs] vs. t[base]. */
    }
    if (keyb->o == IR_ADD && irref_isk(keyb->op2)) {
      baseb = keyb->op1;
      ofsb = IR(keyb->op2)->i;
      if (ka == baseb && ofsb != 0)
	return ALIAS_NO;  /* t[base] vs. t[base+-ofs]. */
    }
    if (basea == baseb && ofsa != ofsb)
      return ALIAS_NO;  /* t[base+-o1] vs. t[base+-o2] and o1 != o2. */
  } else {
    /* Disambiguate hash references based on the type of their keys. */
    lj_assertJ((refa->o==IR_HREF || refa->o==IR_HREFK || refa->o==IR_NEWREF) &&
	       (refb->o==IR_HREF || refb->o==IR_HREFK || refb->o==IR_NEWREF),
	       "bad xREF IR op %d or %d", refa->o, refb->o);
    if (!irt_sametype(keya->t, keyb->t))
      return ALIAS_NO;  /* Different key types. */
  }
  if (ta == tb)
    return ALIAS_MAY;  /* Same table, cannot disambiguate keys. */
  else
    return aa_table(J, ta, tb);  /* Try to disambiguate tables. */
}
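
/* Illustrative key-based disambiguation results from aa_ahref(), for
** accesses to the same table t:
**
**   t[1]  vs. t[2]     -- different constant keys: ALIAS_NO
**   t[i]  vs. t[i+1]   -- same base index, non-zero offset: ALIAS_NO
**   t[i]  vs. t[j]     -- unknown relation between i and j: ALIAS_MAY
**   t.x   vs. t[k]     -- both in the hash part, string vs. number key:
**                         ALIAS_NO
*/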

/* Array and hash load forwarding. */
static TRef fwd_ahload(jit_State *J, IRRef xref)
{
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[fins->o+IRDELTA_L2S];
  while (ref > xref) {
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store (yet): const-fold loads from allocations. */
  {
    IRIns *ir = (xr->o == IR_HREFK || xr->o == IR_AREF) ? IR(xr->op1) : xr;
    IRRef tab = ir->op1;
    ir = IR(tab);
    if (ir->o == IR_TNEW || (ir->o == IR_TDUP && irref_isk(xr->op2))) {
      /* A NEWREF with a number key may end up pointing to the array part.
      ** But it's referenced from HSTORE and not found in the ASTORE chain.
      ** For now simply consider this a conflict without forwarding anything.
      */
      if (xr->o == IR_AREF) {
	IRRef ref2 = J->chain[IR_NEWREF];
	while (ref2 > tab) {
	  IRIns *newref = IR(ref2);
	  if (irt_isnum(IR(newref->op2)->t))
	    goto cselim;
	  ref2 = newref->prev;
	}
      }
      /* NEWREF inhibits CSE for HREF, and dependent FLOADs from HREFK/AREF.
      ** But the above search for conflicting stores was limited by xref.
      ** So continue searching, limited by the TNEW/TDUP. Store forwarding
      ** is ok, too. A conflict does NOT limit the search for a matching load.
      */
      while (ref > tab) {
	IRIns *store = IR(ref);
	switch (aa_ahref(J, xr, IR(store->op1))) {
	case ALIAS_NO:   break;  /* Continue searching. */
	case ALIAS_MAY:  goto cselim;  /* Conflicting store. */
	case ALIAS_MUST: return store->op2;  /* Store forwarding. */
	}
	ref = store->prev;
      }
      if (ir->o == IR_TNEW && !irt_isnil(fins->t))
	return 0;  /* Type instability in loop-carried dependency. */
      if (irt_ispri(fins->t)) {
	return TREF_PRI(irt_type(fins->t));
      } else if (irt_isnum(fins->t) || (LJ_DUALNUM && irt_isint(fins->t)) ||
		 irt_isstr(fins->t)) {
	TValue keyv;
	cTValue *tv;
	IRIns *key = IR(xr->op2);
	if (key->o == IR_KSLOT) key = IR(key->op1);
	lj_ir_kvalue(J->L, &keyv, key);
	tv = lj_tab_get(J->L, ir_ktab(IR(ir->op1)), &keyv);
	lj_assertJ(itype2irt(tv) == irt_type(fins->t),
		   "mismatched type in constant table");
	if (irt_isnum(fins->t))
	  return lj_ir_knum_u64(J, tv->u64);
	else if (LJ_DUALNUM && irt_isint(fins->t))
	  return lj_ir_kint(J, intV(tv));
	else
	  return lj_ir_kstr(J, strV(tv));
      }
      /* Otherwise: don't intern as a constant. */
    }
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[fins->o];
  while (ref > lim) {
    IRIns *load = IR(ref);
    if (load->op1 == xref)
      return ref;  /* Load forwarding. */
    ref = load->prev;
  }
  return 0;  /* Conflict or no match. */
}
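
/* What fwd_ahload() typically achieves (illustrative):
**
**   t[i] = x
**   y = t[i]            -- ALIAS_MUST with the store above: y reuses x,
**                       -- no ALOAD is emitted (store-to-load forwarding).
**
**   local t = {1, 2, 3}
**   y = t[2]            -- load from a TDUP template with a constant key and
**                       -- no conflicting store: folded to the constant 2.
*/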

/* Reassociate ALOAD across PHIs to handle t[i-1] forwarding case. */
static TRef fwd_aload_reassoc(jit_State *J)
{
  IRIns *irx = IR(fins->op1);
  IRIns *key = IR(irx->op2);
  if (key->o == IR_ADD && irref_isk(key->op2)) {
    IRIns *add2 = IR(key->op1);
    if (add2->o == IR_ADD && irref_isk(add2->op2) &&
	IR(key->op2)->i == -IR(add2->op2)->i) {
      IRRef ref = J->chain[IR_AREF];
      IRRef lim = add2->op1;
      if (irx->op1 > lim) lim = irx->op1;
      while (ref > lim) {
	IRIns *ir = IR(ref);
	if (ir->op1 == irx->op1 && ir->op2 == add2->op1)
	  return fwd_ahload(J, ref);
	ref = ir->prev;
      }
    }
  }
  return 0;
}
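
/* Illustrative case for the reassociation above:
**
**   for i=2,n do t[i] = t[i-1] end
**
** After loop unrolling the load key of the second copy is (i+1)-1, i.e.
** ADD(ADD(i,+1),-1). The two constants cancel, so the load is matched
** against the AREF t[i] from the first copy and forwarded across the PHI.
*/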

/* ALOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J)
{
  IRRef ref;
  if ((ref = fwd_ahload(J, fins->op1)) ||
      (ref = fwd_aload_reassoc(J)))
    return ref;
  return EMITFOLD;
}

/* HLOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J)
{
  IRRef ref = fwd_ahload(J, fins->op1);
  if (ref)
    return ref;
  return EMITFOLD;
}

/* HREFK forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J)
{
  IRRef tab = fleft->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > tab) {
    IRIns *newref = IR(ref);
    if (tab == newref->op1) {
      if (fright->op1 == newref->op2)
	return ref;  /* Forward from NEWREF. */
      else
	goto docse;
    } else if (aa_table(J, tab, newref->op1) != ALIAS_NO) {
      goto docse;
    }
    ref = newref->prev;
  }
  /* No conflicting NEWREF: key location unchanged for HREFK of TDUP. */
  if (IR(tab)->o == IR_TDUP)
    fins->t.irt &= ~IRT_GUARD;  /* Drop HREFK guard. */
docse:
  return CSEFOLD;
}
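
/* Illustrative: for
**
**   t.x = 1
**   y = t.x
**
** the store emits NEWREF t "x"; the HREFK of the subsequent load forwards
** from that NEWREF. And for a HREFK on a fresh TDUP with no NEWREF that
** could affect it, the key's node slot cannot have moved, so the guard is
** dropped.
*/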

/* Check whether HREF of TNEW/TDUP can be folded to niltv. */
int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J)
{
  IRRef lim = fins->op1;  /* Search limit. */
  IRRef ref;

  /* The key for an ASTORE may end up in the hash part after a NEWREF. */
  if (irt_isnum(fright->t) && J->chain[IR_NEWREF] > lim) {
    ref = J->chain[IR_ASTORE];
    while (ref > lim) {
      if (ref < J->chain[IR_NEWREF])
	return 0;  /* Conflict. */
      ref = IR(ref)->prev;
    }
  }

  /* Search for conflicting stores. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    if (aa_ahref(J, fins, IR(store->op1)) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = store->prev;
  }

  return 1;  /* No conflict. Can fold to niltv. */
}
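
/* Illustrative: for
**
**   local t = {}
**   if t[k] then ... end   -- k not a constant
**
** there is no conflicting HSTORE between the TNEW and the HREF, so the HREF
** can be folded to the constant niltv and the load yields nil without a
** hash lookup (the actual fold rule lives in lj_opt_fold.c).
*/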

/* Check whether there's no aliasing table.clear. */
static int fwd_aa_tab_clear(jit_State *J, IRRef lim, IRRef ta)
{
  IRRef ref = J->chain[IR_CALLS];
  while (ref > lim) {
    IRIns *calls = IR(ref);
    if (calls->op2 == IRCALL_lj_tab_clear &&
	(ta == calls->op1 || aa_table(J, ta, calls->op1) != ALIAS_NO))
      return 0;  /* Conflict. */
    ref = calls->prev;
  }
  return 1;  /* No conflict. Can safely FOLD/CSE. */
}

/* Check whether there's no aliasing NEWREF/table.clear for the left operand. */
int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim)
{
  IRRef ta = fins->op1;
  IRRef ref = J->chain[IR_NEWREF];
  while (ref > lim) {
    IRIns *newref = IR(ref);
    if (ta == newref->op1 || aa_table(J, ta, newref->op1) != ALIAS_NO)
      return 0;  /* Conflict. */
    ref = newref->prev;
  }
  return fwd_aa_tab_clear(J, lim, ta);
}

/* ASTORE/HSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[fins->o];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_ahref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:	/* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
	goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:	/* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
	return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
	IRIns *ir;
	/* Check for any intervening guards (includes conflicting loads).
	** Note that lj_tab_keyindex and lj_vm_next don't need guards,
	** since they are followed by at least one guarded VLOAD.
	*/
	for (ir = IR(J->cur.nins-1); ir > store; ir--)
	  if (irt_isguard(ir->t) || ir->o == IR_ALEN)
	    goto doemit;  /* No elimination possible. */
	/* Remove redundant store from chain and replace with NOP. */
	*refp = store->prev;
	lj_ir_nop(store);
	/* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* ALEN forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_alen(jit_State *J)
{
  IRRef tab = fins->op1;  /* Table reference. */
  IRRef lim = tab;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting HSTORE with numeric key. */
  ref = J->chain[IR_HSTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    IRIns *href = IR(store->op1);
    IRIns *key = IR(href->op2);
    if (irt_isnum(key->o == IR_KSLOT ? IR(key->op1)->t : key->t)) {
      lim = ref;  /* Conflicting store found, limits search for ALEN. */
      break;
    }
    ref = store->prev;
  }

  /* Try to find a matching ALEN. */
  ref = J->chain[IR_ALEN];
  while (ref > lim) {
    /* CSE for ALEN only depends on the table, not the hint. */
    if (IR(ref)->op1 == tab) {
      IRRef sref;

      /* Search for aliasing table.clear. */
      if (!fwd_aa_tab_clear(J, ref, tab))
	break;

      /* Search for hint-forwarding or conflicting store. */
      sref = J->chain[IR_ASTORE];
      while (sref > ref) {
	IRIns *store = IR(sref);
	IRIns *aref = IR(store->op1);
	IRIns *fref = IR(aref->op1);
	if (tab == fref->op1) {  /* ASTORE to the same table. */
	  /* Detect t[#t+1] = x idiom for push. */
	  IRIns *idx = IR(aref->op2);
	  if (!irt_isnil(store->t) &&
	      idx->o == IR_ADD && idx->op1 == ref &&
	      IR(idx->op2)->o == IR_KINT && IR(idx->op2)->i == 1) {
	    /* Note: this requires an extra PHI check in loop unroll. */
	    fins->op2 = aref->op2;  /* Set ALEN hint. */
	  }
	  goto doemit;  /* Conflicting store, possibly giving a hint. */
	} else if (aa_table(J, tab, fref->op1) != ALIAS_NO) {
	  goto doemit;  /* Conflicting store. */
	}
	sref = store->prev;
      }

      return ref;  /* Plain ALEN forwarding. */
    }
    ref = IR(ref)->prev;
  }
doemit:
  return EMITFOLD;
}
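
/* Illustrative push idiom handled above:
**
**   t[#t+1] = x
**   t[#t+1] = y
**
** The ASTORE in between prevents plain forwarding of the first ALEN, but
** because it matches the push pattern (a non-nil value stored at #t+1), the
** stored index is passed along as the ALEN hint in op2, which can be used
** to recompute the new length more cheaply.
*/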

/* -- ULOAD forwarding ---------------------------------------------------- */

/* The current alias analysis for upvalues is very simplistic. It only
** disambiguates between the unique upvalues of the same function.
** This is good enough for now, since most upvalues are read-only.
**
** A more precise analysis would be feasible with the help of the parser:
** generate a unique key for every upvalue, even across all prototypes.
** Lacking a realistic use-case, it's unclear whether this is beneficial.
*/
static AliasRet aa_uref(IRIns *refa, IRIns *refb)
{
  if (refa->o != refb->o)
    return ALIAS_NO;  /* Different UREFx type. */
  if (refa->op1 == refb->op1) {  /* Same function. */
    if (refa->op2 == refb->op2)
      return ALIAS_MUST;  /* Same function, same upvalue idx. */
    else
      return ALIAS_NO;  /* Same function, different upvalue idx. */
  } else {  /* Different functions, check disambiguation hash values. */
    if (((refa->op2 ^ refb->op2) & 0xff))
      return ALIAS_NO;  /* Upvalues with different hash values cannot alias. */
    else
      return ALIAS_MAY;  /* No conclusion can be drawn for same hash value. */
  }
}
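
/* Illustrative: two upvalues of the same closure with different indices get
** ALIAS_NO; for upvalues of different closures only the 8 bit disambiguation
** hash in the low byte of op2 is compared, so a hash match merely yields
** ALIAS_MAY.
*/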

/* ULOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J)
{
  IRRef uref = fins->op1;
  IRRef lim = REF_BASE;  /* Search limit. */
  IRIns *xr = IR(uref);
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_USTORE];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_ULOAD];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == uref ||
	(IR(ir->op1)->op12 == IR(uref)->op12 && IR(ir->op1)->o == IR(uref)->o))
      return ref;  /* Match for identical or equal UREFx (non-CSEable UREFO). */
    ref = ir->prev;
  }
  return lj_ir_emit(J);
}

/* USTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J)
{
  IRRef xref = fins->op1;  /* xREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(xref);
  IRRef1 *refp = &J->chain[IR_USTORE];
  IRRef ref = *refp;
  while (ref > xref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_uref(xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:	/* Store to MAYBE the same location. */
      if (store->op2 != val)  /* Conflict if the value is different. */
	goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:	/* Store to the same location. */
      if (store->op2 == val)  /* Same value: drop the new store. */
	return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
	IRIns *ir;
	/* Check for any intervening guards (includes conflicting loads). */
	for (ir = IR(J->cur.nins-1); ir > store; ir--)
	  if (irt_isguard(ir->t))
	    goto doemit;  /* No elimination possible. */
	/* Remove redundant store from chain and replace with NOP. */
	*refp = store->prev;
	lj_ir_nop(store);
	if (ref+1 < J->cur.nins &&
	    store[1].o == IR_OBAR && store[1].op1 == xref) {
	  IRRef1 *bp = &J->chain[IR_OBAR];
	  IRIns *obar;
	  for (obar = IR(*bp); *bp > ref+1; obar = IR(*bp))
	    bp = &obar->prev;
	  /* Remove OBAR, too. */
	  *bp = obar->prev;
	  lj_ir_nop(obar);
	}
	/* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* -- FLOAD forwarding and FSTORE elimination ----------------------------- */

/* Alias analysis for field access.
** Field loads are cheap and field stores are rare.
** Simple disambiguation based on field types is good enough.
*/
static AliasRet aa_fref(jit_State *J, IRIns *refa, IRIns *refb)
{
  if (refa->op2 != refb->op2)
    return ALIAS_NO;  /* Different fields. */
  if (refa->op1 == refb->op1)
    return ALIAS_MUST;  /* Same field, same object. */
  else if (refa->op2 >= IRFL_TAB_META && refa->op2 <= IRFL_TAB_NOMM)
    return aa_table(J, refa->op1, refb->op1);  /* Disambiguate tables. */
  else
    return ALIAS_MAY;  /* Same field, possibly different object. */
}
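
/* Illustrative: two FLOADs with different field IDs never alias; the same
** table field (tab.meta ... tab.nomm) accessed through two different table
** refs is further disambiguated with aa_table(); anything else stays
** ALIAS_MAY.
*/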

/* Only the loads for mutable fields end up here (see FOLD). */
TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J)
{
  IRRef oref = fins->op1;  /* Object reference. */
  IRRef fid = fins->op2;  /* Field ID. */
  IRRef lim = oref;  /* Search limit. */
  IRRef ref;

  /* Search for conflicting stores. */
  ref = J->chain[IR_FSTORE];
  while (ref > oref) {
    IRIns *store = IR(ref);
    switch (aa_fref(J, fins, IR(store->op1))) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST: return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

  /* No conflicting store: const-fold field loads from allocations. */
  if (fid == IRFL_TAB_META) {
    IRIns *ir = IR(oref);
    if (ir->o == IR_TNEW || ir->o == IR_TDUP)
      return lj_ir_knull(J, IRT_TAB);
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  return lj_opt_cselim(J, lim);
}
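
/* Illustrative: loading the metatable field of a freshly allocated table,
** e.g. for getmetatable(t) right after local t = {}, hits the TNEW/TDUP
** case above and folds to a constant NULL, so the result is known to be
** nil without emitting an FLOAD.
*/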

/* FSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J)
{
  IRRef fref = fins->op1;  /* FREF reference. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRIns *xr = IR(fref);
  IRRef1 *refp = &J->chain[IR_FSTORE];
  IRRef ref = *refp;
  while (ref > fref) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_fref(J, xr, IR(store->op1))) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
	goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val &&
	  !(xr->op2 >= IRFL_SBUF_W && xr->op2 <= IRFL_SBUF_R))
	return DROPFOLD;  /* Same value: drop the new store. */
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
	IRIns *ir;
	/* Check for any intervening guards or conflicting loads. */
	for (ir = IR(J->cur.nins-1); ir > store; ir--)
	  if (irt_isguard(ir->t) || (ir->o == IR_FLOAD && ir->op2 == xr->op2))
	    goto doemit;  /* No elimination possible. */
	/* Remove redundant store from chain and replace with NOP. */
	*refp = store->prev;
	lj_ir_nop(store);
	/* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* Check whether there's no aliasing buffer op between IRFL_SBUF_*. */
int LJ_FASTCALL lj_opt_fwd_sbuf(jit_State *J, IRRef lim)
{
  IRRef ref;
  if (J->chain[IR_BUFPUT] > lim)
    return 0;  /* Conflict. */
  ref = J->chain[IR_CALLS];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op2 >= IRCALL_lj_strfmt_putint && ir->op2 < IRCALL_lj_buf_tostr)
      return 0;  /* Conflict. */
    ref = ir->prev;
  }
  ref = J->chain[IR_CALLL];
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op2 >= IRCALL_lj_strfmt_putint && ir->op2 < IRCALL_lj_buf_tostr)
      return 0;  /* Conflict. */
    ref = ir->prev;
  }
  return 1;  /* No conflict. Can safely FOLD/CSE. */
}

/* -- XLOAD forwarding and XSTORE elimination ----------------------------- */

/* Find cdata allocation for a reference (if any). */
static IRIns *aa_findcnew(jit_State *J, IRIns *ir)
{
  while (ir->o == IR_ADD) {
    if (!irref_isk(ir->op1)) {
      IRIns *ir1 = aa_findcnew(J, IR(ir->op1));  /* Left-recursion. */
      if (ir1) return ir1;
    }
    if (irref_isk(ir->op2)) return NULL;
    ir = IR(ir->op2);  /* Flatten right-recursion. */
  }
  return ir->o == IR_CNEW ? ir : NULL;
}

/* Alias analysis for two cdata allocations. */
static AliasRet aa_cnew(jit_State *J, IRIns *refa, IRIns *refb)
{
  IRIns *cnewa = aa_findcnew(J, refa);
  IRIns *cnewb = aa_findcnew(J, refb);
  if (cnewa == cnewb)
    return ALIAS_MAY;  /* Same allocation or neither is an allocation. */
  if (cnewa && cnewb)
    return ALIAS_NO;  /* Two different allocations never alias. */
  if (cnewb) { cnewa = cnewb; refb = refa; }
  return aa_escape(J, cnewa, refb);
}

/* Alias analysis for XLOAD/XSTORE. */
static AliasRet aa_xref(jit_State *J, IRIns *refa, IRIns *xa, IRIns *xb)
{
  ptrdiff_t ofsa = 0, ofsb = 0;
  IRIns *refb = IR(xb->op1);
  IRIns *basea = refa, *baseb = refb;
  if (refa == refb && irt_sametype(xa->t, xb->t))
    return ALIAS_MUST;  /* Shortcut for same refs with identical type. */
  /* Offset-based disambiguation. */
  if (refa->o == IR_ADD && irref_isk(refa->op2)) {
    IRIns *irk = IR(refa->op2);
    basea = IR(refa->op1);
    ofsa = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
					    (ptrdiff_t)irk->i;
  }
  if (refb->o == IR_ADD && irref_isk(refb->op2)) {
    IRIns *irk = IR(refb->op2);
    baseb = IR(refb->op1);
    ofsb = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
					    (ptrdiff_t)irk->i;
  }
  /* Treat constified pointers like base vs. base+offset. */
  if (basea->o == IR_KPTR && baseb->o == IR_KPTR) {
    ofsb += (char *)ir_kptr(baseb) - (char *)ir_kptr(basea);
    baseb = basea;
  }
  /* This implements (very) strict aliasing rules.
  ** Different types do NOT alias, except for differences in signedness.
  ** Type punning through unions is allowed (but forces a reload).
  */
  if (basea == baseb) {
    ptrdiff_t sza = irt_size(xa->t), szb = irt_size(xb->t);
    if (ofsa == ofsb) {
      if (sza == szb && irt_isfp(xa->t) == irt_isfp(xb->t))
	return ALIAS_MUST;  /* Same-sized, same-kind. May need to convert. */
    } else if (ofsa + sza <= ofsb || ofsb + szb <= ofsa) {
      return ALIAS_NO;  /* Non-overlapping base+-o1 vs. base+-o2. */
    }
    /* NYI: extract, extend or reinterpret bits (int <-> fp). */
    return ALIAS_MAY;  /* Overlapping or type punning: force reload. */
  }
  if (!irt_sametype(xa->t, xb->t) &&
      !(irt_typerange(xa->t, IRT_I8, IRT_U64) &&
	((xa->t.irt - IRT_I8) ^ (xb->t.irt - IRT_I8)) == 1))
    return ALIAS_NO;
  /* NYI: structural disambiguation. */
  return aa_cnew(J, basea, baseb);  /* Try to disambiguate allocations. */
}
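
/* Illustrative outcomes of the strict-aliasing rules above, for FFI
** pointers p and q:
**
**   *(int32_t *)p  vs. *(uint32_t *)p      -- same size, signedness only:
**                                             ALIAS_MUST
**   *(int32_t *)p  vs. *(int32_t *)(p+4)   -- disjoint ranges: ALIAS_NO
**   *(int32_t *)p  vs. *(float *)p         -- type punning: ALIAS_MAY,
**                                             which forces a reload
**   *(int32_t *)p  vs. *(double *)q        -- different types, different
**                                             bases: ALIAS_NO
*/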

/* Return CSEd reference or 0. Caveat: swaps lower ref to the right! */
static IRRef reassoc_trycse(jit_State *J, IROp op, IRRef op1, IRRef op2)
{
  IRRef ref = J->chain[op];
  IRRef lim = op1;
  if (op2 > lim) { lim = op2; op2 = op1; op1 = lim; }
  while (ref > lim) {
    IRIns *ir = IR(ref);
    if (ir->op1 == op1 && ir->op2 == op2)
      return ref;
    ref = ir->prev;
  }
  return 0;
}

/* Reassociate index references. */
static IRRef reassoc_xref(jit_State *J, IRIns *ir)
{
  ptrdiff_t ofs = 0;
  if (ir->o == IR_ADD && irref_isk(ir->op2)) {  /* Get constant offset. */
    IRIns *irk = IR(ir->op2);
    ofs = (LJ_64 && irk->o == IR_KINT64) ? (ptrdiff_t)ir_k64(irk)->u64 :
					   (ptrdiff_t)irk->i;
    ir = IR(ir->op1);
  }
  if (ir->o == IR_ADD) {  /* Add of base + index. */
    /* Index ref > base ref for loop-carried dependences. Only check op1. */
    IRIns *ir2, *ir1 = IR(ir->op1);
    int32_t shift = 0;
    IRRef idxref;
    /* Determine index shifts. Don't bother with IR_MUL here. */
    if (ir1->o == IR_BSHL && irref_isk(ir1->op2))
      shift = IR(ir1->op2)->i;
    else if (ir1->o == IR_ADD && ir1->op1 == ir1->op2)
      shift = 1;
    else
      ir1 = ir;
    ir2 = IR(ir1->op1);
    /* A non-reassociated add. Must be a loop-carried dependence. */
    if (ir2->o == IR_ADD && irt_isint(ir2->t) && irref_isk(ir2->op2))
      ofs += (ptrdiff_t)IR(ir2->op2)->i << shift;
    else
      return 0;
    idxref = ir2->op1;
    /* Try to CSE the reassociated chain. Give up if not found. */
    if (ir1 != ir &&
	!(idxref = reassoc_trycse(J, ir1->o, idxref,
				  ir1->o == IR_BSHL ? ir1->op2 : idxref)))
      return 0;
    if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, ir->op2)))
      return 0;
    if (ofs != 0) {
      IRRef refk = tref_ref(lj_ir_kintp(J, ofs));
      if (!(idxref = reassoc_trycse(J, IR_ADD, idxref, refk)))
	return 0;
    }
    return idxref;  /* Success, found a reassociated index reference. Phew. */
  }
  return 0;  /* Failure. */
}

/* XLOAD forwarding. */
TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef ref;

  if ((fins->op2 & IRXLOAD_READONLY))
    goto cselim;
  if ((fins->op2 & IRXLOAD_VOLATILE))
    goto doemit;

  /* Search for conflicting stores. */
  ref = J->chain[IR_XSTORE];
retry:
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  while (ref > lim) {
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:   break;  /* Continue searching. */
    case ALIAS_MAY:  lim = ref; goto cselim;  /* Limit search for load. */
    case ALIAS_MUST:
      /* Emit conversion if the loaded type doesn't match the forwarded type. */
      if (!irt_sametype(fins->t, IR(store->op2)->t)) {
	IRType dt = irt_type(fins->t), st = irt_type(IR(store->op2)->t);
	if (dt == IRT_I8 || dt == IRT_I16) {  /* Trunc + sign-extend. */
	  st = dt | IRCONV_SEXT;
	  dt = IRT_INT;
	} else if (dt == IRT_U8 || dt == IRT_U16) {  /* Trunc + zero-extend. */
	  st = dt;
	  dt = IRT_INT;
	}
	fins->ot = IRT(IR_CONV, dt);
	fins->op1 = store->op2;
	fins->op2 = (dt<<5)|st;
	return RETRYFOLD;
      }
      return store->op2;  /* Store forwarding. */
    }
    ref = store->prev;
  }

cselim:
  /* Try to find a matching load. Below the conflicting store, if any. */
  ref = J->chain[IR_XLOAD];
  while (ref > lim) {
    /* CSE for XLOAD depends on the type, but not on the IRXLOAD_* flags. */
    if (IR(ref)->op1 == xref && irt_sametype(IR(ref)->t, fins->t))
      return ref;
    ref = IR(ref)->prev;
  }

  /* Reassociate XLOAD across PHIs to handle a[i-1] forwarding case. */
  if (!(fins->op2 & IRXLOAD_READONLY) && J->chain[IR_LOOP] &&
      xref == fins->op1 && (xref = reassoc_xref(J, xr)) != 0) {
    ref = J->chain[IR_XSTORE];
    while (ref > lim)  /* Skip stores that have already been checked. */
      ref = IR(ref)->prev;
    lim = xref;
    xr = IR(xref);
    goto retry;  /* Retry with the reassociated reference. */
  }
doemit:
  return EMITFOLD;
}
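
/* Illustrative forwarding with a type change, e.g. for FFI code like
**
**   p[0] = x            -- XSTORE to an int8_t pointer, x a plain integer
**   y = p[0]            -- XLOAD of type i8 from the same address
**
** The stored value is forwarded, but since it still has 32 bit integer
** type, the load is replaced by CONV int.i8 sext, which re-narrows and
** sign-extends x instead of reloading it from memory.
*/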

/* XSTORE elimination. */
TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J)
{
  IRRef xref = fins->op1;
  IRIns *xr = IR(xref);
  IRRef lim = xref;  /* Search limit. */
  IRRef val = fins->op2;  /* Stored value reference. */
  IRRef1 *refp = &J->chain[IR_XSTORE];
  IRRef ref = *refp;
  if (J->chain[IR_CALLXS] > lim) lim = J->chain[IR_CALLXS];
  if (J->chain[IR_XBAR] > lim) lim = J->chain[IR_XBAR];
  if (J->chain[IR_XSNEW] > lim) lim = J->chain[IR_XSNEW];
  while (ref > lim) {  /* Search for redundant or conflicting stores. */
    IRIns *store = IR(ref);
    switch (aa_xref(J, xr, fins, store)) {
    case ALIAS_NO:
      break;  /* Continue searching. */
    case ALIAS_MAY:
      if (store->op2 != val)  /* Conflict if the value is different. */
	goto doemit;
      break;  /* Otherwise continue searching. */
    case ALIAS_MUST:
      if (store->op2 == val)  /* Same value: drop the new store. */
	return DROPFOLD;
      /* Different value: try to eliminate the redundant store. */
      if (ref > J->chain[IR_LOOP]) {  /* Quick check to avoid crossing LOOP. */
	IRIns *ir;
	/* Check for any intervening guards or any XLOADs (no AA performed). */
	for (ir = IR(J->cur.nins-1); ir > store; ir--)
	  if (irt_isguard(ir->t) || ir->o == IR_XLOAD)
	    goto doemit;  /* No elimination possible. */
	/* Remove redundant store from chain and replace with NOP. */
	*refp = store->prev;
	lj_ir_nop(store);
	/* Now emit the new store instead. */
      }
      goto doemit;
    }
    ref = *(refp = &store->prev);
  }
doemit:
  return EMITFOLD;  /* Otherwise we have a conflict or simply no match. */
}

/* -- ASTORE/HSTORE previous type analysis -------------------------------- */

/* Check whether the previous value for a table store is non-nil.
** This can be derived either from a previous store or from a previous
** load (because all loads from tables perform a type check).
**
** The result of the analysis can be used to avoid the metatable check
** and the guard against HREF returning niltv. Both of these are cheap,
** so let's not spend too much effort on the analysis.
**
** A result of 1 is exact: previous value CANNOT be nil.
** A result of 0 is inexact: previous value MAY be nil.
*/
int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref)
{
  /* First check stores. */
  IRRef ref = J->chain[loadop+IRDELTA_L2S];
  while (ref > xref) {
    IRIns *store = IR(ref);
    if (store->op1 == xref) {  /* Same xREF. */
      /* A nil store MAY alias, but a non-nil store MUST alias. */
      return !irt_isnil(store->t);
    } else if (irt_isnil(store->t)) {  /* Must check any nil store. */
      IRRef skref = IR(store->op1)->op2;
      IRRef xkref = IR(xref)->op2;
      /* Same key type MAY alias. Need ALOAD check due to multiple int types. */
      if (loadop == IR_ALOAD || irt_sametype(IR(skref)->t, IR(xkref)->t)) {
	if (skref == xkref || !irref_isk(skref) || !irref_isk(xkref))
	  return 0;  /* A nil store with same const key or var key MAY alias. */
	/* Different const keys CANNOT alias. */
      }  /* Different key types CANNOT alias. */
    }  /* Other non-nil stores MAY alias. */
    ref = store->prev;
  }

  /* Check loads since nothing could be derived from stores. */
  ref = J->chain[loadop];
  while (ref > xref) {
    IRIns *load = IR(ref);
    if (load->op1 == xref) {  /* Same xREF. */
      /* A nil load MAY alias, but a non-nil load MUST alias. */
      return !irt_isnil(load->t);
    }  /* Other non-nil loads MAY alias. */
    ref = load->prev;
  }
  return 0;  /* Nothing derived at all, previous value MAY be nil. */
}
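
/* Illustrative: when recording
**
**   t.x = t.x + 1
**
** the guarded HLOAD of t.x proves the previous value is non-nil, so the
** recorder can skip the metatable check and the niltv guard for the
** subsequent HSTORE to the same xREF.
*/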

/* ------------------------------------------------------------------------ */

#undef IR
#undef fins
#undef fleft
#undef fright

#endif