/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Memory disambiguator
 * @author  Michael Beck
 * @date    27.12.2006
 */
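/* Typical use of this module (illustrative sketch only; real call sites
 * depend on the surrounding optimisation pass):
 *
 *   mem_disambig_init();
 *   if (get_alias_relation_ex(adr1, mode1, adr2, mode2) == ir_no_alias) {
 *       ... the two memory operations may be reordered ...
 *   }
 *   mem_disambig_term();
 */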
#include "config.h"

#include <stdlib.h>
#include <stdbool.h>

#include "adt/pmap.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irprog_t.h"
#include "irmemory_t.h"
#include "irmemory.h"
#include "irflag.h"
#include "hashptr.h"
#include "irouts.h"
#include "irgwalk.h"
#include "irprintf.h"
#include "debug.h"
#include "error.h"
#include "typerep.h"
#include "irpass.h"

/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
DEBUG_ONLY(static firm_dbg_module_t *dbgcall = NULL;)

/** The source-language-specific disambiguator function. */
static DISAMBIGUATOR_FUNC language_disambiguator = NULL;

/** The global memory disambiguator options. */
static unsigned global_mem_disambig_opt = aa_opt_no_opt;

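/** Returns the name of an alias relation as a string, mainly for debug output. */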
const char *get_ir_alias_relation_name(ir_alias_relation rel)
{
#define X(a) case a: return #a
	switch (rel) {
	X(ir_no_alias);
	X(ir_may_alias);
	X(ir_sure_alias);
	default:
		panic("UNKNOWN alias relation");
	}
#undef X
}

unsigned get_irg_memory_disambiguator_options(const ir_graph *irg)
{
	unsigned opt = irg->mem_disambig_opt;
	if (opt & aa_opt_inherited)
		return global_mem_disambig_opt;
	return opt;
}

void set_irg_memory_disambiguator_options(ir_graph *irg, unsigned options)
{
	irg->mem_disambig_opt = options & ~aa_opt_inherited;
}

void set_irp_memory_disambiguator_options(unsigned options)
{
	global_mem_disambig_opt = options;
}

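/** Returns the base storage class of @p x with all modifier bits removed. */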
ir_storage_class_class_t get_base_sc(ir_storage_class_class_t x)
{
	return x & ~ir_sc_modifiers;
}

/**
 * Find the base address and entity of an Sel node.
 *
 * @param sel   the node
 * @param pEnt  after return points to the base entity.
 *
 * @return the base address.
 */
static ir_node *find_base_adr(const ir_node *sel, ir_entity **pEnt)
{
	ir_node *ptr = get_Sel_ptr(sel);

	while (is_Sel(ptr)) {
		sel = ptr;
		ptr = get_Sel_ptr(sel);
	}
	*pEnt = get_Sel_entity(sel);
	return ptr;
}

/**
 * Check if a given Const node is greater than or equal to a given size.
 *
 * @param cns   a Const node
 * @param size  an integer size
 *
 * @return ir_no_alias if the Const is greater than or equal to size,
 *         ir_may_alias else
 */
static ir_alias_relation check_const(const ir_node *cns, int size)
{
	ir_tarval *tv = get_Const_tarval(cns);
	ir_tarval *tv_size;

	if (size == 0)
		return tarval_is_null(tv) ? ir_may_alias : ir_no_alias;
	tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
	return tarval_cmp(tv_size, tv) & (ir_relation_less_equal) ? ir_no_alias : ir_may_alias;
}

/**
 * Treat idx1 and idx2 as integer indexes and check whether they always
 * differ by at least size.
 *
 * @param idx1  a node representing the first index
 * @param idx2  a node representing the second index
 * @param size  an integer size
 *
 * @return ir_sure_alias iff idx1 == idx2,
 *         ir_no_alias   iff they ALWAYS differ by at least size,
 *         ir_may_alias  else
 */
static ir_alias_relation different_index(const ir_node *idx1, const ir_node *idx2, int size)
{
	if (idx1 == idx2)
		return ir_sure_alias;
	if (is_Const(idx1) && is_Const(idx2)) {
		/* both are const, we can compare them */
		ir_tarval *tv1 = get_Const_tarval(idx1);
		ir_tarval *tv2 = get_Const_tarval(idx2);
		ir_tarval *tv, *tv_size;
		ir_mode *m1, *m2;

		if (size == 0)
			return tv1 == tv2 ? ir_sure_alias : ir_no_alias;

		/* arg, modes may be different */
		m1 = get_tarval_mode(tv1);
		m2 = get_tarval_mode(tv2);
		if (m1 != m2) {
			int size = get_mode_size_bits(m1) - get_mode_size_bits(m2);

			if (size < 0) {
				/* m1 is a small mode, cast up */
				m1 = mode_is_signed(m1) ? find_signed_mode(m2) : find_unsigned_mode(m2);
				if (m1 == NULL) {
					/* should NOT happen, but if it does we give up here */
					return ir_may_alias;
				}
				tv1 = tarval_convert_to(tv1, m1);
			} else if (size > 0) {
				/* m2 is a small mode, cast up */
				m2 = mode_is_signed(m2) ? find_signed_mode(m1) : find_unsigned_mode(m1);
				if (m2 == NULL) {
					/* should NOT happen, but if it does we give up here */
					return ir_may_alias;
				}
				tv2 = tarval_convert_to(tv2, m2);
			}
			/* here the sizes should be identical, check for signedness */
			if (get_mode_sign(m1) != get_mode_sign(m2)) {
				/* find the signed one */
				if (mode_is_signed(m2)) {
					ir_tarval *t = tv1;
					ir_mode *tm = m1;
					tv1 = tv2; m1 = m2;
					tv2 = t;   m2 = tm;
				}

				/* m1 is now the signed one */
				if (!tarval_is_negative(tv1)) {
					/* tv1 is signed, but >= 0, simply cast into unsigned */
					tv1 = tarval_convert_to(tv1, m2);
				} else {
					tv_size = new_tarval_from_long(size, m2);

					if (tarval_cmp(tv2, tv_size) & (ir_relation_greater_equal)) {
						/* tv1 is negative and tv2 >= tv_size, so the difference is bigger than size */
						return ir_no_alias;
					}
					/* tv_size > tv2, so we can subtract without overflow */
					tv2 = tarval_sub(tv_size, tv2, NULL);

					/* tv1 is < 0, so we can negate it */
					tv1 = tarval_neg(tv1);

					/* cast it into unsigned; for two's complement this does the right thing for MIN_INT */
					tv1 = tarval_convert_to(tv1, m2);

					/* now we can compare without overflow */
					return tarval_cmp(tv1, tv2) & (ir_relation_greater_equal) ? ir_no_alias : ir_may_alias;
				}
			}
		}
		if (tarval_cmp(tv1, tv2) == ir_relation_greater) {
			ir_tarval *t = tv1;
			tv1 = tv2;
			tv2 = t;
		}
		/* tv1 is now the "smaller" one */
		tv      = tarval_sub(tv2, tv1, NULL);
		tv_size = new_tarval_from_long(size, get_tarval_mode(tv));
		return tarval_cmp(tv_size, tv) & (ir_relation_less_equal) ? ir_no_alias : ir_may_alias;
	}

	/* Note: we rely here on the fact that normalization puts constants on the RIGHT side */
	if (is_Add(idx1)) {
		ir_node *l1 = get_Add_left(idx1);
		ir_node *r1 = get_Add_right(idx1);

		if (l1 == idx2) {
			/* x + c == y */
			if (is_Const(r1))
				return check_const(r1, size);
		}
		if (is_Add(idx2)) {
			/* both are Adds, check if they are of x + a == x + b kind */
			ir_node *l2 = get_Add_left(idx2);
			ir_node *r2 = get_Add_right(idx2);

			if (l1 == l2)
				return different_index(r1, r2, size);
			else if (l1 == r2)
				return different_index(r1, l2, size);
			else if (r1 == r2)
				return different_index(l1, l2, size);
			else if (r1 == l2)
				return different_index(l1, r2, size);
		}
	}
	if (is_Add(idx2)) {
		ir_node *l2 = get_Add_left(idx2);
		ir_node *r2 = get_Add_right(idx2);

		if (l2 == idx1) {
			/* x + c == y */
			if (is_Const(r2))
				return check_const(r2, size);
		}
	}

	if (is_Sub(idx1)) {
		ir_node *l1 = get_Sub_left(idx1);
		ir_node *r1 = get_Sub_right(idx1);

		if (l1 == idx2) {
			/* x - c == y */
			if (is_Const(r1))
				return check_const(r1, size);
		}

		if (is_Sub(idx2)) {
			/* both are Subs, check if they are of x - a == x - b kind */
			ir_node *l2 = get_Sub_left(idx2);

			if (l1 == l2) {
				ir_node *r2 = get_Sub_right(idx2);
				return different_index(r1, r2, size);
			}
		}
	}
	if (is_Sub(idx2)) {
		ir_node *l2 = get_Sub_left(idx2);
		ir_node *r2 = get_Sub_right(idx2);

		if (l2 == idx1) {
			/* x - c == y */
			if (is_Const(r2))
				return check_const(r2, size);
		}
	}
	return ir_may_alias;
}

/**
 * Two Sel addresses have the same base address, check if their offsets are
 * different.
 *
 * @param sel1  The first Sel address.
 * @param sel2  The second Sel address.
 */
static ir_alias_relation different_sel_offsets(const ir_node *sel1, const ir_node *sel2)
{
	/* seems to be broken */
	(void) sel1;
	(void) sel2;
#if 0
	ir_entity *ent1 = get_Sel_entity(sel1);
	ir_entity *ent2 = get_Sel_entity(sel2);
	int i, check_arr = 0;

	if (ent1 == ent2)
		check_arr = 1;
	else {
		ir_type *tp1 = get_entity_type(ent1);
		ir_type *tp2 = get_entity_type(ent2);

		if (tp1 == tp2)
			check_arr = 1;
		else if (get_type_state(tp1) == layout_fixed && get_type_state(tp2) == layout_fixed &&
		         get_type_size_bits(tp1) == get_type_size_bits(tp2))
			check_arr = 1;
	}
	if (check_arr) {
		/* we select an entity of same size, check for indexes */
		int n = get_Sel_n_indexs(sel1);
		int have_no = 0;

		if (n > 0 && n == get_Sel_n_indexs(sel2)) {
			/* same non-zero number of indexes, an array access, check */
			for (i = 0; i < n; ++i) {
				ir_node *idx1 = get_Sel_index(sel1, i);
				ir_node *idx2 = get_Sel_index(sel2, i);
				ir_alias_relation res = different_index(idx1, idx2, 0); /* we can safely IGNORE the size here if it's at least >0 */

				if (res == ir_may_alias)
					return ir_may_alias;
				else if (res == ir_no_alias)
					have_no = 1;
			}
			/* if we have at least one ir_no_alias, there is no alias relation, else we have sure alias */
			return have_no > 0 ? ir_no_alias : ir_sure_alias;
		}
	}
#else
	(void) different_index;
#endif
	return ir_may_alias;
}

/**
 * Determine the alias relation by checking if adr1 and adr2 are pointers
 * to different types.
 *
 * @param adr1  The first address.
 * @param adr2  The second address.
 */
static ir_alias_relation different_types(const ir_node *adr1, const ir_node *adr2)
{
	ir_entity *ent1 = NULL, *ent2 = NULL;

	if (is_SymConst_addr_ent(adr1))
		ent1 = get_SymConst_entity(adr1);
	else if (is_Sel(adr1))
		ent1 = get_Sel_entity(adr1);

	if (is_SymConst_addr_ent(adr2))
		ent2 = get_SymConst_entity(adr2);
	else if (is_Sel(adr2))
		ent2 = get_Sel_entity(adr2);

	if (ent1 != NULL && ent2 != NULL) {
		ir_type *tp1 = get_entity_type(ent1);
		ir_type *tp2 = get_entity_type(ent2);

		if (tp1 != tp2) {
			/* do deref until no pointer types are found */
			while (is_Pointer_type(tp1) && is_Pointer_type(tp2)) {
				tp1 = get_pointer_points_to_type(tp1);
				tp2 = get_pointer_points_to_type(tp2);
			}

			if (get_type_tpop(tp1) != get_type_tpop(tp2)) {
				/* different type structure */
				return ir_no_alias;
			}
			if (is_Class_type(tp1)) {
				/* check class hierarchy */
				if (! is_SubClass_of(tp1, tp2) &&
				    ! is_SubClass_of(tp2, tp1))
					return ir_no_alias;
			} else {
				/* different types */
				return ir_no_alias;
			}
		}
	}
	return ir_may_alias;
}

/**
 * Returns non-zero if a node is the result of a malloc-like routine.
 *
 * @param node  the Proj node to test
 */
static int is_malloc_Result(const ir_node *node)
{
	node = get_Proj_pred(node);
	if (! is_Proj(node))
		return 0;
	node = get_Proj_pred(node);
	if (! is_Call(node))
		return 0;
	node = get_Call_ptr(node);
	if (is_SymConst_addr_ent(node)) {
		ir_entity *ent = get_SymConst_entity(node);

		if (get_entity_additional_properties(ent) & mtp_property_malloc)
			return 1;
		return 0;
	}
	return 0;
}

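/**
 * Classify a base address into a storage class (plus modifier bits).
 *
 * @param irn  the node representing the base address
 * @param ent  the base entity of the address if already known, else NULL
 */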
ir_storage_class_class_t classify_pointer(const ir_node *irn,
                                          const ir_entity *ent)
{
	ir_graph *irg = get_irn_irg(irn);
	ir_storage_class_class_t res = ir_sc_pointer;
	if (is_SymConst_addr_ent(irn)) {
		ir_entity *entity = get_SymConst_entity(irn);
		ir_type   *owner  = get_entity_owner(entity);
		res = owner == get_tls_type() ? ir_sc_tls : ir_sc_globalvar;
		if (! (get_entity_usage(entity) & ir_usage_address_taken))
			res |= ir_sc_modifier_nottaken;
	} else if (irn == get_irg_frame(irg)) {
		res = ir_sc_localvar;
		if (ent != NULL && !(get_entity_usage(ent) & ir_usage_address_taken))
			res |= ir_sc_modifier_nottaken;
	} else if (is_Proj(irn) && is_malloc_Result(irn)) {
		return ir_sc_malloced;
	} else if (is_Const(irn)) {
		return ir_sc_globaladdr;
	} else if (is_arg_Proj(irn)) {
		res |= ir_sc_modifier_argument;
	}

	return res;
}

/**
 * If adr represents a Bitfield Sel, skip it.
 */
static const ir_node *skip_Bitfield_Sels(const ir_node *adr)
{
	if (is_Sel(adr)) {
		ir_entity *ent     = get_Sel_entity(adr);
		ir_type   *bf_type = get_entity_type(ent);

		/* is it a bitfield type? */
		if (is_Primitive_type(bf_type) && get_primitive_base_type(bf_type) != NULL)
			adr = get_Sel_ptr(adr);
	}
	return adr;
}

/**
 * Determine the alias relation between two addresses.
 *
 * @param adr1   pointer address of the first memory operation
 * @param mode1  the mode of the data accessed through adr1
 * @param adr2   pointer address of the second memory operation
 * @param mode2  the mode of the data accessed through adr2
 *
 * @return the found memory relation
 */
static ir_alias_relation _get_alias_relation(
	const ir_node *adr1, const ir_mode *mode1,
	const ir_node *adr2, const ir_mode *mode2)
{
	ir_entity *ent1, *ent2;
	unsigned options;
	long offset1 = 0;
	long offset2 = 0;
	const ir_node *base1;
	const ir_node *base2;
	const ir_node *orig_adr1 = adr1;
	const ir_node *orig_adr2 = adr2;
	ir_graph *irg;
	unsigned mode_size;
	ir_storage_class_class_t class1, class2, mod1, mod2;
	int have_const_offsets;

	if (! get_opt_alias_analysis())
		return ir_may_alias;

	if (adr1 == adr2)
		return ir_sure_alias;

	irg = get_irn_irg(adr1);
	options = get_irg_memory_disambiguator_options(irg);

	/* The Armageddon switch */
	if (options & aa_opt_no_alias)
		return ir_no_alias;

	/* Do the addresses have constant offsets?
	 * Note: nodes are normalized to have constants at right inputs,
	 *       sub X, C is normalized to add X, -C
	 */
	have_const_offsets = 1;
	while (is_Add(adr1)) {
		ir_node *add_right = get_Add_right(adr1);
		if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
			ir_tarval *tv = get_Const_tarval(add_right);
			offset1 += get_tarval_long(tv);
			adr1     = get_Add_left(adr1);
		} else if (mode_is_reference(get_irn_mode(add_right))) {
			adr1 = add_right;
			have_const_offsets = 0;
		} else {
			adr1 = get_Add_left(adr1);
			have_const_offsets = 0;
		}
	}
	while (is_Add(adr2)) {
		ir_node *add_right = get_Add_right(adr2);
		if (is_Const(add_right) && !mode_is_reference(get_irn_mode(add_right))) {
			ir_tarval *tv = get_Const_tarval(add_right);
			offset2 += get_tarval_long(tv);
			adr2     = get_Add_left(adr2);
		} else if (mode_is_reference(get_irn_mode(add_right))) {
			adr2 = add_right;
			have_const_offsets = 0;
		} else {
			adr2 = get_Add_left(adr2);
			have_const_offsets = 0;
		}
	}

	mode_size = get_mode_size_bytes(mode1);
	if (get_mode_size_bytes(mode2) > mode_size) {
		mode_size = get_mode_size_bytes(mode2);
	}

	/* same base address -> compare offsets if possible.
	 * FIXME: type long is not sufficient for this task ...
	 */
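	/* Illustrative example: accesses at base+8 and base+4, each at most
	 * 4 bytes wide, differ by |8-4| >= 4 bytes and hence cannot overlap
	 * (ir_no_alias). */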
	if (adr1 == adr2 && have_const_offsets) {
		if ((unsigned long)labs(offset2 - offset1) >= mode_size)
			return ir_no_alias;
		else
			return ir_sure_alias;
	}

	/*
	 * Bitfields can be constructed as Sels from their base address.
	 * As they have different entities, the disambiguator would find that they
	 * are alias free. While this is true for their values, it is false for the
	 * addresses (strictly speaking, the Sels are NOT the addresses of the
	 * bitfields).
	 * So, skip those bitfield selecting Sels.
	 */
	adr1 = skip_Bitfield_Sels(adr1);
	adr2 = skip_Bitfield_Sels(adr2);

	/* skip Sels */
	base1 = adr1;
	base2 = adr2;
	ent1  = NULL;
	ent2  = NULL;
	if (is_Sel(adr1)) {
		base1 = find_base_adr(adr1, &ent1);
	}
	if (is_Sel(adr2)) {
		base2 = find_base_adr(adr2, &ent2);
	}

	/* same base address -> compare Sel entities */
	if (base1 == base2 && ent1 != NULL && ent2 != NULL) {
		if (ent1 != ent2)
			return ir_no_alias;
		else if (have_const_offsets)
			return different_sel_offsets(adr1, adr2);
	}

	mod1 = classify_pointer(base1, ent1);
	mod2 = classify_pointer(base2, ent2);

	class1 = get_base_sc(mod1);
	class2 = get_base_sc(mod2);

	/* struct-access cannot alias with variables */
	if (ent1 == NULL && ent2 != NULL && is_compound_type(get_entity_owner(ent2))
	    && (class1 == ir_sc_globalvar || class1 == ir_sc_localvar || class1 == ir_sc_tls || class1 == ir_sc_globaladdr)) {
		return ir_no_alias;
	}
	if (ent2 == NULL && ent1 != NULL && is_compound_type(get_entity_owner(ent1))
	    && (class2 == ir_sc_globalvar || class2 == ir_sc_localvar || class2 == ir_sc_tls || class2 == ir_sc_globaladdr)) {
		return ir_no_alias;
	}

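	/* Handle the storage-class combinations: an unanalysed pointer on one
	 * side, two objects from different classes, or two objects of the same
	 * class. */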
	if (class1 == ir_sc_pointer || class2 == ir_sc_pointer) {
		/* swap pointer class to class1 */
		if (class2 == ir_sc_pointer) {
			ir_storage_class_class_t temp = mod1;
			mod1 = mod2;
			mod2 = temp;
			class1 = get_base_sc(mod1);
			class2 = get_base_sc(mod2);
		}
		/* a pointer and an object whose address was never taken */
		if (mod2 & ir_sc_modifier_nottaken) {
			return ir_no_alias;
		}
		if (mod1 & ir_sc_modifier_argument) {
			if ( (options & aa_opt_no_alias_args)
			    && (mod2 & ir_sc_modifier_argument))
				return ir_no_alias;
			if ( (options & aa_opt_no_alias_args_global)
			    && (class2 == ir_sc_globalvar
			        || class2 == ir_sc_tls
			        || class2 == ir_sc_globaladdr))
				return ir_no_alias;
		}
	} else if (class1 != class2) {
		/* two objects from different memory spaces */
		return ir_no_alias;
	} else {
		/* both classes are equal */
		if (class1 == ir_sc_globalvar) {
			ir_entity *entity1 = get_SymConst_entity(base1);
			ir_entity *entity2 = get_SymConst_entity(base2);
			if (entity1 != entity2)
				return ir_no_alias;

			/* for some reason CSE didn't happen yet for the 2 SymConsts... */
			return ir_may_alias;
		} else if (class1 == ir_sc_globaladdr) {
			ir_tarval *tv = get_Const_tarval(base1);
			offset1      += get_tarval_long(tv);
			tv            = get_Const_tarval(base2);
			offset2      += get_tarval_long(tv);

			if ((unsigned long)labs(offset2 - offset1) >= mode_size)
				return ir_no_alias;
			else
				return ir_sure_alias;
		}
	}

	/* Type based alias analysis */
	if (options & aa_opt_type_based) {
		ir_alias_relation rel;

		if (options & aa_opt_byte_type_may_alias) {
			if (get_mode_size_bits(mode1) == 8 || get_mode_size_bits(mode2) == 8) {
				/* One of the modes addresses a byte. Assume ir_may_alias and
				 * skip the type based check. */
				goto leave_type_based_alias;
			}
		}
		/* cheap check: if the mode sizes do not match, the types MUST be different */
		if (get_mode_size_bits(mode1) != get_mode_size_bits(mode2))
			return ir_no_alias;

		/* cheap test: if only one is a reference mode, no alias */
		if (mode_is_reference(mode1) != mode_is_reference(mode2))
			return ir_no_alias;

		/* cheap test: if arithmetic is different, no alias */
		if (get_mode_arithmetic(mode1) != get_mode_arithmetic(mode2))
			return ir_no_alias;

		/* try rule R5 */
		rel = different_types(orig_adr1, orig_adr2);
		if (rel != ir_may_alias)
			return rel;
leave_type_based_alias:;
	}

	/* do we have a language specific memory disambiguator? */
	if (language_disambiguator != NULL) {
		ir_alias_relation rel = language_disambiguator(orig_adr1, mode1, orig_adr2, mode2);
		if (rel != ir_may_alias)
			return rel;
	}

	/* access points-to information here */
	return ir_may_alias;
}

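/* Public entry point: compute the alias relation of two addresses and emit
 * debug output describing the result. */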
ir_alias_relation get_alias_relation(
	const ir_node *adr1, const ir_mode *mode1,
	const ir_node *adr2, const ir_mode *mode2)
{
	ir_alias_relation rel = _get_alias_relation(adr1, mode1, adr2, mode2);
	DB((dbg, LEVEL_1, "alias(%+F, %+F) = %s\n", adr1, adr2, get_ir_alias_relation_name(rel)));
	return rel;
}

void set_language_memory_disambiguator(DISAMBIGUATOR_FUNC func)
{
	language_disambiguator = func;
}

/** The result cache for the memory disambiguator. */
static set *result_cache = NULL;

/** An entry in the relation cache. */
typedef struct mem_disambig_entry {
	const ir_node     *adr1;    /**< The first address. */
	const ir_mode     *mode1;   /**< The first address mode. */
	const ir_node     *adr2;    /**< The second address. */
	const ir_mode     *mode2;   /**< The second address mode. */
	ir_alias_relation  result;  /**< The alias relation result. */
} mem_disambig_entry;

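/** Compute the hash value of a relation cache entry from its two addresses. */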
#define HASH_ENTRY(adr1, adr2)  (hash_ptr(adr1) ^ hash_ptr(adr2))

/**
 * Compare two relation cache entries.
 */
static int cmp_mem_disambig_entry(const void *elt, const void *key, size_t size)
{
	const mem_disambig_entry *p1 = (const mem_disambig_entry*) elt;
	const mem_disambig_entry *p2 = (const mem_disambig_entry*) key;
	(void) size;

	return p1->adr1 == p2->adr1 && p1->adr2 == p2->adr2 &&
	       p1->mode1 == p2->mode1 && p1->mode2 == p2->mode2;
}

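/** Allocate the result cache; must be called before get_alias_relation_ex() is used. */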
void mem_disambig_init(void)
{
	result_cache = new_set(cmp_mem_disambig_entry, 8);
}

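/* Cached variant of get_alias_relation(): results are memoised in
 * result_cache, keyed by the address pair and the two access modes. */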
ir_alias_relation get_alias_relation_ex(
	const ir_node *adr1, const ir_mode *mode1,
	const ir_node *adr2, const ir_mode *mode2)
{
	mem_disambig_entry key, *entry;

	if (! get_opt_alias_analysis())
		return ir_may_alias;

	if (get_irn_opcode(adr1) > get_irn_opcode(adr2)) {
		const ir_node *t = adr1;
		adr1 = adr2;
		adr2 = t;
	}

	key.adr1  = adr1;
	key.adr2  = adr2;
	key.mode1 = mode1;
	key.mode2 = mode2;
	entry = set_find(mem_disambig_entry, result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
	if (entry != NULL)
		return entry->result;

	key.result = get_alias_relation(adr1, mode1, adr2, mode2);

	(void)set_insert(mem_disambig_entry, result_cache, &key, sizeof(key), HASH_ENTRY(adr1, adr2));
	return key.result;
}

void mem_disambig_term(void)
{
	if (result_cache != NULL) {
		del_set(result_cache);
		result_cache = NULL;
	}
}

/**
 * Check the mode of a Load/Store against the mode of the entity
 * that is accessed.
 * If the mode of the entity and the Load/Store mode do not match, we
 * have the bad reinterpret case:
 *
 * int i;
 * char b = *(char *)&i;
 *
 * We do NOT count this as one value and flag such an access as a
 * reinterpret cast.
 * However, we support one frequently used case: if both modes use
 * two's complement arithmetic, we allow casts between signed and unsigned.
 *
 * @param mode      the mode of the Load/Store
 * @param ent_mode  the mode of the accessed entity
 *
 * @return non-zero if the Load/Store is a hidden cast, zero else
 */
static int is_hidden_cast(const ir_mode *mode, const ir_mode *ent_mode)
{
	if (ent_mode == NULL)
		return false;

	if (ent_mode != mode) {
		if (get_mode_size_bits(ent_mode) != get_mode_size_bits(mode) ||
		    get_mode_arithmetic(ent_mode) != irma_twos_complement ||
		    get_mode_arithmetic(mode)     != irma_twos_complement)
			return true;
	}
	return false;
}

/**
 * Determine the usage state of a node (or its successor Sels).
 *
 * @param irn     the node
 * @param entity  the entity that irn (or its Sel chain) addresses
 */
static ir_entity_usage determine_entity_usage(const ir_node *irn, ir_entity *entity)
{
	int      i;
	ir_mode  *emode, *mode;
	ir_node  *value;
	ir_type  *tp;
	unsigned res = 0;

	for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
		ir_node *succ = get_irn_out(irn, i);

		switch (get_irn_opcode(succ)) {
		case iro_Load:
			/* beware: irn might be an Id node here, so irn might not be
			   equal to get_Load_ptr(succ) */
			res |= ir_usage_read;

			/* check if this load is not a hidden conversion */
			mode  = get_Load_mode(succ);
			emode = get_type_mode(get_entity_type(entity));
			if (is_hidden_cast(mode, emode))
				res |= ir_usage_reinterpret_cast;
			break;

		case iro_Store:
			/* check that the node is not the Store's value */
			if (irn == get_Store_value(succ)) {
				res |= ir_usage_unknown;
			}
			if (irn == get_Store_ptr(succ)) {
				res |= ir_usage_write;

				/* check if this Store is not a hidden conversion */
				value = get_Store_value(succ);
				mode  = get_irn_mode(value);
				emode = get_type_mode(get_entity_type(entity));
				if (is_hidden_cast(mode, emode))
					res |= ir_usage_reinterpret_cast;
			}
			assert(irn != get_Store_mem(succ));
			break;

		case iro_CopyB:
			/* CopyBs are like Loads/Stores */
			tp = get_entity_type(entity);
			if (tp != get_CopyB_type(succ)) {
				/* bad, different types, might be a hidden conversion */
				res |= ir_usage_reinterpret_cast;
			}
			if (irn == get_CopyB_dst(succ)) {
				res |= ir_usage_write;
			} else {
				assert(irn == get_CopyB_src(succ));
				res |= ir_usage_read;
			}
			break;

		case iro_Add:
		case iro_Sub:
			/* Check the successor of irn. */
			res |= determine_entity_usage(succ, entity);
			break;
		case iro_Sel: {
			ir_entity *sel_entity = get_Sel_entity(succ);
			/* this analysis can't handle unions correctly */
			if (is_Union_type(get_entity_owner(sel_entity))) {
				res |= ir_usage_unknown;
				break;
			}
			/* Check the successor of irn. */
			res |= determine_entity_usage(succ, sel_entity);
			break;
		}

		case iro_Call:
			if (irn == get_Call_ptr(succ)) {
				/* TODO: we could check for reinterpret casts here...
				 * But I doubt anyone is interested in that bit for
				 * function entities and I'm too lazy to write the code now.
				 */
				res |= ir_usage_read;
			} else {
				assert(irn != get_Call_mem(succ));
				res |= ir_usage_unknown;
			}
			break;

		/* skip identities */
		case iro_Id:
			res |= determine_entity_usage(succ, entity);
			break;

		/* skip tuples */
		case iro_Tuple: {
			int input_nr;
			for (input_nr = get_Tuple_n_preds(succ) - 1; input_nr >= 0;
			     --input_nr) {
				ir_node *pred = get_Tuple_pred(succ, input_nr);
				if (pred == irn) {
					int k;
					/* we found one input */
					for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
						ir_node *proj = get_irn_out(succ, k);

						if (is_Proj(proj) && get_Proj_proj(proj) == input_nr) {
							res |= determine_entity_usage(proj, entity);
							break;
						}
					}
				}
			}
			break;
		}

		default:
			/* another op, we don't know anything (we could do more advanced
			 * things like a dataflow analysis here) */
			res |= ir_usage_unknown;
			break;
		}
	}

	return (ir_entity_usage) res;
}

/**
 * Update the usage flags of all frame entities.
 */
static void analyse_irg_entity_usage(ir_graph *irg)
{
	ir_type *ft = get_irg_frame_type(irg);
	ir_node *irg_frame;
	size_t  i, n;
	int     j, k, static_link_arg;

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);

	/* set the initial state to "not used", as this is the "smallest" state */
	for (i = 0, n = get_class_n_members(ft); i < n; ++i) {
		ir_entity *ent = get_class_member(ft, i);

		/* methods can only be analyzed globally */
		if (! is_method_entity(ent)) {
			ir_entity_usage flags = ir_usage_none;
			if (get_entity_linkage(ent) & IR_LINKAGE_HIDDEN_USER)
				flags = ir_usage_unknown;
			set_entity_usage(ent, flags);
		}
	}

	irg_frame = get_irg_frame(irg);

	for (j = get_irn_n_outs(irg_frame) - 1; j >= 0; --j) {
		ir_node   *succ = get_irn_out(irg_frame, j);
		ir_entity *entity;
		unsigned   flags;

		if (!is_Sel(succ))
			continue;

		entity = get_Sel_entity(succ);
		flags  = get_entity_usage(entity);
		flags |= determine_entity_usage(succ, entity);
		set_entity_usage(entity, (ir_entity_usage) flags);
	}

	/* check inner functions accessing outer frame */
	static_link_arg = 0;
	for (i = 0, n = get_class_n_members(ft); i < n; ++i) {
		ir_entity *ent = get_class_member(ft, i);
		ir_graph  *inner_irg;
		ir_node   *args;

		if (! is_method_entity(ent))
			continue;

		inner_irg = get_entity_irg(ent);
		if (inner_irg == NULL)
			continue;

		assure_irg_outs(inner_irg);
		args = get_irg_args(inner_irg);
		for (j = get_irn_n_outs(args) - 1; j >= 0; --j) {
			ir_node *arg = get_irn_out(args, j);

			if (get_Proj_proj(arg) == static_link_arg) {
				for (k = get_irn_n_outs(arg) - 1; k >= 0; --k) {
					ir_node *succ = get_irn_out(arg, k);

					if (is_Sel(succ)) {
						ir_entity *entity = get_Sel_entity(succ);

						if (get_entity_owner(entity) == ft) {
							/* found an access to the outer frame */
							unsigned flags;

							flags  = get_entity_usage(entity);
							flags |= determine_entity_usage(succ, entity);
							set_entity_usage(entity, (ir_entity_usage) flags);
						}
					}
				}
			}
		}
	}

	/* now computed */
	add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
}

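/** Make sure the entity usage flags of the frame entities of irg are
 *  consistent; recompute them if necessary. */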
void assure_irg_entity_usage_computed(ir_graph *irg)
{
	if (irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE))
		return;

	analyse_irg_entity_usage(irg);
}


/**
 * Initialize the entity_usage flags for all members of a global-like type.
 */
static void init_entity_usage(ir_type *tp)
{
	size_t i, n;

	/* We have to be conservative: all externally visible entities are unknown. */
	for (i = 0, n = get_compound_n_members(tp); i < n; ++i) {
		ir_entity *ent   = get_compound_member(tp, i);
		unsigned   flags = ir_usage_none;

		if (entity_is_externally_visible(ent)) {
			flags |= ir_usage_unknown;
		}
		set_entity_usage(ent, (ir_entity_usage) flags);
	}
}

/**
 * Mark all entities used in the initializer as unknown usage.
 *
 * @param initializer  the initializer to check
 */
static void check_initializer_nodes(ir_initializer_t *initializer)
{
	size_t  i;
	ir_node *n;

	switch (initializer->kind) {
	case IR_INITIALIZER_CONST:
		/* let's check if it's an address */
		n = initializer->consti.value;
		if (is_SymConst_addr_ent(n)) {
			ir_entity *ent = get_SymConst_entity(n);
			set_entity_usage(ent, ir_usage_unknown);
		}
		return;
	case IR_INITIALIZER_TARVAL:
	case IR_INITIALIZER_NULL:
		return;
	case IR_INITIALIZER_COMPOUND:
		for (i = 0; i < initializer->compound.n_initializers; ++i) {
			ir_initializer_t *sub_initializer
				= initializer->compound.initializers[i];
			check_initializer_nodes(sub_initializer);
		}
		return;
	}
	panic("invalid initializer found");
}

/**
 * Mark all entities used in the initializer for the given entity as unknown
 * usage.
 *
 * @param ent  the entity
 */
static void check_initializer(ir_entity *ent)
{
	/* Beware: Methods are always initialized with "themselves". This does not
	 * count as a taken address.
	 * TODO: this initialisation with "themselves" is wrong and should be
	 * removed. */
	if (is_Method_type(get_entity_type(ent)))
		return;

	if (ent->initializer != NULL) {
		check_initializer_nodes(ent->initializer);
	}
}


/**
 * Mark all entities used in initializers as unknown usage.
 *
 * @param tp  a compound type
 */
static void check_initializers(ir_type *tp)
{
	size_t i, n;

	for (i = 0, n = get_compound_n_members(tp); i < n; ++i) {
		ir_entity *ent = get_compound_member(tp, i);

		check_initializer(ent);
	}
}

#ifdef DEBUG_libfirm
/**
 * Print the entity usage flags of all entities of a given type for debugging.
 *
 * @param tp  a compound type
 */
static void print_entity_usage_flags(const ir_type *tp)
{
	size_t i, n;
	for (i = 0, n = get_compound_n_members(tp); i < n; ++i) {
		ir_entity       *ent   = get_compound_member(tp, i);
		ir_entity_usage  flags = get_entity_usage(ent);

		if (flags == 0)
			continue;
		ir_printf("%+F:", ent);
		if (flags & ir_usage_address_taken)
			printf(" address_taken");
		if (flags & ir_usage_read)
			printf(" read");
		if (flags & ir_usage_write)
			printf(" write");
		if (flags & ir_usage_reinterpret_cast)
			printf(" reinterp_cast");
		printf("\n");
	}
}
#endif /* DEBUG_libfirm */

/**
 * Post-walker: check for global entity address
 */
static void check_global_address(ir_node *irn, void *data)
{
	ir_entity *ent;
	unsigned  flags;
	(void) data;

	if (is_SymConst_addr_ent(irn)) {
		/* A global. */
		ent = get_SymConst_entity(irn);
	} else
		return;

	flags  = get_entity_usage(ent);
	flags |= determine_entity_usage(irn, ent);
	set_entity_usage(ent, (ir_entity_usage) flags);
}

/**
 * Update the entity usage flags of all global entities.
 */
static void analyse_irp_globals_entity_usage(void)
{
	size_t       i, n;
	ir_segment_t s;

	for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
		ir_type *type = get_segment_type(s);
		init_entity_usage(type);
	}

	for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
		ir_type *type = get_segment_type(s);
		check_initializers(type);
	}

	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
		ir_graph *irg = get_irp_irg(i);

		assure_irg_outs(irg);
		irg_walk_graph(irg, NULL, check_global_address, NULL);
	}

#ifdef DEBUG_libfirm
	if (firm_dbg_get_mask(dbg) & LEVEL_1) {
		for (s = IR_SEGMENT_FIRST; s <= IR_SEGMENT_LAST; ++s) {
			print_entity_usage_flags(get_segment_type(s));
		}
	}
#endif /* DEBUG_libfirm */

	/* now computed */
	irp->globals_entity_usage_state = ir_entity_usage_computed;
}

ir_entity_usage_computed_state get_irp_globals_entity_usage_state(void)
{
	return irp->globals_entity_usage_state;
}

void set_irp_globals_entity_usage_state(ir_entity_usage_computed_state state)
{
	irp->globals_entity_usage_state = state;
}

void assure_irp_globals_entity_usage_computed(void)
{
	if (irp->globals_entity_usage_state != ir_entity_usage_not_computed)
		return;

	analyse_irp_globals_entity_usage();
}

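/** Register the debug modules used by the memory disambiguator. */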
void firm_init_memory_disambiguator(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.ana.irmemory");
	FIRM_DBG_REGISTER(dbgcall, "firm.opt.cc");
}


/** Maps method types to cloned method types. */
static pmap *mtp_map;

/**
 * Clone a method type if not already cloned.
 *
 * @param tp  the type to clone
 */
static ir_type *clone_type_and_cache(ir_type *tp)
{
	ir_type *res = pmap_get(ir_type, mtp_map, tp);

	if (res == NULL) {
		res = clone_type_method(tp);
		pmap_insert(mtp_map, tp, res);
	}

	return res;
}

/**
 * Walker: clone all call types of Calls to methods having the
 * mtp_property_private property set.
 */
static void update_calls_to_private(ir_node *call, void *env)
{
	(void) env;
	if (is_Call(call)) {
		ir_node *ptr = get_Call_ptr(call);

		if (is_SymConst(ptr)) {
			ir_entity *ent = get_SymConst_entity(ptr);
			ir_type   *ctp = get_Call_type(call);

			if (get_entity_additional_properties(ent) & mtp_property_private) {
				if ((get_method_additional_properties(ctp) & mtp_property_private) == 0) {
					ctp = clone_type_and_cache(ctp);
					add_method_additional_properties(ctp, mtp_property_private);
					set_Call_type(call, ctp);
					DB((dbgcall, LEVEL_1, "changed call to private method %+F using cloned type %+F\n", ent, ctp));
				}
			}
		}
	}
}

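/* Mark all methods that are local to this compilation unit and whose address
 * is never taken as private, cloning their (call) types where necessary. */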
void mark_private_methods(void)
{
	size_t i, n;
	int    changed = 0;

	assure_irp_globals_entity_usage_computed();

	mtp_map = pmap_create();

	/* first step: mark the local, non-escaping methods as private */
	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
		ir_graph        *irg   = get_irp_irg(i);
		ir_entity       *ent   = get_irg_entity(irg);
		ir_entity_usage  flags = get_entity_usage(ent);

		if (!(flags & ir_usage_address_taken) && !entity_is_externally_visible(ent)) {
			ir_type *mtp = get_entity_type(ent);

			add_entity_additional_properties(ent, mtp_property_private);
			DB((dbgcall, LEVEL_1, "found private method %+F\n", ent));
			if ((get_method_additional_properties(mtp) & mtp_property_private) == 0) {
				/* need a new type */
				mtp = clone_type_and_cache(mtp);
				add_method_additional_properties(mtp, mtp_property_private);
				set_entity_type(ent, mtp);
				DB((dbgcall, LEVEL_2, "changed entity type of %+F to %+F\n", ent, mtp));
				changed = 1;
			}
		}
	}

	if (changed)
		all_irg_walk(NULL, update_calls_to_private, NULL);

	pmap_destroy(mtp_map);
}

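/** Create an ir_prog pass wrapping mark_private_methods(). */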
ir_prog_pass_t *mark_private_methods_pass(const char *name)
{
	return def_prog_pass(name ? name : "mark_private_methods", mark_private_methods);
}