xref: /netbsd/external/gpl3/gcc/dist/gcc/analyzer/store.cc (revision f0fbc68b)
1 /* Classes for modeling the state of memory.
2    Copyright (C) 2020-2022 Free Software Foundation, Inc.
3    Contributed by David Malcolm <dmalcolm@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tree.h"
25 #include "function.h"
26 #include "basic-block.h"
27 #include "gimple.h"
28 #include "gimple-iterator.h"
29 #include "diagnostic-core.h"
30 #include "graphviz.h"
31 #include "options.h"
32 #include "cgraph.h"
33 #include "tree-dfa.h"
34 #include "stringpool.h"
35 #include "convert.h"
36 #include "target.h"
37 #include "fold-const.h"
38 #include "tree-pretty-print.h"
39 #include "diagnostic-color.h"
40 #include "diagnostic-metadata.h"
41 #include "tristate.h"
42 #include "bitmap.h"
43 #include "selftest.h"
44 #include "function.h"
45 #include "json.h"
46 #include "analyzer/analyzer.h"
47 #include "analyzer/analyzer-logging.h"
48 #include "ordered-hash-map.h"
49 #include "options.h"
50 #include "cgraph.h"
51 #include "cfg.h"
52 #include "digraph.h"
53 #include "analyzer/supergraph.h"
54 #include "sbitmap.h"
55 #include "analyzer/call-string.h"
56 #include "analyzer/program-point.h"
57 #include "analyzer/store.h"
58 #include "analyzer/region-model.h"
59 #include "analyzer/analyzer-selftests.h"
60 #include "stor-layout.h"
61 
62 #if ENABLE_ANALYZER
63 
64 namespace ana {
65 
66 /* Dump SVALS to PP, sorting them to ensure determinism.  */
67 
68 static void
69 dump_svalue_set (const hash_set <const svalue *> &svals,
70 		 pretty_printer *pp, bool simple)
71 {
72   auto_vec <const svalue *> v;
73   for (hash_set<const svalue *>::iterator iter = svals.begin ();
74        iter != svals.end (); ++iter)
75     {
76       v.safe_push (*iter);
77     }
78   v.qsort (svalue::cmp_ptr_ptr);
79 
80   pp_character (pp, '{');
81   const svalue *sval;
82   unsigned i;
83   FOR_EACH_VEC_ELT (v, i, sval)
84     {
85       if (i > 0)
86 	pp_string (pp, ", ");
87       sval->dump_to_pp (pp, simple);
88     }
89   pp_character (pp, '}');
90 }
91 
92 /* class uncertainty_t.  */
93 
94 /* Dump this object to PP.  */
95 
96 void
97 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
98 {
99   pp_string (pp, "{m_maybe_bound_svals: ");
100   dump_svalue_set (m_maybe_bound_svals, pp, simple);
101 
102   pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
103   dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
104   pp_string (pp, "}");
105 }
106 
107 /* Dump this object to stderr.  */
108 
109 DEBUG_FUNCTION void
110 uncertainty_t::dump (bool simple) const
111 {
112   pretty_printer pp;
113   pp_format_decoder (&pp) = default_tree_printer;
114   pp_show_color (&pp) = pp_show_color (global_dc->printer);
115   pp.buffer->stream = stderr;
116   dump_to_pp (&pp, simple);
117   pp_newline (&pp);
118   pp_flush (&pp);
119 }
120 
121 /* class binding_key.  */
122 
123 const binding_key *
124 binding_key::make (store_manager *mgr, const region *r)
125 {
126   region_offset offset = r->get_offset ();
127   if (offset.symbolic_p ())
128     return mgr->get_symbolic_binding (r);
129   else
130     {
131       bit_size_t bit_size;
132       if (r->get_bit_size (&bit_size))
133 	return mgr->get_concrete_binding (offset.get_bit_offset (),
134 					  bit_size);
135       else
136 	return mgr->get_symbolic_binding (r);
137     }
138 }
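/* For example, a decl or field at a compile-time-known position gets a
   concrete_binding over its bit range, whereas a region whose offset or
   size is not known statically (e.g. an element at a symbolic index, or a
   region of symbolic size) gets a symbolic_binding wrapping the region.  */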
139 
140 /* Dump this binding_key to stderr.  */
141 
142 DEBUG_FUNCTION void
143 binding_key::dump (bool simple) const
144 {
145   pretty_printer pp;
146   pp_format_decoder (&pp) = default_tree_printer;
147   pp_show_color (&pp) = pp_show_color (global_dc->printer);
148   pp.buffer->stream = stderr;
149   dump_to_pp (&pp, simple);
150   pp_newline (&pp);
151   pp_flush (&pp);
152 }
153 
154 /* Get a description of this binding_key.  */
155 
156 label_text
157 binding_key::get_desc (bool simple) const
158 {
159   pretty_printer pp;
160   pp_format_decoder (&pp) = default_tree_printer;
161   dump_to_pp (&pp, simple);
162   return label_text::take (xstrdup (pp_formatted_text (&pp)));
163 }
164 
165 /* qsort callback.  */
166 
167 int
168 binding_key::cmp_ptrs (const void *p1, const void *p2)
169 {
170   const binding_key * const *pk1 = (const binding_key * const *)p1;
171   const binding_key * const *pk2 = (const binding_key * const *)p2;
172   return cmp (*pk1, *pk2);
173 }
174 
175 /* Comparator for binding_keys.  */
176 
177 int
178 binding_key::cmp (const binding_key *k1, const binding_key *k2)
179 {
180   int concrete1 = k1->concrete_p ();
181   int concrete2 = k2->concrete_p ();
182   if (int concrete_cmp = concrete1 - concrete2)
183     return concrete_cmp;
184   if (concrete1)
185     {
186       const concrete_binding *b1 = (const concrete_binding *)k1;
187       const concrete_binding *b2 = (const concrete_binding *)k2;
188       if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
189 				   b2->get_start_bit_offset (),
190 				   SIGNED))
191 	return start_cmp;
192       return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
193 		      SIGNED);
194     }
195   else
196     {
197       const symbolic_binding *s1 = (const symbolic_binding *)k1;
198       const symbolic_binding *s2 = (const symbolic_binding *)k2;
199       if (s1 > s2)
200 	return 1;
201       if (s1 < s2)
202 	return -1;
203       return 0;
204     }
205 }
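/* For example, under this ordering symbolic keys sort before concrete
   keys, and concrete keys are ordered first by start bit offset and then
   by next bit offset, so {start: 0, size: 32} sorts before
   {start: 0, size: 64}, which sorts before {start: 32, size: 32}.  */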
206 
207 /* struct bit_range.  */
208 
209 void
210 bit_range::dump_to_pp (pretty_printer *pp) const
211 {
212   byte_range bytes (0, 0);
213   if (as_byte_range (&bytes))
214     bytes.dump_to_pp (pp);
215   else
216     {
217       pp_string (pp, "start: ");
218       pp_wide_int (pp, m_start_bit_offset, SIGNED);
219       pp_string (pp, ", size: ");
220       pp_wide_int (pp, m_size_in_bits, SIGNED);
221       pp_string (pp, ", next: ");
222       pp_wide_int (pp, get_next_bit_offset (), SIGNED);
223     }
224 }
225 
226 /* Dump this object to stderr.  */
227 
228 DEBUG_FUNCTION void
229 bit_range::dump () const
230 {
231   pretty_printer pp;
232   pp.buffer->stream = stderr;
233   dump_to_pp (&pp);
234   pp_newline (&pp);
235   pp_flush (&pp);
236 }
237 
238 /* If OTHER is a subset of this, return true and write
239    to *OUT the relative range of OTHER within this.
240    Otherwise return false.  */
241 
242 bool
243 bit_range::contains_p (const bit_range &other, bit_range *out) const
244 {
245   if (contains_p (other.get_start_bit_offset ())
246       && contains_p (other.get_last_bit_offset ()))
247     {
248       out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
249       out->m_size_in_bits = other.m_size_in_bits;
250       return true;
251     }
252   else
253     return false;
254 }
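/* For example, if this is the bit_range {start: 8, size: 24} and OTHER is
   {start: 16, size: 8}, OTHER is contained within this and *OUT is written
   with the relative bit_range {start: 8, size: 8}.  */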
255 
256 /* If OTHER intersects this, return true and write
257    the relative range of OTHER within THIS to *OUT_THIS,
258    and the relative range of THIS within OTHER to *OUT_OTHER.
259    Otherwise return false.  */
260 
261 bool
262 bit_range::intersects_p (const bit_range &other,
263 			 bit_range *out_this,
264 			 bit_range *out_other) const
265 {
266   if (get_start_bit_offset () < other.get_next_bit_offset ()
267       && other.get_start_bit_offset () < get_next_bit_offset ())
268     {
269       bit_offset_t overlap_start
270 	= MAX (get_start_bit_offset (),
271 	       other.get_start_bit_offset ());
272       bit_offset_t overlap_next
273 	= MIN (get_next_bit_offset (),
274 	       other.get_next_bit_offset ());
275       gcc_assert (overlap_next > overlap_start);
276       bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
277       *out_this = abs_overlap_bits - get_start_bit_offset ();
278       *out_other = abs_overlap_bits - other.get_start_bit_offset ();
279       return true;
280     }
281   else
282     return false;
283 }
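/* For example, the bit_ranges {start: 0, size: 32} and {start: 24, size: 16}
   intersect in bits 24-31, so *OUT_THIS is written with {start: 24, size: 8}
   and *OUT_OTHER with {start: 0, size: 8}.  */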
284 
285 int
286 bit_range::cmp (const bit_range &br1, const bit_range &br2)
287 {
288   if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
289 				br2.m_start_bit_offset))
290     return start_cmp;
291 
292   return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
293 }
294 
295 /* Offset this range by OFFSET.  */
296 
297 bit_range
298 bit_range::operator- (bit_offset_t offset) const
299 {
300   return bit_range (m_start_bit_offset - offset, m_size_in_bits);
301 }
302 
303 /* If MASK is a contiguous range of set bits, write them
304    to *OUT and return true.
305    Otherwise return false.  */
306 
307 bool
308 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
309 {
310   unsigned iter_bit_idx = 0;
311   unsigned HOST_WIDE_INT iter_bit_mask = 1;
312 
313   /* Find the first contiguous run of set bits in MASK.  */
314 
315   /* Find first set bit in MASK.  */
316   while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
317     {
318       if (mask & iter_bit_mask)
319 	break;
320       iter_bit_idx++;
321       iter_bit_mask <<= 1;
322     }
323   if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
324     /* MASK is zero.  */
325     return false;
326 
327   unsigned first_set_iter_bit_idx = iter_bit_idx;
328   unsigned num_set_bits = 1;
329   iter_bit_idx++;
330   iter_bit_mask <<= 1;
331 
332   /* Find next unset bit in MASK.  */
333   while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
334     {
335       if (!(mask & iter_bit_mask))
336 	break;
337       num_set_bits++;
338       iter_bit_idx++;
339       iter_bit_mask <<= 1;
340     }
341   if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
342     {
343       *out = bit_range (first_set_iter_bit_idx, num_set_bits);
344       return true;
345     }
346 
347   /* We now have the first contiguous run of set bits in MASK.
348      Fail if any other bits are set.  */
349   while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
350     {
351       if (mask & iter_bit_mask)
352 	return false;
353       iter_bit_idx++;
354       iter_bit_mask <<= 1;
355     }
356 
357   *out = bit_range (first_set_iter_bit_idx, num_set_bits);
358   return true;
359 }
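/* For example, the mask 0x3c (binary 111100) gives the bit_range
   {start: 2, size: 4}, whereas 0x5 (binary 101) fails because its set
   bits are not contiguous.  */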
360 
361 /* Attempt to convert this bit_range to a byte_range.
362    Return true if it is possible, writing the result to *OUT.
363    Otherwise return false.  */
364 
365 bool
366 bit_range::as_byte_range (byte_range *out) const
367 {
368   if (m_start_bit_offset % BITS_PER_UNIT == 0
369       && m_size_in_bits % BITS_PER_UNIT == 0)
370     {
371       out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
372       out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
373       return true;
374     }
375   return false;
376 }
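/* For example, assuming 8-bit bytes (BITS_PER_UNIT == 8), the bit_range
   {start: 8, size: 16} becomes the byte_range {start: 1, size: 2}, whereas
   {start: 4, size: 8} fails since it does not start on a byte boundary.  */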
377 
378 /* Dump this object to PP.  */
379 
380 void
381 byte_range::dump_to_pp (pretty_printer *pp) const
382 {
383   if (m_size_in_bytes == 1)
384     {
385       pp_string (pp, "byte ");
386       pp_wide_int (pp, m_start_byte_offset, SIGNED);
387     }
388   else
389     {
390       pp_string (pp, "bytes ");
391       pp_wide_int (pp, m_start_byte_offset, SIGNED);
392       pp_string (pp, "-");
393       pp_wide_int (pp, get_last_byte_offset (), SIGNED);
394     }
395 }
396 
397 /* Dump this object to stderr.  */
398 
399 DEBUG_FUNCTION void
400 byte_range::dump () const
401 {
402   pretty_printer pp;
403   pp.buffer->stream = stderr;
404   dump_to_pp (&pp);
405   pp_newline (&pp);
406   pp_flush (&pp);
407 }
408 
409 /* If OTHER is a subset of this, return true and write
410    to *OUT the relative range of OTHER within this.
411    Otherwise return false.  */
412 
413 bool
414 byte_range::contains_p (const byte_range &other, byte_range *out) const
415 {
416   if (contains_p (other.get_start_byte_offset ())
417       && contains_p (other.get_last_byte_offset ()))
418     {
419       out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
420       out->m_size_in_bytes = other.m_size_in_bytes;
421       return true;
422     }
423   else
424     return false;
425 }
426 
427 /* qsort comparator for byte ranges.  */
428 
429 int
430 byte_range::cmp (const byte_range &br1, const byte_range &br2)
431 {
432   /* Order first by offset.  */
433   if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
434 				br2.m_start_byte_offset))
435     return start_cmp;
436 
437   /* ...then by size.  */
438   return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
439 }
440 
441 /* class concrete_binding : public binding_key.  */
442 
443 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding.  */
444 
445 void
446 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
447 {
448   m_bit_range.dump_to_pp (pp);
449 }
450 
451 /* Return true if this binding overlaps with OTHER.  */
452 
453 bool
454 concrete_binding::overlaps_p (const concrete_binding &other) const
455 {
456   if (get_start_bit_offset () < other.get_next_bit_offset ()
457       && get_next_bit_offset () > other.get_start_bit_offset ())
458     return true;
459   return false;
460 }
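/* For example, the bindings for bits 0-31 and bits 16-47 overlap, whereas
   those for bits 0-31 and bits 32-63 do not, since the ranges are half-open
   at their "next" offsets.  */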
461 
462 /* Comparator for use by vec<const concrete_binding *>::qsort.  */
463 
464 int
465 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
466 {
467   const concrete_binding *b1 = *(const concrete_binding * const *)p1;
468   const concrete_binding *b2 = *(const concrete_binding * const *)p2;
469 
470   return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
471 }
472 
473 /* class symbolic_binding : public binding_key.  */
474 
475 void
476 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
477 {
478   //binding_key::dump_to_pp (pp, simple);
479   pp_string (pp, "region: ");
480   m_region->dump_to_pp (pp, simple);
481 }
482 
483 /* Comparator for use by vec<const symbolic_binding *>::qsort.  */
484 
485 int
486 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
487 {
488   const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
489   const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
490 
491   return region::cmp_ids (b1->get_region (), b2->get_region ());
492 }
493 
494 /* The store is oblivious to the types of the svalues bound within
495    it: any type can get bound at any location.
496    Simplify any casts before binding.
497 
498    For example, if we have:
499      struct big { int ia[1024]; };
500      struct big src, dst;
501      memcpy (&dst, &src, sizeof (struct big));
502    this reaches us in gimple form as:
503      MEM <unsigned char[4096]> [(char * {ref-all})&dst]
504        = MEM <unsigned char[4096]> [(char * {ref-all})&src];
505    Using cast_region when handling the MEM_REF would give us:
506      INIT_VAL(CAST_REG(unsigned char[4096], src))
507    as rhs_sval, but we can fold that into a cast svalue:
508      CAST(unsigned char[4096], INIT_VAL(src))
509    We can discard that cast from the svalue when binding it in
510    the store for "dst", and simply store:
511      cluster for: dst
512        key:   {kind: direct, start: 0, size: 32768, next: 32768}
513        value: ‘struct big’ {INIT_VAL(src)}.  */
514 
515 static const svalue *
516 simplify_for_binding (const svalue *sval)
517 {
518   if (const svalue *cast_sval = sval->maybe_undo_cast ())
519     sval = cast_sval;
520   return sval;
521 }
522 
523 /* class binding_map.  */
524 
525 /* binding_map's copy ctor.  */
526 
527 binding_map::binding_map (const binding_map &other)
528 : m_map (other.m_map)
529 {
530 }
531 
532 /* binding_map's assignment operator.  */
533 
534 binding_map&
535 binding_map::operator=(const binding_map &other)
536 {
537   /* For now, assume we only ever copy to an empty cluster.  */
538   gcc_assert (m_map.elements () == 0);
539   for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
540        ++iter)
541     {
542       const binding_key *key = (*iter).first;
543       const svalue *sval = (*iter).second;
544       m_map.put (key, sval);
545     }
546   return *this;
547 }
548 
549 /* binding_map's equality operator.  */
550 
551 bool
552 binding_map::operator== (const binding_map &other) const
553 {
554   if (m_map.elements () != other.m_map.elements ())
555     return false;
556 
557   for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
558     {
559       const binding_key *key = (*iter).first;
560       const svalue *sval = (*iter).second;
561       const svalue **other_slot
562 	= const_cast <map_t &> (other.m_map).get (key);
563       if (other_slot == NULL)
564 	return false;
565       if (sval != *other_slot)
566 	return false;
567     }
568   gcc_checking_assert (hash () == other.hash ());
569   return true;
570 }
571 
572 /* Generate a hash value for this binding_map.  */
573 
574 hashval_t
575 binding_map::hash () const
576 {
577   hashval_t result = 0;
578   for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
579     {
580       /* Use a new hasher for each key to avoid depending on the ordering
581 	 of keys when accumulating the result.  */
582       inchash::hash hstate;
583       hstate.add_ptr ((*iter).first);
584       hstate.add_ptr ((*iter).second);
585       result ^= hstate.end ();
586     }
587   return result;
588 }
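/* Note that combining the per-key hashes with XOR keeps the result
   independent of the hash_map's iteration order, so equal binding_maps
   hash to the same value regardless of insertion order.  */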
589 
590 /* Dump a representation of this binding_map to PP.
591    SIMPLE controls how values and regions are to be printed.
592    If MULTILINE, then split the dump over multiple lines and
593    use whitespace for readability, otherwise put all on one line.  */
594 
595 void
596 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
597 			 bool multiline) const
598 {
599   auto_vec <const binding_key *> binding_keys;
600   for (map_t::iterator iter = m_map.begin ();
601        iter != m_map.end (); ++iter)
602     {
603       const binding_key *key = (*iter).first;
604       binding_keys.safe_push (key);
605     }
606   binding_keys.qsort (binding_key::cmp_ptrs);
607 
608   const binding_key *key;
609   unsigned i;
610   FOR_EACH_VEC_ELT (binding_keys, i, key)
611     {
612       const svalue *value = *const_cast <map_t &> (m_map).get (key);
613       if (multiline)
614 	{
615 	  pp_string (pp, "    key:   {");
616 	  key->dump_to_pp (pp, simple);
617 	  pp_string (pp, "}");
618 	  pp_newline (pp);
619 	  pp_string (pp, "    value: ");
620 	  if (tree t = value->get_type ())
621 	    dump_quoted_tree (pp, t);
622 	  pp_string (pp, " {");
623 	  value->dump_to_pp (pp, simple);
624 	  pp_string (pp, "}");
625 	  pp_newline (pp);
626 	}
627       else
628 	{
629 	  if (i > 0)
630 	    pp_string (pp, ", ");
631 	  pp_string (pp, "binding key: {");
632 	  key->dump_to_pp (pp, simple);
633 	  pp_string (pp, "}, value: {");
634 	  value->dump_to_pp (pp, simple);
635 	  pp_string (pp, "}");
636 	}
637     }
638 }
639 
640 /* Dump a multiline representation of this binding_map to stderr.  */
641 
642 DEBUG_FUNCTION void
643 binding_map::dump (bool simple) const
644 {
645   pretty_printer pp;
646   pp_format_decoder (&pp) = default_tree_printer;
647   pp_show_color (&pp) = pp_show_color (global_dc->printer);
648   pp.buffer->stream = stderr;
649   dump_to_pp (&pp, simple, true);
650   pp_newline (&pp);
651   pp_flush (&pp);
652 }
653 
654 /* Return a new json::object of the form
655    {KEY_DESC : SVALUE_DESC,
656     ...for the various key/value pairs in this binding_map}.  */
657 
658 json::object *
659 binding_map::to_json () const
660 {
661   json::object *map_obj = new json::object ();
662 
663   auto_vec <const binding_key *> binding_keys;
664   for (map_t::iterator iter = m_map.begin ();
665        iter != m_map.end (); ++iter)
666     {
667       const binding_key *key = (*iter).first;
668       binding_keys.safe_push (key);
669     }
670   binding_keys.qsort (binding_key::cmp_ptrs);
671 
672   const binding_key *key;
673   unsigned i;
674   FOR_EACH_VEC_ELT (binding_keys, i, key)
675     {
676       const svalue *value = *const_cast <map_t &> (m_map).get (key);
677       label_text key_desc = key->get_desc ();
678       map_obj->set (key_desc.m_buffer, value->to_json ());
679       key_desc.maybe_free ();
680     }
681 
682   return map_obj;
683 }
684 
685 /* Comparator for imposing an order on binding_maps.  */
686 
687 int
688 binding_map::cmp (const binding_map &map1, const binding_map &map2)
689 {
690   if (int count_cmp = map1.elements () - map2.elements ())
691     return count_cmp;
692 
693   auto_vec <const binding_key *> keys1 (map1.elements ());
694   for (map_t::iterator iter = map1.begin ();
695        iter != map1.end (); ++iter)
696     keys1.quick_push ((*iter).first);
697   keys1.qsort (binding_key::cmp_ptrs);
698 
699   auto_vec <const binding_key *> keys2 (map2.elements ());
700   for (map_t::iterator iter = map2.begin ();
701        iter != map2.end (); ++iter)
702     keys2.quick_push ((*iter).first);
703   keys2.qsort (binding_key::cmp_ptrs);
704 
705   for (size_t i = 0; i < keys1.length (); i++)
706     {
707       const binding_key *k1 = keys1[i];
708       const binding_key *k2 = keys2[i];
709       if (int key_cmp = binding_key::cmp (k1, k2))
710 	return key_cmp;
711       gcc_assert (k1 == k2);
712       if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
713 	return sval_cmp;
714     }
715 
716   return 0;
717 }
718 
719 /* Get the child region of PARENT_REG based upon INDEX within a
720    CONSTRUCTOR.   */
721 
722 static const region *
723 get_subregion_within_ctor (const region *parent_reg, tree index,
724 			   region_model_manager *mgr)
725 {
726   switch (TREE_CODE (index))
727     {
728     default:
729       gcc_unreachable ();
730     case INTEGER_CST:
731       {
732 	const svalue *index_sval
733 	  = mgr->get_or_create_constant_svalue (index);
734 	return mgr->get_element_region (parent_reg,
735 					TREE_TYPE (parent_reg->get_type ()),
736 					index_sval);
737       }
738       break;
739     case FIELD_DECL:
740       return mgr->get_field_region (parent_reg, index);
741     }
742 }
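/* For example, within a CONSTRUCTOR for an array, an INTEGER_CST index of
   3 yields the element_region for element [3] of PARENT_REG, whereas within
   a CONSTRUCTOR for a struct, a FIELD_DECL index yields the corresponding
   field_region.  */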
743 
744 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR.  */
745 
746 static const svalue *
747 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
748 {
749   /* Reuse the get_rvalue logic from region_model.  */
750   region_model m (mgr);
751   return m.get_rvalue (path_var (val, 0), NULL);
752 }
753 
754 /* Bind values from CONSTRUCTOR to this map, relative to
755    PARENT_REG's relationship to its base region.
756    Return true if successful, false if there was a problem (e.g. due
757    to hitting a complexity limit).  */
758 
759 bool
760 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
761 				   region_model_manager *mgr)
762 {
763   gcc_assert (parent_reg);
764   gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
765 
766   unsigned ix;
767   tree index;
768   tree val;
769   tree parent_type = parent_reg->get_type ();
770   tree field;
771   if (TREE_CODE (parent_type) == RECORD_TYPE)
772     field = TYPE_FIELDS (parent_type);
773   else
774     field = NULL_TREE;
775   FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
776     {
777       if (!index)
778 	{
779 	  /* If index is NULL, then iterate through the fields for
780 	     a RECORD_TYPE, or use an INTEGER_CST otherwise.
781 	     Compare with similar logic in output_constructor.  */
782 	  if (field)
783 	    {
784 	      index = field;
785 	      field = DECL_CHAIN (field);
786 	    }
787 	  else
788 	    index = build_int_cst (integer_type_node, ix);
789 	}
790       else if (TREE_CODE (index) == RANGE_EXPR)
791 	{
792 	  tree min_index = TREE_OPERAND (index, 0);
793 	  tree max_index = TREE_OPERAND (index, 1);
794 	  if (min_index == max_index)
795 	    {
796 	      if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
797 						    min_index, val))
798 		return false;
799 	    }
800 	  else
801 	    {
802 	      if (!apply_ctor_val_to_range (parent_reg, mgr,
803 					    min_index, max_index, val))
804 		return false;
805 	    }
806 	  continue;
807 	}
808       if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
809 	return false;
810     }
811   return true;
812 }
813 
814 /* Bind the value VAL into the range of elements within PARENT_REG
815    from MIN_INDEX to MAX_INDEX (including endpoints).
816    For use in handling RANGE_EXPR within a CONSTRUCTOR.
817    Return true if successful, false if there was a problem (e.g. due
818    to hitting a complexity limit).  */
819 
820 bool
821 binding_map::apply_ctor_val_to_range (const region *parent_reg,
822 				      region_model_manager *mgr,
823 				      tree min_index, tree max_index,
824 				      tree val)
825 {
826   gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
827   gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
828 
829   /* Generate a binding key for the range.  */
830   const region *min_element
831     = get_subregion_within_ctor (parent_reg, min_index, mgr);
832   const region *max_element
833     = get_subregion_within_ctor (parent_reg, max_index, mgr);
834   region_offset min_offset = min_element->get_offset ();
835   if (min_offset.symbolic_p ())
836     return false;
837   bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
838   store_manager *smgr = mgr->get_store_manager ();
839   const binding_key *max_element_key = binding_key::make (smgr, max_element);
840   if (max_element_key->symbolic_p ())
841     return false;
842   const concrete_binding *max_element_ckey
843     = max_element_key->dyn_cast_concrete_binding ();
844   bit_size_t range_size_in_bits
845     = max_element_ckey->get_next_bit_offset () - start_bit_offset;
846   const concrete_binding *range_key
847     = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
848   if (range_key->symbolic_p ())
849     return false;
850 
851   /* Get the value.  */
852   if (TREE_CODE (val) == CONSTRUCTOR)
853     return false;
854   const svalue *sval = get_svalue_for_ctor_val (val, mgr);
855 
856   /* Bind the value to the range.  */
857   put (range_key, sval);
858   return true;
859 }
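/* For example, given "int arr[8] = { [2 ... 5] = 42 };" with 32-bit int,
   MIN_INDEX is 2 and MAX_INDEX is 5, so a single concrete key
   {start: 64, size: 128} is bound to the svalue for (int)42, covering
   elements 2 through 5.  */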
860 
861 /* Bind the value VAL into INDEX within PARENT_REG.
862    For use in handling a pair of entries within a CONSTRUCTOR.
863    Return true if successful, false if there was a problem (e.g. due
864    to hitting a complexity limit).  */
865 
866 bool
867 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
868 					      region_model_manager *mgr,
869 					      tree index, tree val)
870 {
871   const region *child_reg
872     = get_subregion_within_ctor (parent_reg, index, mgr);
873   if (TREE_CODE (val) == CONSTRUCTOR)
874     return apply_ctor_to_region (child_reg, val, mgr);
875   else
876     {
877       const svalue *sval = get_svalue_for_ctor_val (val, mgr);
878       const binding_key *k
879 	= binding_key::make (mgr->get_store_manager (), child_reg);
880       /* Handle the case where we have an unknown size for child_reg
881 	 (e.g. due to it being a trailing field with incomplete array
882 	 type.  */
883       if (!k->concrete_p ())
884 	{
885 	  /* Assume that sval has a well-defined size for this case.  */
886 	  tree sval_type = sval->get_type ();
887 	  gcc_assert (sval_type);
888 	  HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
889 	  gcc_assert (sval_byte_size != -1);
890 	  bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
891 	  /* Get offset of child relative to base region.  */
892 	  region_offset child_base_offset = child_reg->get_offset ();
893 	  if (child_base_offset.symbolic_p ())
894 	    return false;
895 	  /* Convert to an offset relative to the parent region.  */
896 	  region_offset parent_base_offset = parent_reg->get_offset ();
897 	  gcc_assert (!parent_base_offset.symbolic_p ());
898 	  bit_offset_t child_parent_offset
899 	    = (child_base_offset.get_bit_offset ()
900 	       - parent_base_offset.get_bit_offset ());
901 	  /* Create a concrete key for the child within the parent.  */
902 	  k = mgr->get_store_manager ()->get_concrete_binding
903 	    (child_parent_offset, sval_bit_size);
904 	}
905       gcc_assert (k->concrete_p ());
906       put (k, sval);
907       return true;
908     }
909 }
910 
911 /* Populate OUT with all bindings within this map that overlap KEY.  */
912 
913 void
914 binding_map::get_overlapping_bindings (const binding_key *key,
915 				       auto_vec<const binding_key *> *out)
916 {
917   for (auto iter : *this)
918     {
919       const binding_key *iter_key = iter.first;
920       if (const concrete_binding *ckey
921 	    = key->dyn_cast_concrete_binding ())
922 	{
923 	  if (const concrete_binding *iter_ckey
924 	      = iter_key->dyn_cast_concrete_binding ())
925 	    {
926 	      if (ckey->overlaps_p (*iter_ckey))
927 		out->safe_push (iter_key);
928 	    }
929 	  else
930 	    {
931 	      /* Assume overlap.  */
932 	      out->safe_push (iter_key);
933 	    }
934 	}
935       else
936 	{
937 	  /* Assume overlap.  */
938 	  out->safe_push (iter_key);
939 	}
940     }
941 }
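/* Note that whenever either key is symbolic we conservatively assume
   overlap: a symbolic key in the map matches any lookup, and a symbolic
   KEY matches every binding in the map.  */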
942 
943 /* Remove, truncate, and/or split any bindings within this map that
944    overlap DROP_KEY.
945 
946    For example, if we have:
947 
948      +------------------------------------+
949      |             old binding            |
950      +------------------------------------+
951 
952    which is to be overwritten with:
953 
954      .......+----------------------+.......
955      .......|      new binding     |.......
956      .......+----------------------+.......
957 
958    this function "cuts a hole" out of the old binding:
959 
960      +------+......................+------+
961      |prefix| hole for new binding |suffix|
962      +------+......................+------+
963 
964    into which the new binding can be added without
965    overlapping the prefix or suffix.
966 
967    The prefix and suffix (if added) will be bound to the pertinent
968    parts of the value of the old binding.
969 
970    For example, given:
971      struct s5
972      {
973        char arr[8];
974      };
975      void test_5 (struct s5 *p)
976      {
977        struct s5 f = *p;
978        f.arr[3] = 42;
979      }
980    then after the "f = *p;" we have:
981      cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
982    and at the "f.arr[3] = 42;" we remove the bindings overlapping
983    "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
984    giving:
985      cluster for: f
986        key:   {bytes 0-2}
987        value:  {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
988        key:   {bytes 4-7}
989        value:  {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
990    punching a hole into which the new value can be written at byte 3:
991      cluster for: f
992        key:   {bytes 0-2}
993        value:  {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
994        key:   {byte 3}
995        value: 'char' {(char)42}
996        key:   {bytes 4-7}
997        value:  {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
998 
999    If UNCERTAINTY is non-NULL, use it to record any svalues that
1000    were removed, as being maybe-bound.
1001 
1002    If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1003    in the map, due to one or both of the underlying clusters being
1004    symbolic (but not the same symbolic region).  Hence even if DROP_KEY is a
1005    concrete binding it could actually be referring to the same memory as
1006    distinct concrete bindings in the map.  Remove all bindings, but
1007    register any svalues with *UNCERTAINTY.  */
1008 
1009 void
1010 binding_map::remove_overlapping_bindings (store_manager *mgr,
1011 					  const binding_key *drop_key,
1012 					  uncertainty_t *uncertainty,
1013 					  bool always_overlap)
1014 {
1015   /* Get the bindings of interest within this map.  */
1016   auto_vec<const binding_key *> bindings;
1017   if (always_overlap)
1018     for (auto iter : *this)
1019       bindings.safe_push (iter.first); /* Add all bindings.  */
1020   else
1021     /* Just add overlapping bindings.  */
1022     get_overlapping_bindings (drop_key, &bindings);
1023 
1024   unsigned i;
1025   const binding_key *iter_binding;
1026   FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1027     {
1028       /* Record any svalues that were removed to *UNCERTAINTY as being
1029 	 maybe-bound, provided at least some part of the binding is symbolic.
1030 
1031 	 Specifically, if at least one of the bindings is symbolic, or we
1032 	 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1033 	 regions, then we don't know that the svalue has been overwritten,
1034 	 and should record that to *UNCERTAINTY.
1035 
1036 	 However, if we have concrete keys accessing within the same symbolic
1037 	 region, then we *know* that the symbolic region has been overwritten,
1038 	 so we don't record it to *UNCERTAINTY, as this could be a genuine
1039 	 leak.  */
1040       const svalue *old_sval = get (iter_binding);
1041       if (uncertainty
1042 	  && (drop_key->symbolic_p ()
1043 	      || iter_binding->symbolic_p ()
1044 	      || always_overlap))
1045 	uncertainty->on_maybe_bound_sval (old_sval);
1046 
1047       /* Begin by removing the old binding. */
1048       m_map.remove (iter_binding);
1049 
1050       /* Don't attempt to handle prefixes/suffixes for the
1051 	 "always_overlap" case; everything's being removed.  */
1052       if (always_overlap)
1053 	continue;
1054 
1055       /* Now potentially add the prefix and suffix.  */
1056       if (const concrete_binding *drop_ckey
1057 	  = drop_key->dyn_cast_concrete_binding ())
1058 	if (const concrete_binding *iter_ckey
1059 	      = iter_binding->dyn_cast_concrete_binding ())
1060 	  {
1061 	    gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1062 
1063 	    const bit_range &drop_bits = drop_ckey->get_bit_range ();
1064 	    const bit_range &iter_bits = iter_ckey->get_bit_range ();
1065 
1066 	    if (iter_bits.get_start_bit_offset ()
1067 		  < drop_bits.get_start_bit_offset ())
1068 	      {
1069 		/* We have a truncated prefix.  */
1070 		bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1071 				       (drop_bits.get_start_bit_offset ()
1072 					- iter_bits.get_start_bit_offset ()));
1073 		const concrete_binding *prefix_key
1074 		  = mgr->get_concrete_binding (prefix_bits);
1075 		bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1076 		const svalue *prefix_sval
1077 		  = old_sval->extract_bit_range (NULL_TREE,
1078 						 rel_prefix,
1079 						 mgr->get_svalue_manager ());
1080 		m_map.put (prefix_key, prefix_sval);
1081 	      }
1082 
1083 	    if (iter_bits.get_next_bit_offset ()
1084 		  > drop_bits.get_next_bit_offset ())
1085 	      {
1086 		/* We have a truncated suffix.  */
1087 		bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1088 				       (iter_bits.get_next_bit_offset ()
1089 					- drop_bits.get_next_bit_offset ()));
1090 		const concrete_binding *suffix_key
1091 		  = mgr->get_concrete_binding (suffix_bits);
1092 		bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1093 					- iter_bits.get_start_bit_offset (),
1094 				      suffix_bits.m_size_in_bits);
1095 		const svalue *suffix_sval
1096 		  = old_sval->extract_bit_range (NULL_TREE,
1097 						 rel_suffix,
1098 						 mgr->get_svalue_manager ());
1099 		m_map.put (suffix_key, suffix_sval);
1100 	      }
1101 	  }
1102     }
1103 }
1104 
1105 /* class binding_cluster.  */
1106 
1107 /* binding_cluster's copy ctor.  */
1108 
1109 binding_cluster::binding_cluster (const binding_cluster &other)
1110 : m_base_region (other.m_base_region), m_map (other.m_map),
1111   m_escaped (other.m_escaped), m_touched (other.m_touched)
1112 {
1113 }
1114 
1115 /* binding_cluster's assignment operator.  */
1116 
1117 binding_cluster&
1118 binding_cluster::operator= (const binding_cluster &other)
1119 {
1120   gcc_assert (m_base_region == other.m_base_region);
1121   m_map = other.m_map;
1122   m_escaped = other.m_escaped;
1123   m_touched = other.m_touched;
1124   return *this;
1125 }
1126 
1127 /* binding_cluster's equality operator.  */
1128 
1129 bool
1130 binding_cluster::operator== (const binding_cluster &other) const
1131 {
1132   if (m_map != other.m_map)
1133     return false;
1134 
1135   if (m_base_region != other.m_base_region)
1136     return false;
1137 
1138   if (m_escaped != other.m_escaped)
1139     return false;
1140 
1141   if (m_touched != other.m_touched)
1142     return false;
1143 
1144   gcc_checking_assert (hash () == other.hash ());
1145 
1146   return true;
1147 }
1148 
1149 /* Generate a hash value for this binding_cluster.  */
1150 
1151 hashval_t
1152 binding_cluster::hash () const
1153 {
1154   return m_map.hash ();
1155 }
1156 
1157 /* Return true if this binding_cluster is symbolic
1158    i.e. its base region is symbolic.  */
1159 
1160 bool
1161 binding_cluster::symbolic_p () const
1162 {
1163   return m_base_region->get_kind () == RK_SYMBOLIC;
1164 }
1165 
1166 /* Dump a representation of this binding_cluster to PP.
1167    SIMPLE controls how values and regions are to be printed.
1168    If MULTILINE, then split the dump over multiple lines and
1169    use whitespace for readability, otherwise put all on one line.  */
1170 
1171 void
1172 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1173 			     bool multiline) const
1174 {
1175   if (m_escaped)
1176     {
1177       if (multiline)
1178 	{
1179 	  pp_string (pp, "    ESCAPED");
1180 	  pp_newline (pp);
1181 	}
1182       else
1183 	pp_string (pp, "(ESCAPED)");
1184     }
1185   if (m_touched)
1186     {
1187       if (multiline)
1188 	{
1189 	  pp_string (pp, "    TOUCHED");
1190 	  pp_newline (pp);
1191 	}
1192       else
1193 	pp_string (pp, "(TOUCHED)");
1194     }
1195 
1196   m_map.dump_to_pp (pp, simple, multiline);
1197 }
1198 
1199 /* Dump a multiline representation of this binding_cluster to stderr.  */
1200 
1201 DEBUG_FUNCTION void
1202 binding_cluster::dump (bool simple) const
1203 {
1204   pretty_printer pp;
1205   pp_format_decoder (&pp) = default_tree_printer;
1206   pp_show_color (&pp) = pp_show_color (global_dc->printer);
1207   pp.buffer->stream = stderr;
1208   pp_string (&pp, "  cluster for: ");
1209   m_base_region->dump_to_pp (&pp, simple);
1210   pp_string (&pp, ": ");
1211   pp_newline (&pp);
1212   dump_to_pp (&pp, simple, true);
1213   pp_newline (&pp);
1214   pp_flush (&pp);
1215 }
1216 
1217 /* Assert that this object is valid.  */
1218 
1219 void
1220 binding_cluster::validate () const
1221 {
1222   int num_symbolic = 0;
1223   int num_concrete = 0;
1224   for (auto iter : m_map)
1225     {
1226       if (iter.first->symbolic_p ())
1227 	num_symbolic++;
1228       else
1229 	num_concrete++;
1230     }
1231   /* We shouldn't have more than one symbolic key per cluster
1232      (or one would have clobbered the other).  */
1233   gcc_assert (num_symbolic < 2);
1234   /* We can't have both concrete and symbolic keys.  */
1235   gcc_assert (num_concrete == 0 || num_symbolic == 0);
1236 }
1237 
1238 /* Return a new json::object of the form
1239    {"escaped": true/false,
1240     "touched": true/false,
1241     "map" : object for the binding_map.  */
1242 
1243 json::object *
1244 binding_cluster::to_json () const
1245 {
1246   json::object *cluster_obj = new json::object ();
1247 
1248   cluster_obj->set ("escaped", new json::literal (m_escaped));
1249   cluster_obj->set ("touched", new json::literal (m_touched));
1250   cluster_obj->set ("map", m_map.to_json ());
1251 
1252   return cluster_obj;
1253 }
1254 
1255 /* Add a binding of SVAL of kind KIND to REG, unpacking SVAL if it is a
1256    compound_sval.  */
1257 
1258 void
1259 binding_cluster::bind (store_manager *mgr,
1260 		       const region *reg, const svalue *sval)
1261 {
1262   if (const compound_svalue *compound_sval
1263 	= sval->dyn_cast_compound_svalue ())
1264     {
1265       bind_compound_sval (mgr, reg, compound_sval);
1266       return;
1267     }
1268 
1269   const binding_key *binding = binding_key::make (mgr, reg);
1270   bind_key (binding, sval);
1271 }
1272 
1273 /* Bind SVAL to KEY.
1274    Unpacking of compound_svalues should already have been done by the
1275    time this is called.  */
1276 
1277 void
1278 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1279 {
1280   gcc_assert (sval->get_kind () != SK_COMPOUND);
1281 
1282   m_map.put (key, sval);
1283   if (key->symbolic_p ())
1284     m_touched = true;
1285 }
1286 
1287 /* Subroutine of binding_cluster::bind.
1288    Unpack compound_svals when binding them, so that we bind them
1289    element-wise.  */
1290 
1291 void
1292 binding_cluster::bind_compound_sval (store_manager *mgr,
1293 				     const region *reg,
1294 				     const compound_svalue *compound_sval)
1295 {
1296   region_offset reg_offset = reg->get_offset ();
1297   if (reg_offset.symbolic_p ())
1298     {
1299       m_touched = true;
1300       clobber_region (mgr, reg);
1301       return;
1302     }
1303 
1304   for (map_t::iterator iter = compound_sval->begin ();
1305        iter != compound_sval->end (); ++iter)
1306     {
1307       const binding_key *iter_key = (*iter).first;
1308       const svalue *iter_sval = (*iter).second;
1309 
1310       if (const concrete_binding *concrete_key
1311 	  = iter_key->dyn_cast_concrete_binding ())
1312 	{
1313 	  bit_offset_t effective_start
1314 	    = (concrete_key->get_start_bit_offset ()
1315 	       + reg_offset.get_bit_offset ());
1316 	  const concrete_binding *effective_concrete_key
1317 	    = mgr->get_concrete_binding (effective_start,
1318 					 concrete_key->get_size_in_bits ());
1319 	  bind_key (effective_concrete_key, iter_sval);
1320 	}
1321       else
1322 	gcc_unreachable ();
1323     }
1324 }
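/* For example, when binding a compound value that has an element at
   bits 32-63 to a region REG starting at bit offset 128 within this
   cluster, that element is rebound at bits 160-191, i.e. at its key's
   offset shifted by REG's offset.  */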
1325 
1326 /* Remove all bindings overlapping REG within this cluster.  */
1327 
1328 void
1329 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1330 {
1331   remove_overlapping_bindings (mgr, reg, NULL);
1332 }
1333 
1334 /* Remove any bindings for REG within this cluster.  */
1335 
1336 void
1337 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1338 {
1339   gcc_assert (reg->get_kind () == RK_DECL);
1340   const binding_key *binding
1341     = binding_key::make (mgr, const_cast<region *> (reg));
1342   m_map.remove (binding);
1343 }
1344 
1345 /* Clobber REG and fill it with repeated copies of SVAL.  */
1346 
1347 void
1348 binding_cluster::fill_region (store_manager *mgr,
1349 			      const region *reg,
1350 			      const svalue *sval)
1351 {
1352   clobber_region (mgr, reg);
1353 
1354   region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1355   const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1356   const svalue *fill_sval
1357     = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1358 					       byte_size_sval, sval);
1359   bind (mgr, reg, fill_sval);
1360 }
1361 
1362 /* Clobber REG within this cluster and fill it with zeroes.  */
1363 
1364 void
1365 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1366 {
1367   region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1368   const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1369   fill_region (mgr, reg, zero_sval);
1370 }
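/* For example, zero-filling a 16-byte region clobbers any bindings
   overlapping it and binds the whole region to a repeated_svalue of
   (char)0 whose outer size is the region's 16-byte size.  */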
1371 
1372 /* Mark REG_TO_BIND within this cluster as being unknown.
1373 
1374    Remove any bindings overlapping REG_FOR_OVERLAP.
1375    If UNCERTAINTY is non-NULL, use it to record any svalues that
1376    had bindings to them removed, as being maybe-bound.
1377 
1378    REG_TO_BIND and REG_FOR_OVERLAP are the same for
1379    store::mark_region_as_unknown, but are different in
1380    store::set_value's alias handling, for handling the case where
1381    we have a write to a symbolic REG_FOR_OVERLAP. */
1382 
1383 void
1384 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1385 					 const region *reg_to_bind,
1386 					 const region *reg_for_overlap,
1387 					 uncertainty_t *uncertainty)
1388 {
1389   remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty);
1390 
1391   /* Add a default binding to "unknown".  */
1392   region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1393   const svalue *sval
1394     = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1395   bind (mgr, reg_to_bind, sval);
1396 }
1397 
1398 /* Purge state involving SVAL.  */
1399 
1400 void
1401 binding_cluster::purge_state_involving (const svalue *sval,
1402 					region_model_manager *sval_mgr)
1403 {
1404   auto_vec<const binding_key *> to_remove;
1405   auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1406   for (auto iter : m_map)
1407     {
1408       const binding_key *iter_key = iter.first;
1409       if (const symbolic_binding *symbolic_key
1410 	    = iter_key->dyn_cast_symbolic_binding ())
1411 	{
1412 	  const region *reg = symbolic_key->get_region ();
1413 	  if (reg->involves_p (sval))
1414 	    to_remove.safe_push (iter_key);
1415 	}
1416       const svalue *iter_sval = iter.second;
1417       if (iter_sval->involves_p (sval))
1418 	to_make_unknown.safe_push (std::make_pair(iter_key,
1419 						  iter_sval->get_type ()));
1420     }
1421   for (auto iter : to_remove)
1422     {
1423       m_map.remove (iter);
1424       m_touched = true;
1425     }
1426   for (auto iter : to_make_unknown)
1427     {
1428       const svalue *new_sval
1429 	= sval_mgr->get_or_create_unknown_svalue (iter.second);
1430       m_map.put (iter.first, new_sval);
1431     }
1432 }
1433 
1434 /* Get any SVAL bound to REG within this cluster via kind KIND,
1435    without checking parent regions of REG.  */
1436 
1437 const svalue *
1438 binding_cluster::get_binding (store_manager *mgr,
1439 			      const region *reg) const
1440 {
1441   const binding_key *reg_binding = binding_key::make (mgr, reg);
1442   const svalue *sval = m_map.get (reg_binding);
1443   if (sval)
1444     {
1445       /* If we have a struct with a single field, then the binding of
1446 	 the field will equal that of the struct, and looking up e.g.
1447 	 PARENT_REG.field within:
1448 	    cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1449 	 will erroneously return INIT_VAL(OTHER_REG), rather than
1450 	   SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1451 	 Fix this issue by iterating upwards whilst the bindings are equal,
1452 	 expressing the lookups as subvalues.
1453 	 We have to gather a list of subregion accesses, then walk it
1454 	 in reverse to get the subvalues.  */
1455       auto_vec<const region *> regions;
1456       while (const region *parent_reg = reg->get_parent_region ())
1457 	{
1458 	  const binding_key *parent_reg_binding
1459 	    = binding_key::make (mgr, parent_reg);
1460 	  if (parent_reg_binding == reg_binding
1461 	      && sval->get_type ()
1462 	      && reg->get_type ()
1463 	      && sval->get_type () != reg->get_type ())
1464 	    {
1465 	      regions.safe_push (reg);
1466 	      reg = parent_reg;
1467 	    }
1468 	  else
1469 	    break;
1470 	}
1471       if (sval->get_type ()
1472 	  && reg->get_type ()
1473 	  && sval->get_type () == reg->get_type ())
1474 	{
1475 	  unsigned i;
1476 	  const region *iter_reg;
1477 	  FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1478 	    {
1479 	      region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1480 	      sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1481 							sval, iter_reg);
1482 	    }
1483 	}
1484     }
1485   return sval;
1486 }
1487 
1488 /* Get any SVAL bound to REG within this cluster,
1489    either directly for REG, or recursively checking for bindings within
1490    parent regions and extracting subvalues if need be.  */
1491 
1492 const svalue *
1493 binding_cluster::get_binding_recursive (store_manager *mgr,
1494 					const region *reg) const
1495 {
1496   if (const svalue *sval = get_binding (mgr, reg))
1497     return sval;
1498   if (reg != m_base_region)
1499     if (const region *parent_reg = reg->get_parent_region ())
1500       if (const svalue *parent_sval
1501 	  = get_binding_recursive (mgr, parent_reg))
1502 	{
1503 	  /* Extract child svalue from parent svalue.  */
1504 	  region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1505 	  return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1506 						    parent_sval, reg);
1507 	}
1508   return NULL;
1509 }
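/* For example, if the whole of struct "s" is bound to some value V and
   there is no direct binding for field "s.f", the recursive walk finds
   the binding for "s" and returns the sub_svalue of V for the field.  */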
1510 
1511 /* Get any value bound for REG within this cluster.  */
1512 
1513 const svalue *
1514 binding_cluster::get_any_binding (store_manager *mgr,
1515 				  const region *reg) const
1516 {
1517   /* Look for a direct binding.  */
1518   if (const svalue *direct_sval
1519       = get_binding_recursive (mgr, reg))
1520     return direct_sval;
1521 
1522   /* If we had a write to a cluster of unknown size, we might
1523      have a self-binding of the whole base region with an svalue,
1524      where the base region is symbolic.
1525      Handle such cases by returning sub_svalue instances.  */
1526   if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1527     {
1528       /* Extract child svalue from parent svalue.  */
1529       region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1530       return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1531 						cluster_sval, reg);
1532     }
1533 
1534   /* If this cluster has been touched by a symbolic write, then the content
1535      of any subregion not currently specifically bound is "UNKNOWN".  */
1536   if (m_touched)
1537     {
1538       region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1539       return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1540     }
1541 
1542   /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1543      then we don't know if we're reading those values or not, so the result
1544      is also "UNKNOWN".  */
1545   if (reg->get_offset ().symbolic_p ()
1546       && m_map.elements () > 0)
1547     {
1548       region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1549       return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1550     }
1551 
1552   if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1553     return compound_sval;
1554 
1555   /* Otherwise, the initial value, or uninitialized.  */
1556   return NULL;
1557 }
1558 
1559 /* Attempt to get a compound_svalue for the bindings within the cluster
1560    affecting REG (which could be the base region itself).
1561 
1562    Create a compound_svalue with the subset of bindings that affect REG,
1563    offsetting them so that the offsets are relative to the start of REG
1564    within the cluster.
1565 
1566    For example, REG could be one element within an array of structs.
1567 
1568    Return the resulting compound_svalue, or NULL if there's a problem.  */
1569 
1570 const svalue *
1571 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1572 					     const region *reg) const
1573 {
1574   region_offset cluster_offset = m_base_region->get_offset ();
1575   if (cluster_offset.symbolic_p ())
1576     return NULL;
1577   region_offset reg_offset = reg->get_offset ();
1578   if (reg_offset.symbolic_p ())
1579     return NULL;
1580 
1581   region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1582 
1583   /* We will build the result map in two parts:
1584      (a) result_map, holding the concrete keys from this cluster,
1585 
1586      (b) default_map, holding the initial values for the region
1587      (e.g. uninitialized, initializer values, or zero), unless this
1588      cluster has been touched.
1589 
1590      We will populate (a), and as we do, clobber (b), trimming and
1591      splitting its bindings as necessary.
1592      Finally, we will merge (b) into (a), giving a concrete map
1593      that merges both the initial values and the bound values from
1594      the binding_cluster.
1595      Doing it this way reduces N for the O(N^2) intersection-finding;
1596      perhaps we should have a spatially-organized data structure for
1597      concrete keys, though.  */
1598 
1599   binding_map result_map;
1600   binding_map default_map;
1601 
1602   /* Set up default values in default_map.  */
1603   const svalue *default_sval;
1604   if (m_touched)
1605     default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1606   else
1607     default_sval = sval_mgr->get_or_create_initial_value (reg);
1608   const binding_key *default_key = binding_key::make (mgr, reg);
1609   default_map.put (default_key, default_sval);
1610 
1611   for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1612     {
1613       const binding_key *key = (*iter).first;
1614       const svalue *sval = (*iter).second;
1615 
1616       if (const concrete_binding *concrete_key
1617 	  = key->dyn_cast_concrete_binding ())
1618 	{
1619 	  const bit_range &bound_range = concrete_key->get_bit_range ();
1620 
1621 	  bit_size_t reg_bit_size;
1622 	  if (!reg->get_bit_size (&reg_bit_size))
1623 	    return NULL;
1624 
1625 	  bit_range reg_range (reg_offset.get_bit_offset (),
1626 			       reg_bit_size);
1627 
1628 	  /* Skip bindings that are outside the bit range of REG.  */
1629 	  if (!bound_range.intersects_p (reg_range))
1630 	    continue;
1631 
1632 	  /* We shouldn't have an exact match; that should have been
1633 	     handled already.  */
1634 	  gcc_assert (!(reg_range == bound_range));
1635 
1636 	  bit_range subrange (0, 0);
1637 	  if (reg_range.contains_p (bound_range, &subrange))
1638 	    {
1639 	      /* We have a bound range fully within REG.
1640 		 Add it to the map, offsetting accordingly.  */
1641 
1642 	      /* Get offset of KEY relative to REG, rather than to
1643 		 the cluster.  */
1644 	      const concrete_binding *offset_concrete_key
1645 		= mgr->get_concrete_binding (subrange);
1646 	      result_map.put (offset_concrete_key, sval);
1647 
1648 	      /* Clobber default_map, removing/trimming/splitting where
1649 		 it overlaps with offset_concrete_key.  */
1650 	      default_map.remove_overlapping_bindings (mgr,
1651 						       offset_concrete_key,
1652 						       NULL, false);
1653 	    }
1654 	  else if (bound_range.contains_p (reg_range, &subrange))
1655 	    {
1656 	      /* REG is fully within the bound range, but
1657 		 is not equal to it; we're extracting a subvalue.  */
1658 	      return sval->extract_bit_range (reg->get_type (),
1659 					      subrange,
1660 					      mgr->get_svalue_manager ());
1661 	    }
1662 	  else
1663 	    {
1664 	      /* REG and the bound range partially overlap.  */
1665 	      bit_range reg_subrange (0, 0);
1666 	      bit_range bound_subrange (0, 0);
1667 	      reg_range.intersects_p (bound_range,
1668 				      &reg_subrange, &bound_subrange);
1669 
1670 	      /* Get the bits from the bound value for the bits at the
1671 		 intersection (relative to the bound value).  */
1672 	      const svalue *overlap_sval
1673 		= sval->extract_bit_range (NULL_TREE,
1674 					   bound_subrange,
1675 					   mgr->get_svalue_manager ());
1676 
1677 	      /* Get key for overlap, relative to REG.  */
1678 	      const concrete_binding *overlap_concrete_key
1679 		= mgr->get_concrete_binding (reg_subrange);
1680 	      result_map.put (overlap_concrete_key, overlap_sval);
1681 
1682 	      /* Clobber default_map, removing/trimming/splitting where
1683 		 it overlaps with overlap_concrete_key.  */
1684 	      default_map.remove_overlapping_bindings (mgr,
1685 						       overlap_concrete_key,
1686 						       NULL, false);
1687 	    }
1688 	}
1689       else
1690 	/* Can't handle symbolic bindings.  */
1691 	return NULL;
1692     }
1693 
1694   if (result_map.elements () == 0)
1695     return NULL;
1696 
1697   /* Merge any bindings from default_map into result_map.  */
1698   for (auto iter : default_map)
1699     {
1700       const binding_key *key = iter.first;
1701       const svalue *sval = iter.second;
1702       result_map.put (key, sval);
1703     }
1704 
1705   return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1706 }
1707 
1708 /* Remove, truncate, and/or split any bindings within this map that
1709    could overlap REG.
1710 
1711    If REG's base region or this cluster is symbolic and they're different
1712    base regions, then remove everything in this cluster's map, on the
1713    grounds that REG could be referring to the same memory as anything
1714    in the map.
1715 
1716    If UNCERTAINTY is non-NULL, use it to record any svalues that
1717    were removed, as being maybe-bound.  */
1718 
1719 void
1720 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1721 					      const region *reg,
1722 					      uncertainty_t *uncertainty)
1723 {
1724   const binding_key *reg_binding = binding_key::make (mgr, reg);
1725 
1726   const region *cluster_base_reg = get_base_region ();
1727   const region *other_base_reg = reg->get_base_region ();
1728   /* If at least one of the base regions involved is symbolic, and they're
1729      not the same base region, then consider everything in the map as
1730      potentially overlapping with reg_binding (even if it's a concrete
1731      binding and things in the map are concrete - they could be referring
1732      to the same memory when the symbolic base regions are taken into
1733      account).  */
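  /* For example, if this cluster is for a global "g" and REG is "*p" for
     some unknown "p", then "*p" could be referring to "g", so every binding
     in this cluster has to be treated as potentially overlapping.  */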
1734   bool always_overlap = (cluster_base_reg != other_base_reg
1735 			 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1736 			     || other_base_reg->get_kind () == RK_SYMBOLIC));
1737   m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1738 				     always_overlap);
1739 }
1740 
1741 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1742    MGR and MERGER.
1743    Return true if they can be merged, false otherwise.  */
1744 
1745 bool
1746 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1747 			      const binding_cluster *cluster_b,
1748 			      binding_cluster *out_cluster,
1749 			      store *out_store,
1750 			      store_manager *mgr,
1751 			      model_merger *merger)
1752 {
1753   gcc_assert (out_cluster);
1754 
1755   /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1756      true if either of the inputs is true.  */
1757   if ((cluster_a && cluster_a->m_escaped)
1758       || (cluster_b && cluster_b->m_escaped))
1759     out_cluster->m_escaped = true;
1760   if ((cluster_a && cluster_a->m_touched)
1761       || (cluster_b && cluster_b->m_touched))
1762     out_cluster->m_touched = true;
1763 
1764   /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1765      could be NULL.  Handle these cases.  */
1766   if (cluster_a == NULL)
1767     {
1768       gcc_assert (cluster_b != NULL);
1769       gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1770       out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1771       return true;
1772     }
1773   if (cluster_b == NULL)
1774     {
1775       gcc_assert (cluster_a != NULL);
1776       gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1777       out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1778       return true;
1779     }
1780 
1781   /* The "both inputs are non-NULL" case.  */
1782   gcc_assert (cluster_a != NULL && cluster_b != NULL);
1783   gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1784   gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1785 
1786   hash_set<const binding_key *> keys;
1787   for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1788        iter_a != cluster_a->m_map.end (); ++iter_a)
1789     {
1790       const binding_key *key_a = (*iter_a).first;
1791       keys.add (key_a);
1792     }
1793   for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1794        iter_b != cluster_b->m_map.end (); ++iter_b)
1795     {
1796       const binding_key *key_b = (*iter_b).first;
1797       keys.add (key_b);
1798     }
1799   int num_symbolic_keys = 0;
1800   int num_concrete_keys = 0;
1801   for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1802        iter != keys.end (); ++iter)
1803     {
1804       region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1805       const binding_key *key = *iter;
1806       const svalue *sval_a = cluster_a->get_any_value (key);
1807       const svalue *sval_b = cluster_b->get_any_value (key);
1808 
1809       if (key->symbolic_p ())
1810 	num_symbolic_keys++;
1811       else
1812 	num_concrete_keys++;
1813 
1814       if (sval_a == sval_b)
1815 	{
1816 	  gcc_assert (sval_a);
1817 	  out_cluster->m_map.put (key, sval_a);
1818 	  continue;
1819 	}
1820       else if (sval_a && sval_b)
1821 	{
1822 	  if (const svalue *merged_sval
1823 	      = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1824 	    {
1825 	      out_cluster->m_map.put (key, merged_sval);
1826 	      continue;
1827 	    }
1828 	  /* Merger of the svalues failed.  Reject merger of the cluster.   */
1829 	  return false;
1830 	}
1831 
1832       /* If we get here, then one cluster binds this key and the other
1833 	 doesn't; merge them as "UNKNOWN".  */
1834       gcc_assert (sval_a || sval_b);
1835 
1836       const svalue *bound_sval = sval_a ? sval_a : sval_b;
1837       tree type = bound_sval->get_type ();
1838       const svalue *unknown_sval
1839 	= mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
1840 
1841       /* ...but reject the merger if this sval shouldn't be mergeable
1842 	 (e.g. reject merging svalues that have non-purgable sm-state,
1843 	 to avoid falsely reporting memory leaks by merging them
1844 	 with something else).  */
1845       if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
1846 	return false;
1847 
1848       out_cluster->m_map.put (key, unknown_sval);
1849     }
1850 
1851   /* A cluster can have at most one symbolic key, and if it has one,
1852      it can't have any concrete keys.  If the merged keys would violate
1853      this, mark the cluster as touched, with no keys.  */
1854   if (num_symbolic_keys >= 2
1855       || (num_concrete_keys > 0 && num_symbolic_keys > 0))
1856     {
1857       out_cluster->m_touched = true;
1858       out_cluster->m_map.empty ();
1859     }
1860 
1861   /* We don't handle other kinds of overlaps yet.  */
1862 
1863   return true;
1864 }
1865 
1866 /* Update this cluster to reflect an attempt to merge OTHER where there
1867    is no other cluster to merge with, and so we're notionally merging the
1868    bound values in OTHER with the initial value of the relevant regions.
1869 
1870    Any bound keys in OTHER should be bound to unknown in this cluster.  */
1871 
1872 void
1873 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
1874 					   store *out_store,
1875 					   store_manager *mgr)
1876 {
1877   for (map_t::iterator iter = other->m_map.begin ();
1878        iter != other->m_map.end (); ++iter)
1879     {
1880       const binding_key *iter_key = (*iter).first;
1881       const svalue *iter_sval = (*iter).second;
1882       const svalue *unknown_sval
1883 	= mgr->get_svalue_manager ()->get_or_create_unknown_svalue
1884 	  (iter_sval->get_type ());
1885       m_map.put (iter_key, unknown_sval);
1886 
1887       /* For any pointers in OTHER, the merger means that the
1888 	 concrete pointer becomes an unknown value, which could
1889 	 show up as a false report of a leak when considering what
1890 	 pointers are live before vs after.
1891 	 Avoid this by marking the base regions they point to as having
1892 	 escaped.  */
1893       if (const region_svalue *region_sval
1894 	  = iter_sval->dyn_cast_region_svalue ())
1895 	{
1896 	  const region *base_reg
1897 	    = region_sval->get_pointee ()->get_base_region ();
1898 	  if (base_reg->tracked_p ()
1899 	      && !base_reg->symbolic_for_unknown_ptr_p ())
1900 	    {
1901 	      binding_cluster *c = out_store->get_or_create_cluster (base_reg);
1902 	      c->mark_as_escaped ();
1903 	    }
1904 	}
1905     }
1906 }
1907 
1908 /* Mark this cluster as having escaped.  */
1909 
1910 void
1911 binding_cluster::mark_as_escaped ()
1912 {
1913   m_escaped = true;
1914 }
1915 
1916 /* If this cluster has escaped (by this call, or by an earlier one, or
1917    by being an external param), then unbind all values and mark it
1918    as "touched", so that it has a conjured value, rather than an
1919    initial_svalue.
1920    Use P to purge state involving conjured_svalues.  */
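/* For example, once "&x" has been passed to one unknown function, a later
   call to another unknown function could have written to "x", so x's
   bindings are dropped and replaced by a conjured value for that call.  */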
1921 
1922 void
1923 binding_cluster::on_unknown_fncall (const gcall *call,
1924 				    store_manager *mgr,
1925 				    const conjured_purge &p)
1926 {
1927   if (m_escaped)
1928     {
1929       m_map.empty ();
1930 
1931       /* Bind it to a new "conjured" value using CALL.  */
1932       const svalue *sval
1933 	= mgr->get_svalue_manager ()->get_or_create_conjured_svalue
1934 	    (m_base_region->get_type (), call, m_base_region, p);
1935       bind (mgr, m_base_region, sval);
1936 
1937       m_touched = true;
1938     }
1939 }
1940 
1941 /* Mark this cluster as having been clobbered by STMT.
1942    Use P to purge state involving conjured_svalues.  */
1943 
1944 void
1945 binding_cluster::on_asm (const gasm *stmt,
1946 			 store_manager *mgr,
1947 			 const conjured_purge &p)
1948 {
1949   m_map.empty ();
1950 
1951   /* Bind it to a new "conjured" value using STMT.  */
1952   const svalue *sval
1953     = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
1954     (m_base_region->get_type (), stmt, m_base_region, p);
1955   bind (mgr, m_base_region, sval);
1956 
1957   m_touched = true;
1958 }
1959 
1960 /* Return true if this binding_cluster has no information,
1961    i.e. if there are no bindings and it hasn't been marked as having
1962    escaped or been touched symbolically.  */
1963 
1964 bool
1965 binding_cluster::redundant_p () const
1966 {
1967   return (m_map.elements () == 0
1968 	  && !m_escaped
1969 	  && !m_touched);
1970 }
1971 
1972 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type.  */
1973 
1974 static void
1975 append_pathvar_with_type (path_var pv,
1976 			  tree type,
1977 			  auto_vec<path_var> *out_pvs)
1978 {
1979   gcc_assert (pv.m_tree);
1980 
1981   if (TREE_TYPE (pv.m_tree) != type)
1982     pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
1983 
1984   out_pvs->safe_push (pv);
1985 }
1986 
1987 /* Find representative path_vars for SVAL within this cluster for BASE_REG,
1988    appending the results to OUT_PVS.  */
1989 
1990 void
1991 binding_cluster::get_representative_path_vars (const region_model *model,
1992 					       svalue_set *visited,
1993 					       const region *base_reg,
1994 					       const svalue *sval,
1995 					       auto_vec<path_var> *out_pvs)
1996   const
1997 {
1998   sval = simplify_for_binding (sval);
1999 
2000   for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2001     {
2002       const binding_key *key = (*iter).first;
2003       const svalue *bound_sval = (*iter).second;
2004       if (bound_sval == sval)
2005 	{
2006 	  if (const concrete_binding *ckey
2007 		= key->dyn_cast_concrete_binding ())
2008 	    {
2009 	      auto_vec <const region *> subregions;
2010 	      base_reg->get_subregions_for_binding
2011 		(model->get_manager (),
2012 		 ckey->get_start_bit_offset (),
2013 		 ckey->get_size_in_bits (),
2014 		 sval->get_type (),
2015 		 &subregions);
2016 	      unsigned i;
2017 	      const region *subregion;
2018 	      FOR_EACH_VEC_ELT (subregions, i, subregion)
2019 		{
2020 		  if (path_var pv
2021 		      = model->get_representative_path_var (subregion,
2022 							    visited))
2023 		    append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2024 		}
2025 	    }
2026 	  else
2027 	    {
2028 	      const symbolic_binding *skey = (const symbolic_binding *)key;
2029 	      if (path_var pv
2030 		  = model->get_representative_path_var (skey->get_region (),
2031 							visited))
2032 		append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2033 	    }
2034 	}
2035     }
2036 }
2037 
2038 /* Get any svalue bound to KEY, or NULL.  */
2039 
2040 const svalue *
2041 binding_cluster::get_any_value (const binding_key *key) const
2042 {
2043   return m_map.get (key);
2044 }
2045 
2046 /* If this cluster has a single direct binding for the whole of the region,
2047    return it.
2048    For use in simplifying dumps.  */
2049 
2050 const svalue *
2051 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2052 {
2053   /* Fail gracefully if MGR is NULL to make it easier to dump store
2054      instances in the debugger.  */
2055   if (mgr == NULL)
2056     return NULL;
2057 
2058   if (m_map.elements () != 1)
2059     return NULL;
2060 
2061   const binding_key *key = binding_key::make (mgr, m_base_region);
2062   return get_any_value (key);
2063 }
2064 
2065 /* class store_manager.  */
2066 
2067 logger *
2068 store_manager::get_logger () const
2069 {
2070   return m_mgr->get_logger ();
2071 }
2072 
2073 /* binding consolidation.  */
2074 
2075 const concrete_binding *
2076 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2077 				     bit_offset_t size_in_bits)
2078 {
2079   concrete_binding b (start_bit_offset, size_in_bits);
2080   if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2081     return existing;
2082 
2083   concrete_binding *to_save = new concrete_binding (b);
2084   m_concrete_binding_key_mgr.put (b, to_save);
2085   return to_save;
2086 }
2087 
2088 const symbolic_binding *
2089 store_manager::get_symbolic_binding (const region *reg)
2090 {
2091   symbolic_binding b (reg);
2092   if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2093     return existing;
2094 
2095   symbolic_binding *to_save = new symbolic_binding (b);
2096   m_symbolic_binding_key_mgr.put (b, to_save);
2097   return to_save;
2098 }
2099 
2100 /* class store.  */
2101 
2102 /* store's default ctor.  */
2103 
2104 store::store ()
2105 : m_called_unknown_fn (false)
2106 {
2107 }
2108 
2109 /* store's copy ctor.  */
2110 
2111 store::store (const store &other)
2112 : m_cluster_map (other.m_cluster_map.elements ()),
2113   m_called_unknown_fn (other.m_called_unknown_fn)
2114 {
2115   for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2116        iter != other.m_cluster_map.end ();
2117        ++iter)
2118     {
2119       const region *reg = (*iter).first;
2120       gcc_assert (reg);
2121       binding_cluster *c = (*iter).second;
2122       gcc_assert (c);
2123       m_cluster_map.put (reg, new binding_cluster (*c));
2124     }
2125 }
2126 
2127 /* store's dtor.  */
2128 
2129 store::~store ()
2130 {
2131   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2132        iter != m_cluster_map.end ();
2133        ++iter)
2134     delete (*iter).second;
2135 }
2136 
2137 /* store's assignment operator.  */
2138 
2139 store &
2140 store::operator= (const store &other)
2141 {
2142   /* Delete existing cluster map.  */
2143   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2144        iter != m_cluster_map.end ();
2145        ++iter)
2146     delete (*iter).second;
2147   m_cluster_map.empty ();
2148 
2149   m_called_unknown_fn = other.m_called_unknown_fn;
2150 
2151   for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2152        iter != other.m_cluster_map.end ();
2153        ++iter)
2154     {
2155       const region *reg = (*iter).first;
2156       gcc_assert (reg);
2157       binding_cluster *c = (*iter).second;
2158       gcc_assert (c);
2159       m_cluster_map.put (reg, new binding_cluster (*c));
2160     }
2161   return *this;
2162 }
2163 
2164 /* store's equality operator.  */
2165 
2166 bool
2167 store::operator== (const store &other) const
2168 {
2169   if (m_called_unknown_fn != other.m_called_unknown_fn)
2170     return false;
2171 
2172   if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2173     return false;
2174 
2175   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2176        iter != m_cluster_map.end ();
2177        ++iter)
2178     {
2179       const region *reg = (*iter).first;
2180       binding_cluster *c = (*iter).second;
2181       binding_cluster **other_slot
2182 	= const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2183       if (other_slot == NULL)
2184 	return false;
2185       if (*c != **other_slot)
2186 	return false;
2187     }
2188 
2189   gcc_checking_assert (hash () == other.hash ());
2190 
2191   return true;
2192 }
2193 
2194 /* Get a hash value for this store.  */
2195 
2196 hashval_t
2197 store::hash () const
2198 {
2199   hashval_t result = 0;
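  /* XOR-combining the per-cluster hashes makes the result independent of
     the hash map's iteration order.  */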
2200   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2201        iter != m_cluster_map.end ();
2202        ++iter)
2203     result ^= (*iter).second->hash ();
2204   return result;
2205 }
2206 
2207 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2208    removing duplicate parents.  */
2209 
2210 static void
2211 get_sorted_parent_regions (auto_vec<const region *> *out,
2212 			   auto_vec<const region *> &in)
2213 {
2214   /* Get the set of parent regions.  */
2215   hash_set<const region *> parent_regions;
2216   const region *iter_reg;
2217   unsigned i;
2218   FOR_EACH_VEC_ELT (in, i, iter_reg)
2219     {
2220       const region *parent_reg = iter_reg->get_parent_region ();
2221       gcc_assert (parent_reg);
2222       parent_regions.add (parent_reg);
2223     }
2224 
2225   /* Write to OUT.  */
2226   for (hash_set<const region *>::iterator iter = parent_regions.begin();
2227        iter != parent_regions.end(); ++iter)
2228     out->safe_push (*iter);
2229 
2230   /* Sort OUT.  */
2231   out->qsort (region::cmp_ptr_ptr);
2232 }
2233 
2234 /* Dump a representation of this store to PP, using SIMPLE to control how
2235    svalues and regions are printed.
2236    MGR is used for simplifying dumps if non-NULL, but can also be NULL
2237    (to make it easier to use from the debugger).  */
2238 
2239 void
2240 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2241 		   store_manager *mgr) const
2242 {
2243   /* Sort into some deterministic order.  */
2244   auto_vec<const region *> base_regions;
2245   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2246        iter != m_cluster_map.end (); ++iter)
2247     {
2248       const region *base_reg = (*iter).first;
2249       base_regions.safe_push (base_reg);
2250     }
2251   base_regions.qsort (region::cmp_ptr_ptr);
2252 
2253   /* Gather clusters, organize by parent region, so that we can group
2254      together locals, globals, etc.  */
2255   auto_vec<const region *> parent_regions;
2256   get_sorted_parent_regions (&parent_regions, base_regions);
2257 
2258   const region *parent_reg;
2259   unsigned i;
2260   FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2261     {
2262       gcc_assert (parent_reg);
2263       pp_string (pp, "clusters within ");
2264       parent_reg->dump_to_pp (pp, simple);
2265       if (multiline)
2266 	pp_newline (pp);
2267       else
2268 	pp_string (pp, " {");
2269 
2270       const region *base_reg;
2271       unsigned j;
2272       FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2273 	{
2274 	  /* This is O(N * M), but N ought to be small.  */
2275 	  if (base_reg->get_parent_region () != parent_reg)
2276 	    continue;
2277 	  binding_cluster *cluster
2278 	    = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2279 	  if (!multiline)
2280 	    {
2281 	      if (j > 0)
2282 		pp_string (pp, ", ");
2283 	    }
2284 	  if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2285 	    {
2286 	      /* Special-case to simplify dumps for the common case where
2287 		 we just have one value directly bound to the whole of a
2288 		 region.  */
2289 	      if (multiline)
2290 		{
2291 		  pp_string (pp, "  cluster for: ");
2292 		  base_reg->dump_to_pp (pp, simple);
2293 		  pp_string (pp, ": ");
2294 		  sval->dump_to_pp (pp, simple);
2295 		  if (cluster->escaped_p ())
2296 		    pp_string (pp, " (ESCAPED)");
2297 		  if (cluster->touched_p ())
2298 		    pp_string (pp, " (TOUCHED)");
2299 		  pp_newline (pp);
2300 		}
2301 	      else
2302 		{
2303 		  pp_string (pp, "region: {");
2304 		  base_reg->dump_to_pp (pp, simple);
2305 		  pp_string (pp, ", value: ");
2306 		  sval->dump_to_pp (pp, simple);
2307 		  if (cluster->escaped_p ())
2308 		    pp_string (pp, " (ESCAPED)");
2309 		  if (cluster->touched_p ())
2310 		    pp_string (pp, " (TOUCHED)");
2311 		  pp_string (pp, "}");
2312 		}
2313 	    }
2314 	  else if (multiline)
2315 	    {
2316 	      pp_string (pp, "  cluster for: ");
2317 	      base_reg->dump_to_pp (pp, simple);
2318 	      pp_newline (pp);
2319 	      cluster->dump_to_pp (pp, simple, multiline);
2320 	    }
2321 	  else
2322 	    {
2323 	      pp_string (pp, "base region: {");
2324 	      base_reg->dump_to_pp (pp, simple);
2325 	      pp_string (pp, "} has cluster: {");
2326 	      cluster->dump_to_pp (pp, simple, multiline);
2327 	      pp_string (pp, "}");
2328 	    }
2329 	}
2330       if (!multiline)
2331 	pp_string (pp, "}");
2332     }
2333   pp_printf (pp, "m_called_unknown_fn: %s",
2334 	     m_called_unknown_fn ? "TRUE" : "FALSE");
2335   if (multiline)
2336     pp_newline (pp);
2337 }
2338 
2339 /* Dump a multiline representation of this store to stderr.  */
2340 
2341 DEBUG_FUNCTION void
2342 store::dump (bool simple) const
2343 {
2344   pretty_printer pp;
2345   pp_format_decoder (&pp) = default_tree_printer;
2346   pp_show_color (&pp) = pp_show_color (global_dc->printer);
2347   pp.buffer->stream = stderr;
2348   dump_to_pp (&pp, simple, true, NULL);
2349   pp_newline (&pp);
2350   pp_flush (&pp);
2351 }
2352 
2353 /* Assert that this object is valid.  */
2354 
2355 void
2356 store::validate () const
2357 {
2358   for (auto iter : m_cluster_map)
2359     iter.second->validate ();
2360 }
2361 
2362 /* Return a new json::object of the form
2363    {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2364 			 ... for each cluster within parent region},
2365     ...for each parent region,
2366     "called_unknown_fn": true/false}.  */
2367 
2368 json::object *
2369 store::to_json () const
2370 {
2371   json::object *store_obj = new json::object ();
2372 
2373   /* Sort into some deterministic order.  */
2374   auto_vec<const region *> base_regions;
2375   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2376        iter != m_cluster_map.end (); ++iter)
2377     {
2378       const region *base_reg = (*iter).first;
2379       base_regions.safe_push (base_reg);
2380     }
2381   base_regions.qsort (region::cmp_ptr_ptr);
2382 
2383   /* Gather clusters, organize by parent region, so that we can group
2384      together locals, globals, etc.  */
2385   auto_vec<const region *> parent_regions;
2386   get_sorted_parent_regions (&parent_regions, base_regions);
2387 
2388   const region *parent_reg;
2389   unsigned i;
2390   FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2391     {
2392       gcc_assert (parent_reg);
2393 
2394       json::object *clusters_in_parent_reg_obj = new json::object ();
2395 
2396       const region *base_reg;
2397       unsigned j;
2398       FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2399 	{
2400 	  /* This is O(N * M), but N ought to be small.  */
2401 	  if (base_reg->get_parent_region () != parent_reg)
2402 	    continue;
2403 	  binding_cluster *cluster
2404 	    = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2405 	  label_text base_reg_desc = base_reg->get_desc ();
2406 	  clusters_in_parent_reg_obj->set (base_reg_desc.m_buffer,
2407 					   cluster->to_json ());
2408 	  base_reg_desc.maybe_free ();
2409 	}
2410       label_text parent_reg_desc = parent_reg->get_desc ();
2411       store_obj->set (parent_reg_desc.m_buffer, clusters_in_parent_reg_obj);
2412       parent_reg_desc.maybe_free ();
2413     }
2414 
2415   store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2416 
2417   return store_obj;
2418 }
2419 
2420 /* Get any svalue bound to REG, or NULL.  */
2421 
2422 const svalue *
2423 store::get_any_binding (store_manager *mgr, const region *reg) const
2424 {
2425   const region *base_reg = reg->get_base_region ();
2426   binding_cluster **cluster_slot
2427     = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2428   if (!cluster_slot)
2429     return NULL;
2430   return (*cluster_slot)->get_any_binding (mgr, reg);
2431 }
2432 
2433 /* Set the value of LHS_REG to RHS_SVAL.  */
2434 
2435 void
2436 store::set_value (store_manager *mgr, const region *lhs_reg,
2437 		  const svalue *rhs_sval,
2438 		  uncertainty_t *uncertainty)
2439 {
2440   logger *logger = mgr->get_logger ();
2441   LOG_SCOPE (logger);
2442 
2443   remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2444 
2445   rhs_sval = simplify_for_binding (rhs_sval);
2446 
2447   const region *lhs_base_reg = lhs_reg->get_base_region ();
2448   binding_cluster *lhs_cluster;
2449   if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2450     {
2451       /* Reject attempting to bind values into a symbolic region
2452 	 for an unknown ptr; merely invalidate values below.  */
2453       lhs_cluster = NULL;
2454 
2455       /* The LHS of the write is *UNKNOWN.  If the RHS is a pointer,
2456 	 then treat the region being pointed to as having escaped.  */
2457       if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2458 	{
2459 	  const region *ptr_dst = ptr_sval->get_pointee ();
2460 	  const region *ptr_base_reg = ptr_dst->get_base_region ();
2461 	  mark_as_escaped (ptr_base_reg);
2462 	}
2463     }
2464   else if (lhs_base_reg->tracked_p ())
2465     {
2466       lhs_cluster = get_or_create_cluster (lhs_base_reg);
2467       lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2468     }
2469   else
2470     {
2471       /* Reject attempting to bind values into an untracked region;
2472 	 merely invalidate values below.  */
2473       lhs_cluster = NULL;
2474     }
2475 
2476   /* Bindings to a cluster can affect other clusters if a symbolic
2477      base region is involved.
2478      Writes to concrete clusters can't affect other concrete clusters,
2479      but can affect symbolic clusters.
2480      Writes to symbolic clusters can affect both concrete and symbolic
2481      clusters.
2482      Invalidate our knowledge of other clusters that might have been
2483      affected by the write.  */
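  /* For example, a write through "*p" for an unknown "p" could clobber the
     cluster for a global "g", and a write to "g" could overlap whatever
     "*p" refers to, whereas writes to two distinct concrete base regions
     can't interfere with each other.  */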
2484   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2485        iter != m_cluster_map.end (); ++iter)
2486     {
2487       const region *iter_base_reg = (*iter).first;
2488       binding_cluster *iter_cluster = (*iter).second;
2489       if (iter_base_reg != lhs_base_reg
2490 	  && (lhs_cluster == NULL
2491 	      || lhs_cluster->symbolic_p ()
2492 	      || iter_cluster->symbolic_p ()))
2493 	{
2494 	  tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2495 	  switch (t_alias.get_value ())
2496 	    {
2497 	    default:
2498 	      gcc_unreachable ();
2499 
2500 	    case tristate::TS_UNKNOWN:
2501 	      if (logger)
2502 		{
2503 		  pretty_printer *pp = logger->get_printer ();
2504 		  logger->start_log_line ();
2505 		  logger->log_partial ("possible aliasing of ");
2506 		  iter_base_reg->dump_to_pp (pp, true);
2507 		  logger->log_partial (" when writing SVAL: ");
2508 		  rhs_sval->dump_to_pp (pp, true);
2509 		  logger->log_partial (" to LHS_REG: ");
2510 		  lhs_reg->dump_to_pp (pp, true);
2511 		  logger->end_log_line ();
2512 		}
2513 	      /* Mark all of iter_cluster's iter_base_reg as unknown,
2514 		 using LHS_REG when considering overlaps, to handle
2515 		 symbolic vs concrete issues.  */
2516 	      iter_cluster->mark_region_as_unknown
2517 		(mgr,
2518 		 iter_base_reg, /* reg_to_bind */
2519 		 lhs_reg, /* reg_for_overlap */
2520 		 uncertainty);
2521 	      break;
2522 
2523 	    case tristate::TS_TRUE:
2524 	      gcc_unreachable ();
2525 	      break;
2526 
2527 	    case tristate::TS_FALSE:
2528 	      /* If they can't be aliases, then don't invalidate this
2529 		 cluster.  */
2530 	      break;
2531 	    }
2532 	}
2533     }
2534 }
2535 
2536 /* Determine if BASE_REG_A could be an alias of BASE_REG_B.  */
2537 
2538 tristate
2539 store::eval_alias (const region *base_reg_a,
2540 		   const region *base_reg_b) const
2541 {
2542   /* SSA names can't alias.  */
2543   tree decl_a = base_reg_a->maybe_get_decl ();
2544   if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2545     return tristate::TS_FALSE;
2546   tree decl_b = base_reg_b->maybe_get_decl ();
2547   if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2548     return tristate::TS_FALSE;
2549 
2550   /* Try both ways, for symmetry.  */
2551   tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2552   if (ts_ab.is_false ())
2553     return tristate::TS_FALSE;
2554   tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2555   if (ts_ba.is_false ())
2556     return tristate::TS_FALSE;
2557   return tristate::TS_UNKNOWN;
2558 }
2559 
2560 /* Half of store::eval_alias; called twice for symmetry.  */
2561 
2562 tristate
2563 store::eval_alias_1 (const region *base_reg_a,
2564 		     const region *base_reg_b) const
2565 {
2566   if (const symbolic_region *sym_reg_a
2567       = base_reg_a->dyn_cast_symbolic_region ())
2568     {
2569       const svalue *sval_a = sym_reg_a->get_pointer ();
2570       if (tree decl_b = base_reg_b->maybe_get_decl ())
2571 	{
2572 	  if (!may_be_aliased (decl_b))
2573 	    return tristate::TS_FALSE;
2574 	  if (sval_a->get_kind () == SK_INITIAL)
2575 	    if (!is_global_var (decl_b))
2576 	      {
2577 		/* The initial value of a pointer can't point to a local.  */
2578 		return tristate::TS_FALSE;
2579 	      }
2580 	}
2581       if (sval_a->get_kind () == SK_INITIAL
2582 	  && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2583 	{
2584 	  /* The initial value of a pointer can't point to a
2585 	     region that was allocated on the heap after the beginning of the
2586 	     path.  */
2587 	  return tristate::TS_FALSE;
2588 	}
2589       if (const widening_svalue *widening_sval_a
2590 	  = sval_a->dyn_cast_widening_svalue ())
2591 	{
2592 	  const svalue *base = widening_sval_a->get_base_svalue ();
2593 	  if (const region_svalue *region_sval
2594 		= base->dyn_cast_region_svalue ())
2595 	    {
2596 	      const region *pointee = region_sval->get_pointee ();
2597 	      /* If we have sval_a is WIDENING(&REGION, OP), and
2598 		 B can't alias REGION, then B can't alias A either.
2599 		 For example, A might arise from
2600 		   for (ptr = &REGION; ...; ptr++)
2601 		 where sval_a is ptr in the 2nd iteration of the loop.
2602 		 We want to ensure that "*ptr" can only clobber things
2603 		 within REGION's base region.  */
2604 	      tristate ts = eval_alias (pointee->get_base_region (),
2605 					base_reg_b);
2606 	      if (ts.is_false ())
2607 		return tristate::TS_FALSE;
2608 	    }
2609 	}
2610     }
2611   return tristate::TS_UNKNOWN;
2612 }
2613 
2614 /* Remove all bindings overlapping REG within this store.  */
2615 
2616 void
2617 store::clobber_region (store_manager *mgr, const region *reg)
2618 {
2619   const region *base_reg = reg->get_base_region ();
2620   binding_cluster **slot = m_cluster_map.get (base_reg);
2621   if (!slot)
2622     return;
2623   binding_cluster *cluster = *slot;
2624   cluster->clobber_region (mgr, reg);
2625   if (cluster->redundant_p ())
2626     {
2627       delete cluster;
2628       m_cluster_map.remove (base_reg);
2629     }
2630 }
2631 
2632 /* Remove any bindings for REG within this store.  */
2633 
2634 void
2635 store::purge_region (store_manager *mgr, const region *reg)
2636 {
2637   const region *base_reg = reg->get_base_region ();
2638   binding_cluster **slot = m_cluster_map.get (base_reg);
2639   if (!slot)
2640     return;
2641   binding_cluster *cluster = *slot;
2642   cluster->purge_region (mgr, reg);
2643   if (cluster->redundant_p ())
2644     {
2645       delete cluster;
2646       m_cluster_map.remove (base_reg);
2647     }
2648 }
2649 
2650 /* Fill REG with SVAL.  */
2651 
2652 void
2653 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2654 {
2655   const region *base_reg = reg->get_base_region ();
2656   if (base_reg->symbolic_for_unknown_ptr_p ()
2657       || !base_reg->tracked_p ())
2658     return;
2659   binding_cluster *cluster = get_or_create_cluster (base_reg);
2660   cluster->fill_region (mgr, reg, sval);
2661 }
2662 
2663 /* Zero-fill REG.  */
2664 
2665 void
2666 store::zero_fill_region (store_manager *mgr, const region *reg)
2667 {
2668   region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2669   const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2670   fill_region (mgr, reg, zero_sval);
2671 }
2672 
2673 /* Mark REG as having unknown content.  */
2674 
2675 void
2676 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2677 			       uncertainty_t *uncertainty)
2678 {
2679   const region *base_reg = reg->get_base_region ();
2680   if (base_reg->symbolic_for_unknown_ptr_p ()
2681       || !base_reg->tracked_p ())
2682     return;
2683   binding_cluster *cluster = get_or_create_cluster (base_reg);
2684   cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty);
2685 }
2686 
2687 /* Purge state involving SVAL.  */
2688 
2689 void
2690 store::purge_state_involving (const svalue *sval,
2691 			      region_model_manager *sval_mgr)
2692 {
2693   auto_vec <const region *> base_regs_to_purge;
2694   for (auto iter : m_cluster_map)
2695     {
2696       const region *base_reg = iter.first;
2697       if (base_reg->involves_p (sval))
2698 	base_regs_to_purge.safe_push (base_reg);
2699       else
2700 	{
2701 	  binding_cluster *cluster = iter.second;
2702 	  cluster->purge_state_involving (sval, sval_mgr);
2703 	}
2704     }
2705 
2706   for (auto iter : base_regs_to_purge)
2707     purge_cluster (iter);
2708 }
2709 
2710 /* Get the cluster for BASE_REG, or NULL (const version).  */
2711 
2712 const binding_cluster *
2713 store::get_cluster (const region *base_reg) const
2714 {
2715   gcc_assert (base_reg);
2716   gcc_assert (base_reg->get_base_region () == base_reg);
2717   if (binding_cluster **slot
2718 	= const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2719     return *slot;
2720   else
2721     return NULL;
2722 }
2723 
2724 /* Get the cluster for BASE_REG, or NULL (non-const version).  */
2725 
2726 binding_cluster *
2727 store::get_cluster (const region *base_reg)
2728 {
2729   gcc_assert (base_reg);
2730   gcc_assert (base_reg->get_base_region () == base_reg);
2731   if (binding_cluster **slot = m_cluster_map.get (base_reg))
2732     return *slot;
2733   else
2734     return NULL;
2735 }
2736 
2737 /* Get the cluster for BASE_REG, creating it if it doesn't already exist.  */
2738 
2739 binding_cluster *
2740 store::get_or_create_cluster (const region *base_reg)
2741 {
2742   gcc_assert (base_reg);
2743   gcc_assert (base_reg->get_base_region () == base_reg);
2744 
2745   /* We shouldn't create clusters for dereferencing an UNKNOWN ptr.  */
2746   gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2747 
2748   /* We shouldn't create clusters for base regions that aren't trackable.  */
2749   gcc_assert (base_reg->tracked_p ());
2750 
2751   if (binding_cluster **slot = m_cluster_map.get (base_reg))
2752     return *slot;
2753 
2754   binding_cluster *cluster = new binding_cluster (base_reg);
2755   m_cluster_map.put (base_reg, cluster);
2756 
2757   return cluster;
2758 }
2759 
2760 /* Remove any cluster for BASE_REG, for use by
2761    region_model::unbind_region_and_descendents
2762    when popping stack frames and handling deleted heap regions.  */
2763 
2764 void
2765 store::purge_cluster (const region *base_reg)
2766 {
2767   gcc_assert (base_reg->get_base_region () == base_reg);
2768   binding_cluster **slot = m_cluster_map.get (base_reg);
2769   if (!slot)
2770     return;
2771   binding_cluster *cluster = *slot;
2772   delete cluster;
2773   m_cluster_map.remove (base_reg);
2774 }
2775 
2776 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2777    Return true if successful, or false if the stores can't be merged.  */
2778 
2779 bool
2780 store::can_merge_p (const store *store_a, const store *store_b,
2781 		    store *out_store, store_manager *mgr,
2782 		    model_merger *merger)
2783 {
2784   if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
2785     out_store->m_called_unknown_fn = true;
2786 
2787   /* Get the union of all base regions for STORE_A and STORE_B.  */
2788   hash_set<const region *> base_regions;
2789   for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
2790        iter_a != store_a->m_cluster_map.end (); ++iter_a)
2791     {
2792       const region *base_reg_a = (*iter_a).first;
2793       base_regions.add (base_reg_a);
2794     }
2795   for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
2796        iter_b != store_b->m_cluster_map.end (); ++iter_b)
2797     {
2798       const region *base_reg_b = (*iter_b).first;
2799       base_regions.add (base_reg_b);
2800     }
2801 
2802   /* Sort the base regions before considering them.  This ought not to
2803      affect the results, but can affect which types UNKNOWN_REGIONs are
2804      created for in a run; sorting them thus avoids minor differences
2805      in logfiles.  */
2806   auto_vec<const region *> vec_base_regions (base_regions.elements ());
2807   for (hash_set<const region *>::iterator iter = base_regions.begin ();
2808        iter != base_regions.end (); ++iter)
2809     vec_base_regions.quick_push (*iter);
2810   vec_base_regions.qsort (region::cmp_ptr_ptr);
2811   unsigned i;
2812   const region *base_reg;
2813   FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
2814     {
2815       const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
2816       const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
2817       /* At least one of cluster_a and cluster_b must be non-NULL.  */
2818       binding_cluster *out_cluster
2819 	= out_store->get_or_create_cluster (base_reg);
2820       if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
2821 					 out_cluster, out_store, mgr, merger))
2822 	return false;
2823     }
2824   return true;
2825 }
2826 
2827 /* Mark the cluster for BASE_REG as having escaped.
2828    For use when handling an unrecognized function call, and
2829    for params to "top-level" calls.
2830    Further unknown function calls could touch it, even if the cluster
2831    isn't reachable from args of those calls.  */
2832 
2833 void
2834 store::mark_as_escaped (const region *base_reg)
2835 {
2836   gcc_assert (base_reg);
2837   gcc_assert (base_reg->get_base_region () == base_reg);
2838 
2839   if (base_reg->symbolic_for_unknown_ptr_p ()
2840       || !base_reg->tracked_p ())
2841     return;
2842 
2843   binding_cluster *cluster = get_or_create_cluster (base_reg);
2844   cluster->mark_as_escaped ();
2845 }
2846 
2847 /* Handle an unknown fncall by updating any clusters that have escaped
2848    (either in this fncall, or in a prior one).  */
2849 
2850 void
2851 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
2852 			  const conjured_purge &p)
2853 {
2854   m_called_unknown_fn = true;
2855 
2856   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2857        iter != m_cluster_map.end (); ++iter)
2858     (*iter).second->on_unknown_fncall (call, mgr, p);
2859 }
2860 
2861 /* Return true if a non-const pointer to BASE_REG (or something within it)
2862    has escaped to code outside of the TU being analyzed.  */
2863 
2864 bool
2865 store::escaped_p (const region *base_reg) const
2866 {
2867   gcc_assert (base_reg);
2868   gcc_assert (base_reg->get_base_region () == base_reg);
2869 
2870   if (binding_cluster **cluster_slot
2871       = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
2872     return (*cluster_slot)->escaped_p ();
2873   return false;
2874 }
2875 
2876 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
2877    this store, using VISITED to ensure the traversal terminates.  */
2878 
2879 void
2880 store::get_representative_path_vars (const region_model *model,
2881 				     svalue_set *visited,
2882 				     const svalue *sval,
2883 				     auto_vec<path_var> *out_pvs) const
2884 {
2885   gcc_assert (sval);
2886 
2887   /* Find all bindings that reference SVAL.  */
2888   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2889        iter != m_cluster_map.end (); ++iter)
2890     {
2891       const region *base_reg = (*iter).first;
2892       binding_cluster *cluster = (*iter).second;
2893       cluster->get_representative_path_vars (model, visited, base_reg, sval,
2894 					     out_pvs);
2895     }
2896 
2897   if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
2898     {
2899       const region *reg = init_sval->get_region ();
2900       if (path_var pv = model->get_representative_path_var (reg,
2901 							    visited))
2902 	out_pvs->safe_push (pv);
2903     }
2904 }
2905 
2906 /* Remove all bindings overlapping REG within this store, removing
2907    any clusters that become redundant.
2908 
2909    If UNCERTAINTY is non-NULL, use it to record any svalues that
2910    were removed, as being maybe-bound.  */
2911 
2912 void
2913 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
2914 				    uncertainty_t *uncertainty)
2915 {
2916   const region *base_reg = reg->get_base_region ();
2917   if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
2918     {
2919       binding_cluster *cluster = *cluster_slot;
2920       if (reg == base_reg && !escaped_p (base_reg))
2921 	{
2922 	  /* Remove whole cluster.  */
2923 	  m_cluster_map.remove (base_reg);
2924 	  delete cluster;
2925 	  return;
2926 	}
2927       cluster->remove_overlapping_bindings (mgr, reg, uncertainty);
2928     }
2929 }
2930 
2931 /* Subclass of visitor that accumulates a hash_set of the regions that
2932    were visited.  */
2933 
2934 struct region_finder : public visitor
2935 {
2936   void visit_region (const region *reg) FINAL OVERRIDE
2937   {
2938     m_regs.add (reg);
2939   }
2940 
2941   hash_set<const region *> m_regs;
2942 };
2943 
2944 /* Canonicalize this store, to maximize the chance of equality between
2945    instances.  */
2946 
2947 void
2948 store::canonicalize (store_manager *mgr)
2949 {
2950   /* If we have e.g.:
2951          cluster for: HEAP_ALLOCATED_REGION(543)
2952            ESCAPED
2953            TOUCHED
2954      where the heap region is empty and unreferenced, then purge that
2955      cluster, to avoid unbounded state chains involving these.  */
2956 
2957   /* Find regions that are referenced by bound values in the store.  */
2958   region_finder s;
2959   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2960        iter != m_cluster_map.end (); ++iter)
2961     {
2962       binding_cluster *cluster = (*iter).second;
2963       for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
2964 	   bind_iter != cluster->m_map.end (); ++bind_iter)
2965 	(*bind_iter).second->accept (&s);
2966     }
2967 
2968   /* Locate heap-allocated regions that have empty bindings and that
2969      weren't found by the traversal above.  */
2970   hash_set<const region *> purgeable_regions;
2971   for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2972        iter != m_cluster_map.end (); ++iter)
2973     {
2974       const region *base_reg = (*iter).first;
2975       binding_cluster *cluster = (*iter).second;
2976       if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
2977 	{
2978 	  if (cluster->empty_p ())
2979 	    if (!s.m_regs.contains (base_reg))
2980 	      purgeable_regions.add (base_reg);
2981 
2982 	  /* Also cover the UNKNOWN case.  */
2983 	  if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2984 	    if (sval->get_kind () == SK_UNKNOWN)
2985 	      if (!s.m_regs.contains (base_reg))
2986 		purgeable_regions.add (base_reg);
2987 	}
2988     }
2989 
2990   /* Purge them.  */
2991   for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
2992        iter != purgeable_regions.end (); ++iter)
2993     {
2994       const region *base_reg = *iter;
2995       purge_cluster (base_reg);
2996     }
2997 }
2998 
2999 /* Subroutine for use by exploded_path::feasible_p.
3000 
3001    We need to deal with state differences between:
3002    (a) when the exploded_graph is being initially constructed and
3003    (b) when replaying the state changes along a specific path in
3004    exploded_path::feasible_p.
3005 
3006    In (a), state merging happens, so when exploring a loop
3007      for (i = 0; i < 1024; i++)
3008    on successive iterations we have i == 0, then i == WIDENING.
3009 
3010    In (b), no state merging happens, so naively replaying the path
3011    that goes twice through the loop then exits it
3012    would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3013    that exits the loop, which would be found to be infeasible as i == 1,
3014    and the path would be rejected.
3015 
3016    We need to fix up state during replay.  This subroutine is
3017    called whenever we enter a supernode that we've already
3018    visited along this exploded_path, passing in OTHER_STORE
3019    from the destination enode's state.
3020 
3021    Find bindings to widening values in OTHER_STORE.
3022    For all that are found, update the binding in this store to UNKNOWN.  */
3023 
3024 void
3025 store::loop_replay_fixup (const store *other_store,
3026 			  region_model_manager *mgr)
3027 {
3028   gcc_assert (other_store);
3029   for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3030        iter != other_store->m_cluster_map.end (); ++iter)
3031     {
3032       const region *base_reg = (*iter).first;
3033       binding_cluster *cluster = (*iter).second;
3034       for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3035 	   bind_iter != cluster->m_map.end (); ++bind_iter)
3036 	{
3037 	  const binding_key *key = (*bind_iter).first;
3038 	  const svalue *sval = (*bind_iter).second;
3039 	  if (sval->get_kind () == SK_WIDENING)
3040 	    {
3041 	      binding_cluster *this_cluster
3042 		= get_or_create_cluster (base_reg);
3043 	      const svalue *unknown
3044 		= mgr->get_or_create_unknown_svalue (sval->get_type ());
3045 	      this_cluster->bind_key (key, unknown);
3046 	    }
3047 	}
3048     }
3049 }
3050 
3051 #if CHECKING_P
3052 
3053 namespace selftest {
3054 
3055 /* Verify that bit_range::intersects_p works as expected.  */
3056 
3057 static void
3058 test_bit_range_intersects_p ()
3059 {
3060   bit_range b0 (0, 1);
3061   bit_range b1 (1, 1);
3062   bit_range b2 (2, 1);
3063   bit_range b3 (3, 1);
3064   bit_range b4 (4, 1);
3065   bit_range b5 (5, 1);
3066   bit_range b6 (6, 1);
3067   bit_range b7 (7, 1);
3068   bit_range b1_to_6 (1, 6);
3069   bit_range b0_to_7 (0, 8);
3070   bit_range b3_to_5 (3, 3);
3071   bit_range b6_to_7 (6, 2);
3072 
3073   /* Self-intersection is true.  */
3074   ASSERT_TRUE (b0.intersects_p (b0));
3075   ASSERT_TRUE (b7.intersects_p (b7));
3076   ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3077   ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3078 
3079   ASSERT_FALSE (b0.intersects_p (b1));
3080   ASSERT_FALSE (b1.intersects_p (b0));
3081   ASSERT_FALSE (b0.intersects_p (b7));
3082   ASSERT_FALSE (b7.intersects_p (b0));
3083 
3084   ASSERT_TRUE (b0_to_7.intersects_p (b0));
3085   ASSERT_TRUE (b0_to_7.intersects_p (b7));
3086   ASSERT_TRUE (b0.intersects_p (b0_to_7));
3087   ASSERT_TRUE (b7.intersects_p (b0_to_7));
3088 
3089   ASSERT_FALSE (b0.intersects_p (b1_to_6));
3090   ASSERT_FALSE (b1_to_6.intersects_p (b0));
3091   ASSERT_TRUE (b1.intersects_p (b1_to_6));
3092   ASSERT_TRUE (b1_to_6.intersects_p (b1));
3093   ASSERT_TRUE (b1_to_6.intersects_p (b6));
3094   ASSERT_FALSE (b1_to_6.intersects_p (b7));
3095 
3096   ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3097   ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3098 
3099   ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3100   ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3101 
3102   bit_range r1 (0,0);
3103   bit_range r2 (0,0);
3104   ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3105   ASSERT_EQ (r1.get_start_bit_offset (), 0);
3106   ASSERT_EQ (r1.m_size_in_bits, 6);
3107   ASSERT_EQ (r2.get_start_bit_offset (), 1);
3108   ASSERT_EQ (r2.m_size_in_bits, 6);
3109 
3110   ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3111   ASSERT_EQ (r1.get_start_bit_offset (), 1);
3112   ASSERT_EQ (r1.m_size_in_bits, 6);
3113   ASSERT_EQ (r2.get_start_bit_offset (), 0);
3114   ASSERT_EQ (r2.m_size_in_bits, 6);
3115 }
3116 
3117 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ.  */
3118 
3119 static void
3120 assert_bit_range_from_mask_eq (const location &loc,
3121 			       unsigned HOST_WIDE_INT mask,
3122 			       const bit_range &expected)
3123 {
3124   bit_range actual (0, 0);
3125   bool ok = bit_range::from_mask (mask, &actual);
3126   ASSERT_TRUE_AT (loc, ok);
3127   ASSERT_EQ_AT (loc, actual, expected);
3128 }
3129 
3130 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3131    out EXPECTED_BIT_RANGE.  */
3132 
3133 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3134   SELFTEST_BEGIN_STMT							\
3135   assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK,		\
3136 				 EXPECTED_BIT_RANGE);			\
3137   SELFTEST_END_STMT
3138 
3139 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK.  */
3140 
3141 static void
3142 assert_no_bit_range_from_mask_eq (const location &loc,
3143 				  unsigned HOST_WIDE_INT mask)
3144 {
3145   bit_range actual (0, 0);
3146   bool ok = bit_range::from_mask (mask, &actual);
3147   ASSERT_FALSE_AT (loc, ok);
3148 }
3149 
3150 /* Assert that bit_range::from_mask (MASK) returns false.  */
3151 
3152 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3153   SELFTEST_BEGIN_STMT							\
3154   assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK);		\
3155   SELFTEST_END_STMT
3156 
3157 /* Verify that bit_range::from_mask works as expected.  */
3158 
3159 static void
3160 test_bit_range_from_mask ()
3161 {
3162   /* Should fail on zero.  */
3163   ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3164 
3165   /* Verify 1-bit masks.  */
3166   ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3167   ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3168   ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3169   ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3170   ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3171   ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3172   ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3173   ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3174 
3175   /* Verify N-bit masks starting at bit 0.  */
3176   ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3177   ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3178   ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3179   ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3180   ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3181   ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3182   ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3183   ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3184 
3185   /* Various multi-bit masks that don't start at bit 0.  */
3186   ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3187   ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3188   ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3189 
3190   /* Multiple ranges of set bits should fail.  */
3191   ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3192   ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3193 }
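
/* Illustrative sketch (added for exposition; a hypothetical helper, not
   necessarily how bit_range::from_mask is implemented): a mask converts
   to a bit_range only when its set bits form one contiguous run, in which
   case the lowest set bit gives the start and the population count gives
   the size.  */

static bool
sketch_bit_range_from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
{
  if (mask == 0)
    return false;
  int start = ctz_hwi (mask);
  int size = popcount_hwi (mask);
  /* Rebuild a run of SIZE set bits starting at START; if that doesn't
     reproduce MASK, the set bits were not contiguous.  */
  unsigned HOST_WIDE_INT run
    = (size == HOST_BITS_PER_WIDE_INT
       ? ~(unsigned HOST_WIDE_INT) 0
       : (HOST_WIDE_INT_1U << size) - 1);
  if ((run << start) != mask)
    return false;
  *out = bit_range (start, size);
  return true;
}

/* E.g. 0x600 gives start 9 and size 2, agreeing with
   ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2)) above, whereas
   0x101 fails the contiguity check, matching
   ASSERT_NO_BIT_RANGE_FROM_MASK (0x101).  */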
3194 
3195 /* Implementation detail of ASSERT_OVERLAP.  */
3196 
3197 static void
3198 assert_overlap (const location &loc,
3199 		const concrete_binding *b1,
3200 		const concrete_binding *b2)
3201 {
3202   ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3203   ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3204 }
3205 
3206 /* Implementation detail of ASSERT_DISJOINT.  */
3207 
3208 static void
3209 assert_disjoint (const location &loc,
3210 		 const concrete_binding *b1,
3211 		 const concrete_binding *b2)
3212 {
3213   ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3214   ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3215 }
3216 
3217 /* Assert that B1 and B2 overlap, checking both ways.  */
3218 
3219 #define ASSERT_OVERLAP(B1, B2) \
3220   SELFTEST_BEGIN_STMT				\
3221   assert_overlap (SELFTEST_LOCATION, B1, B2);	\
3222   SELFTEST_END_STMT
3223 
3224 /* Assert that B1 and B2 do not overlap, checking both ways.  */
3225 
3226 #define ASSERT_DISJOINT(B1, B2) \
3227   SELFTEST_BEGIN_STMT				\
3228   assert_disjoint (SELFTEST_LOCATION, B1, B2);  \
3229   SELFTEST_END_STMT
3230 
3231 /* Verify that concrete_binding::overlaps_p works as expected.  */
3232 
3233 static void
3234 test_binding_key_overlap ()
3235 {
3236   store_manager mgr (NULL);
3237 
3238   /* Various 8-bit bindings.  */
3239   const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3240   const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3241   const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3242   const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3243 
3244   /* 16-bit bindings.  */
3245   const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3246   const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3247   const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3248 
3249   /* 32-bit binding.  */
3250   const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3251 
3252   /* Everything should self-overlap.  */
3253   ASSERT_OVERLAP (cb_0_7, cb_0_7);
3254   ASSERT_OVERLAP (cb_8_15, cb_8_15);
3255   ASSERT_OVERLAP (cb_16_23, cb_16_23);
3256   ASSERT_OVERLAP (cb_24_31, cb_24_31);
3257   ASSERT_OVERLAP (cb_0_15, cb_0_15);
3258   ASSERT_OVERLAP (cb_8_23, cb_8_23);
3259   ASSERT_OVERLAP (cb_16_31, cb_16_31);
3260   ASSERT_OVERLAP (cb_0_31, cb_0_31);
3261 
3262   /* Verify that adjacent 8-bit bindings don't overlap each other.  */
3263   ASSERT_DISJOINT (cb_0_7, cb_8_15);
3264   ASSERT_DISJOINT (cb_8_15, cb_16_23);
3265 
3266   /* Check for overlap of differently-sized bindings.  */
3267   ASSERT_OVERLAP (cb_0_7, cb_0_31);
3268   /* ...and with differing start points.  */
3269   ASSERT_OVERLAP (cb_8_15, cb_0_31);
3270   ASSERT_DISJOINT (cb_8_15, cb_16_31);
3271   ASSERT_OVERLAP (cb_16_23, cb_0_31);
3272   ASSERT_OVERLAP (cb_16_31, cb_0_31);
3273 
3274   ASSERT_DISJOINT (cb_0_7, cb_8_23);
3275   ASSERT_OVERLAP (cb_8_23, cb_16_23);
3276   ASSERT_OVERLAP (cb_8_23, cb_16_31);
3277   ASSERT_DISJOINT (cb_8_23, cb_24_31);
3278 }
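
/* Illustrative sketch (added for exposition; a hypothetical helper, not
   necessarily the body of concrete_binding::overlaps_p): each concrete
   binding above covers the half-open bit range [start, start + size), and
   two such ranges overlap iff each one begins before the other one ends.  */

static bool
sketch_bits_overlap_p (unsigned HOST_WIDE_INT start_a,
		       unsigned HOST_WIDE_INT size_a,
		       unsigned HOST_WIDE_INT start_b,
		       unsigned HOST_WIDE_INT size_b)
{
  return start_a < start_b + size_b && start_b < start_a + size_a;
}

/* E.g. cb_8_15 above covers bits [8, 16) and cb_16_31 covers [16, 32):
   8 < 32 holds but 16 < 16 does not, so they are disjoint, matching
   ASSERT_DISJOINT (cb_8_15, cb_16_31).  */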
3279 
3280 /* Run all of the selftests within this file.  */
3281 
3282 void
3283 analyzer_store_cc_tests ()
3284 {
3285   test_bit_range_intersects_p ();
3286   test_bit_range_from_mask ();
3287   test_binding_key_overlap ();
3288 }
3289 
3290 } // namespace selftest
3291 
3292 #endif /* CHECKING_P */
3293 
3294 } // namespace ana
3295 
3296 #endif /* #if ENABLE_ANALYZER */
3297