/* Classes for modeling the state of memory.
   Copyright (C) 2019-2021 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "diagnostic-core.h"
#include "graphviz.h"
#include "options.h"
#include "cgraph.h"
#include "tree-dfa.h"
#include "stringpool.h"
#include "convert.h"
#include "target.h"
#include "fold-const.h"
#include "tree-pretty-print.h"
#include "diagnostic-color.h"
#include "diagnostic-metadata.h"
#include "tristate.h"
#include "bitmap.h"
#include "selftest.h"
#include "json.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "ordered-hash-map.h"
#include "cfg.h"
#include "digraph.h"
#include "analyzer/supergraph.h"
#include "sbitmap.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "diagnostic-event-id.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
#include "analyzer/region-model-reachability.h"
#include "analyzer/analyzer-selftests.h"
#include "analyzer/program-state.h"
#include "stor-layout.h"
#include "attribs.h"
#include "tree-object-size.h"

#if ENABLE_ANALYZER

namespace ana {

/* Dump T to PP in language-independent form, for debugging/logging/dumping
   purposes.  */

void
dump_tree (pretty_printer *pp, tree t)
{
  dump_generic_node (pp, t, 0, TDF_SLIM, 0);
}

/* Dump T to PP in language-independent form in quotes, for
   debugging/logging/dumping purposes.  */

void
dump_quoted_tree (pretty_printer *pp, tree t)
{
  pp_begin_quote (pp, pp_show_color (pp));
  dump_tree (pp, t);
  pp_end_quote (pp, pp_show_color (pp));
}

/* Equivalent to pp_printf (pp, "%qT", t), to avoid nesting pp_printf
   calls within other pp_printf calls.

   default_tree_printer handles 'T' and some other codes by calling
     dump_generic_node (pp, t, 0, TDF_SLIM, 0);
   dump_generic_node calls pp_printf in various places, leading to
   garbled output.

   Ideally pp_printf could be made to be reentrant, but in the meantime
   this function provides a workaround.  */

void
print_quoted_type (pretty_printer *pp, tree t)
{
  pp_begin_quote (pp, pp_show_color (pp));
  dump_generic_node (pp, t, 0, TDF_SLIM, 0);
  pp_end_quote (pp, pp_show_color (pp));
}
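
/* For instance (an illustrative sketch, not from the original sources),
   when already inside a pp_printf call (e.g. within a format-decoder
   callback), rather than re-entering pp_printf via "%qT" a caller can
   write:

     pp_string (pp, "type: ");
     print_quoted_type (pp, t);

   keeping only one pp_printf active at a time.  */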

/* class region_to_value_map.  */

/* Assignment operator for region_to_value_map.  */

region_to_value_map &
region_to_value_map::operator= (const region_to_value_map &other)
{
  m_hash_map.empty ();
  for (auto iter : other.m_hash_map)
    {
      const region *reg = iter.first;
      const svalue *sval = iter.second;
      m_hash_map.put (reg, sval);
    }
  return *this;
}

/* Equality operator for region_to_value_map.  */

bool
region_to_value_map::operator== (const region_to_value_map &other) const
{
  if (m_hash_map.elements () != other.m_hash_map.elements ())
    return false;

  for (auto iter : *this)
    {
      const region *reg = iter.first;
      const svalue *sval = iter.second;
      const svalue * const *other_slot = other.get (reg);
      if (other_slot == NULL)
        return false;
      if (sval != *other_slot)
        return false;
    }

  return true;
}

/* Dump this object to PP.  */

void
region_to_value_map::dump_to_pp (pretty_printer *pp, bool simple,
                                 bool multiline) const
{
  auto_vec<const region *> regs;
  for (iterator iter = begin (); iter != end (); ++iter)
    regs.safe_push ((*iter).first);
  regs.qsort (region::cmp_ptr_ptr);
  if (multiline)
    pp_newline (pp);
  else
    pp_string (pp, " {");
  unsigned i;
  const region *reg;
  FOR_EACH_VEC_ELT (regs, i, reg)
    {
      if (multiline)
        pp_string (pp, "  ");
      else if (i > 0)
        pp_string (pp, ", ");
      reg->dump_to_pp (pp, simple);
      pp_string (pp, ": ");
      const svalue *sval = *get (reg);
      sval->dump_to_pp (pp, true);
      if (multiline)
        pp_newline (pp);
    }
  if (!multiline)
    pp_string (pp, "}");
}

/* Dump this object to stderr.  */

DEBUG_FUNCTION void
region_to_value_map::dump (bool simple) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = stderr;
  dump_to_pp (&pp, simple, true);
  pp_newline (&pp);
  pp_flush (&pp);
}


/* Attempt to merge THIS with OTHER, writing the result
   to OUT.

   For now, write (region, value) mappings that are in common between THIS
   and OTHER to OUT, effectively taking the intersection, rather than
   rejecting differences.  */

bool
region_to_value_map::can_merge_with_p (const region_to_value_map &other,
                                       region_to_value_map *out) const
{
  for (auto iter : *this)
    {
      const region *iter_reg = iter.first;
      const svalue *iter_sval = iter.second;
      const svalue * const * other_slot = other.get (iter_reg);
      if (other_slot)
        if (iter_sval == *other_slot)
          out->put (iter_reg, iter_sval);
    }
  return true;
}
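
/* For example (illustrative, not from the original sources), merging
     {r1: sv1, r2: sv2}
   with
     {r1: sv1, r2: sv3}
   writes {r1: sv1} to OUT: the agreeing binding for r1 survives, and
   the conflicting bindings for r2 are simply dropped.  */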

/* Purge any state involving SVAL.  */

void
region_to_value_map::purge_state_involving (const svalue *sval)
{
  auto_vec<const region *> to_purge;
  for (auto iter : *this)
    {
      const region *iter_reg = iter.first;
      const svalue *iter_sval = iter.second;
      if (iter_reg->involves_p (sval) || iter_sval->involves_p (sval))
        to_purge.safe_push (iter_reg);
    }
  for (auto iter : to_purge)
    m_hash_map.remove (iter);
}

/* class region_model.  */

/* Ctor for region_model: construct an "empty" model.  */

region_model::region_model (region_model_manager *mgr)
: m_mgr (mgr), m_store (), m_current_frame (NULL),
  m_dynamic_extents ()
{
  m_constraints = new constraint_manager (mgr);
}

/* region_model's copy ctor.  */

region_model::region_model (const region_model &other)
: m_mgr (other.m_mgr), m_store (other.m_store),
  m_constraints (new constraint_manager (*other.m_constraints)),
  m_current_frame (other.m_current_frame),
  m_dynamic_extents (other.m_dynamic_extents)
{
}

/* region_model's dtor.  */

region_model::~region_model ()
{
  delete m_constraints;
}

/* region_model's assignment operator.  */

region_model &
region_model::operator= (const region_model &other)
{
  /* m_mgr is const.  */
  gcc_assert (m_mgr == other.m_mgr);

  m_store = other.m_store;

  delete m_constraints;
  m_constraints = new constraint_manager (*other.m_constraints);

  m_current_frame = other.m_current_frame;

  m_dynamic_extents = other.m_dynamic_extents;

  return *this;
}

/* Equality operator for region_model.

   Amongst other things this directly compares the stores and the constraint
   managers, so for this to be meaningful both this and OTHER should
   have been canonicalized.  */

bool
region_model::operator== (const region_model &other) const
{
  /* We can only compare instances that use the same manager.  */
  gcc_assert (m_mgr == other.m_mgr);

  if (m_store != other.m_store)
    return false;

  if (*m_constraints != *other.m_constraints)
    return false;

  if (m_current_frame != other.m_current_frame)
    return false;

  if (m_dynamic_extents != other.m_dynamic_extents)
    return false;

  gcc_checking_assert (hash () == other.hash ());

  return true;
}

/* Generate a hash value for this region_model.  */

hashval_t
region_model::hash () const
{
  hashval_t result = m_store.hash ();
  result ^= m_constraints->hash ();
  return result;
}

/* Dump a representation of this model to PP, showing the
   stack, the store, and any constraints.
   Use SIMPLE to control how svalues and regions are printed.  */

void
region_model::dump_to_pp (pretty_printer *pp, bool simple,
                          bool multiline) const
{
  /* Dump stack.  */
  pp_printf (pp, "stack depth: %i", get_stack_depth ());
  if (multiline)
    pp_newline (pp);
  else
    pp_string (pp, " {");
  for (const frame_region *iter_frame = m_current_frame; iter_frame;
       iter_frame = iter_frame->get_calling_frame ())
    {
      if (multiline)
        pp_string (pp, "  ");
      else if (iter_frame != m_current_frame)
        pp_string (pp, ", ");
      pp_printf (pp, "frame (index %i): ", iter_frame->get_index ());
      iter_frame->dump_to_pp (pp, simple);
      if (multiline)
        pp_newline (pp);
    }
  if (!multiline)
    pp_string (pp, "}");

  /* Dump store.  */
  if (!multiline)
    pp_string (pp, ", {");
  m_store.dump_to_pp (pp, simple, multiline,
                      m_mgr->get_store_manager ());
  if (!multiline)
    pp_string (pp, "}");

  /* Dump constraints.  */
  pp_string (pp, "constraint_manager:");
  if (multiline)
    pp_newline (pp);
  else
    pp_string (pp, " {");
  m_constraints->dump_to_pp (pp, multiline);
  if (!multiline)
    pp_string (pp, "}");

  /* Dump sizes of dynamic regions, if any are known.  */
  if (!m_dynamic_extents.is_empty ())
    {
      pp_string (pp, "dynamic_extents:");
      m_dynamic_extents.dump_to_pp (pp, simple, multiline);
    }
}

/* Dump a representation of this model to FILE.  */

void
region_model::dump (FILE *fp, bool simple, bool multiline) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = fp;
  dump_to_pp (&pp, simple, multiline);
  pp_newline (&pp);
  pp_flush (&pp);
}

/* Dump a multiline representation of this model to stderr.  */

DEBUG_FUNCTION void
region_model::dump (bool simple) const
{
  dump (stderr, simple, true);
}

/* Dump a multiline representation of this model to stderr.  */

DEBUG_FUNCTION void
region_model::debug () const
{
  dump (true);
}

/* Assert that this object is valid.  */

void
region_model::validate () const
{
  m_store.validate ();
}

/* Canonicalize the store and constraints, to maximize the chance of
   equality between region_model instances.  */

void
region_model::canonicalize ()
{
  m_store.canonicalize (m_mgr->get_store_manager ());
  m_constraints->canonicalize ();
}

/* Return true if this region_model is in canonical form.  */

bool
region_model::canonicalized_p () const
{
  region_model copy (*this);
  copy.canonicalize ();
  return *this == copy;
}
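
/* For example (an illustrative sketch, not from the original sources),
   a caller wanting a meaningful equality test should canonicalize both
   instances first:

     model_a.canonicalize ();
     model_b.canonicalize ();
     if (model_a == model_b)
       ...

   since operator== compares the stores and constraint managers
   directly.  */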

/* See the comment for store::loop_replay_fixup.  */

void
region_model::loop_replay_fixup (const region_model *dst_state)
{
  m_store.loop_replay_fixup (dst_state->get_store (), m_mgr);
}

/* A subclass of pending_diagnostic for complaining about uses of
   poisoned values.  */

class poisoned_value_diagnostic
: public pending_diagnostic_subclass<poisoned_value_diagnostic>
{
public:
  poisoned_value_diagnostic (tree expr, enum poison_kind pkind)
  : m_expr (expr), m_pkind (pkind)
  {}

  const char *get_kind () const FINAL OVERRIDE
  {
    return "poisoned_value_diagnostic";
  }

  bool use_of_uninit_p () const FINAL OVERRIDE
  {
    return m_pkind == POISON_KIND_UNINIT;
  }

  bool operator== (const poisoned_value_diagnostic &other) const
  {
    return m_expr == other.m_expr;
  }

  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    switch (m_pkind)
      {
      default:
        gcc_unreachable ();
      case POISON_KIND_UNINIT:
        {
          diagnostic_metadata m;
          m.add_cwe (457); /* "CWE-457: Use of Uninitialized Variable".  */
          return warning_meta (rich_loc, m,
                               OPT_Wanalyzer_use_of_uninitialized_value,
                               "use of uninitialized value %qE",
                               m_expr);
        }
        break;
      case POISON_KIND_FREED:
        {
          diagnostic_metadata m;
          m.add_cwe (416); /* "CWE-416: Use After Free".  */
          return warning_meta (rich_loc, m,
                               OPT_Wanalyzer_use_after_free,
                               "use after %<free%> of %qE",
                               m_expr);
        }
        break;
      case POISON_KIND_POPPED_STACK:
        {
          /* TODO: which CWE?  */
          return warning_at
            (rich_loc,
             OPT_Wanalyzer_use_of_pointer_in_stale_stack_frame,
             "dereferencing pointer %qE to within stale stack frame",
             m_expr);
        }
        break;
      }
  }

  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    switch (m_pkind)
      {
      default:
        gcc_unreachable ();
      case POISON_KIND_UNINIT:
        return ev.formatted_print ("use of uninitialized value %qE here",
                                   m_expr);
      case POISON_KIND_FREED:
        return ev.formatted_print ("use after %<free%> of %qE here",
                                   m_expr);
      case POISON_KIND_POPPED_STACK:
        return ev.formatted_print
          ("dereferencing pointer %qE to within stale stack frame",
           m_expr);
      }
  }

private:
  tree m_expr;
  enum poison_kind m_pkind;
};
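
/* Illustrative examples (not from the original sources) of code
   triggering each poison_kind above:

     int f (void) { int i; return i; }    // POISON_KIND_UNINIT

     free (p);
     return *p;                           // POISON_KIND_FREED

     int *g (void) { int i; return &i; }
     ... *g () ...                        // POISON_KIND_POPPED_STACK  */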

/* A subclass of pending_diagnostic for complaining about shifts
   by negative counts.  */

class shift_count_negative_diagnostic
: public pending_diagnostic_subclass<shift_count_negative_diagnostic>
{
public:
  shift_count_negative_diagnostic (const gassign *assign, tree count_cst)
  : m_assign (assign), m_count_cst (count_cst)
  {}

  const char *get_kind () const FINAL OVERRIDE
  {
    return "shift_count_negative_diagnostic";
  }

  bool operator== (const shift_count_negative_diagnostic &other) const
  {
    return (m_assign == other.m_assign
            && same_tree_p (m_count_cst, other.m_count_cst));
  }

  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    return warning_at (rich_loc, OPT_Wanalyzer_shift_count_negative,
                       "shift by negative count (%qE)", m_count_cst);
  }

  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    return ev.formatted_print ("shift by negative amount here (%qE)",
                               m_count_cst);
  }

private:
  const gassign *m_assign;
  tree m_count_cst;
};

/* A subclass of pending_diagnostic for complaining about shifts
   by counts >= the width of the operand type.  */

class shift_count_overflow_diagnostic
: public pending_diagnostic_subclass<shift_count_overflow_diagnostic>
{
public:
  shift_count_overflow_diagnostic (const gassign *assign,
                                   int operand_precision,
                                   tree count_cst)
  : m_assign (assign), m_operand_precision (operand_precision),
    m_count_cst (count_cst)
  {}

  const char *get_kind () const FINAL OVERRIDE
  {
    return "shift_count_overflow_diagnostic";
  }

  bool operator== (const shift_count_overflow_diagnostic &other) const
  {
    return (m_assign == other.m_assign
            && m_operand_precision == other.m_operand_precision
            && same_tree_p (m_count_cst, other.m_count_cst));
  }

  bool emit (rich_location *rich_loc) FINAL OVERRIDE
  {
    return warning_at (rich_loc, OPT_Wanalyzer_shift_count_overflow,
                       "shift by count (%qE) >= precision of type (%qi)",
                       m_count_cst, m_operand_precision);
  }

  label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
  {
    return ev.formatted_print ("shift by count %qE here", m_count_cst);
  }

private:
  const gassign *m_assign;
  int m_operand_precision;
  tree m_count_cst;
};
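
/* Illustrative triggers (not from the original sources) for the two
   shift diagnostics above, assuming a 32-bit int x:

     x << -1;   // shift_count_negative_diagnostic
     x << 32;   // shift_count_overflow_diagnostic (count >= precision)  */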

/* If ASSIGN is a stmt that can be modelled via
     set_value (lhs_reg, SVALUE, CTXT)
   for some SVALUE, get the SVALUE.
   Otherwise return NULL.  */

const svalue *
region_model::get_gassign_result (const gassign *assign,
                                  region_model_context *ctxt)
{
  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);
  enum tree_code op = gimple_assign_rhs_code (assign);
  switch (op)
    {
    default:
      return NULL;

    case POINTER_PLUS_EXPR:
      {
        /* e.g. "_1 = a_10(D) + 12;" */
        tree ptr = rhs1;
        tree offset = gimple_assign_rhs2 (assign);

        const svalue *ptr_sval = get_rvalue (ptr, ctxt);
        const svalue *offset_sval = get_rvalue (offset, ctxt);
        /* Quoting tree.def, "the second operand [of a POINTER_PLUS_EXPR]
           is an integer of type sizetype".  */
        offset_sval = m_mgr->get_or_create_cast (size_type_node, offset_sval);

        const svalue *sval_binop
          = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
                                        ptr_sval, offset_sval);
        return sval_binop;
      }
      break;

    case POINTER_DIFF_EXPR:
      {
        /* e.g. "_1 = p_2(D) - q_3(D);".  */
        tree rhs2 = gimple_assign_rhs2 (assign);
        const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
        const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);

        // TODO: perhaps fold to zero if they're known to be equal?

        const svalue *sval_binop
          = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
                                        rhs1_sval, rhs2_sval);
        return sval_binop;
      }
      break;

    /* Assignments of the form
         set_value (lvalue (LHS), rvalue (EXPR))
       for various EXPR.
       We already have the lvalue for the LHS above, as "lhs_reg".  */
    case ADDR_EXPR: /* LHS = &RHS;  */
    case BIT_FIELD_REF:
    case COMPONENT_REF: /* LHS = op0.op1;  */
    case MEM_REF:
    case REAL_CST:
    case COMPLEX_CST:
    case VECTOR_CST:
    case INTEGER_CST:
    case ARRAY_REF:
    case SSA_NAME: /* LHS = VAR;  */
    case VAR_DECL: /* LHS = VAR;  */
    case PARM_DECL: /* LHS = VAR;  */
    case REALPART_EXPR:
    case IMAGPART_EXPR:
      return get_rvalue (rhs1, ctxt);

    case ABS_EXPR:
    case ABSU_EXPR:
    case CONJ_EXPR:
    case BIT_NOT_EXPR:
    case FIX_TRUNC_EXPR:
    case FLOAT_EXPR:
    case NEGATE_EXPR:
    case NOP_EXPR:
    case VIEW_CONVERT_EXPR:
      {
        /* Unary ops.  */
        const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
        const svalue *sval_unaryop
          = m_mgr->get_or_create_unaryop (TREE_TYPE (lhs), op, rhs_sval);
        return sval_unaryop;
      }

    case EQ_EXPR:
    case GE_EXPR:
    case LE_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case LT_EXPR:
    case UNORDERED_EXPR:
    case ORDERED_EXPR:
      {
        tree rhs2 = gimple_assign_rhs2 (assign);

        const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
        const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);

        if (TREE_TYPE (lhs) == boolean_type_node)
          {
            /* Consider constraints between svalues.  */
            tristate t = eval_condition (rhs1_sval, op, rhs2_sval);
            if (t.is_known ())
              return m_mgr->get_or_create_constant_svalue
                (t.is_true () ? boolean_true_node : boolean_false_node);
          }

        /* Otherwise, generate a symbolic binary op.  */
        const svalue *sval_binop
          = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
                                        rhs1_sval, rhs2_sval);
        return sval_binop;
      }
      break;

    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case MULT_HIGHPART_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
    case MIN_EXPR:
    case MAX_EXPR:
    case COMPLEX_EXPR:
      {
        /* Binary ops.  */
        tree rhs2 = gimple_assign_rhs2 (assign);

        const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
        const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);

        if (ctxt && (op == LSHIFT_EXPR || op == RSHIFT_EXPR))
          {
            /* "INT34-C. Do not shift an expression by a negative number
               of bits or by greater than or equal to the number of bits
               that exist in the operand."  */
            if (const tree rhs2_cst = rhs2_sval->maybe_get_constant ())
              if (TREE_CODE (rhs2_cst) == INTEGER_CST)
                {
                  if (tree_int_cst_sgn (rhs2_cst) < 0)
                    ctxt->warn (new shift_count_negative_diagnostic
                                  (assign, rhs2_cst));
                  else if (compare_tree_int (rhs2_cst,
                                             TYPE_PRECISION (TREE_TYPE (rhs1)))
                           >= 0)
                    ctxt->warn (new shift_count_overflow_diagnostic
                                  (assign,
                                   TYPE_PRECISION (TREE_TYPE (rhs1)),
                                   rhs2_cst));
                }
          }

        const svalue *sval_binop
          = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
                                        rhs1_sval, rhs2_sval);
        return sval_binop;
      }

    /* Vector expressions.  In theory we could implement these elementwise,
       but for now, simply return unknown values.  */
    case VEC_DUPLICATE_EXPR:
    case VEC_SERIES_EXPR:
    case VEC_COND_EXPR:
    case VEC_PERM_EXPR:
    case VEC_WIDEN_MULT_HI_EXPR:
    case VEC_WIDEN_MULT_LO_EXPR:
    case VEC_WIDEN_MULT_EVEN_EXPR:
    case VEC_WIDEN_MULT_ODD_EXPR:
    case VEC_UNPACK_HI_EXPR:
    case VEC_UNPACK_LO_EXPR:
    case VEC_UNPACK_FLOAT_HI_EXPR:
    case VEC_UNPACK_FLOAT_LO_EXPR:
    case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
    case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
    case VEC_PACK_TRUNC_EXPR:
    case VEC_PACK_SAT_EXPR:
    case VEC_PACK_FIX_TRUNC_EXPR:
    case VEC_PACK_FLOAT_EXPR:
    case VEC_WIDEN_LSHIFT_HI_EXPR:
    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (lhs));
    }
}
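
/* For example (illustrative, not from the original sources), given
     _1 = a_2 + b_3;
   the result is the symbolic binop svalue "(a_2 + b_3)", whereas a
   boolean comparison whose outcome is already implied by the
   constraint manager, such as
     _4 = a_2 == a_2;
   for an integer a_2, can fold directly to the constant svalue for
   "true".  */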

/* Check for SVAL being poisoned, adding a warning to CTXT.
   Return SVAL, or, if a warning is added, another value, to avoid
   repeatedly complaining about the same poisoned value in followup code.  */

const svalue *
region_model::check_for_poison (const svalue *sval,
                                tree expr,
                                region_model_context *ctxt) const
{
  if (!ctxt)
    return sval;

  if (const poisoned_svalue *poisoned_sval = sval->dyn_cast_poisoned_svalue ())
    {
      /* If we have an SSA name for a temporary, we don't want to print
         '<unknown>'.
         Poisoned values are shared by type, and so we can't reconstruct
         the tree other than via the def stmts, using
         fixup_tree_for_diagnostic.  */
      tree diag_arg = fixup_tree_for_diagnostic (expr);
      enum poison_kind pkind = poisoned_sval->get_poison_kind ();
      if (ctxt->warn (new poisoned_value_diagnostic (diag_arg, pkind)))
        {
          /* We only want to report use of a poisoned value at the first
             place it gets used; return an unknown value to avoid generating
             a chain of followup warnings.  */
          sval = m_mgr->get_or_create_unknown_svalue (sval->get_type ());
        }

      return sval;
    }

  return sval;
}

/* Update this model for the ASSIGN stmt, using CTXT to report any
   diagnostics.  */

void
region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
{
  tree lhs = gimple_assign_lhs (assign);
  tree rhs1 = gimple_assign_rhs1 (assign);

  const region *lhs_reg = get_lvalue (lhs, ctxt);

  /* Most assignments are handled by:
       set_value (lhs_reg, SVALUE, CTXT)
     for some SVALUE.  */
  if (const svalue *sval = get_gassign_result (assign, ctxt))
    {
      tree expr = get_diagnostic_tree_for_gassign (assign);
      check_for_poison (sval, expr, ctxt);
      set_value (lhs_reg, sval, ctxt);
      return;
    }

  enum tree_code op = gimple_assign_rhs_code (assign);
  switch (op)
    {
    default:
      {
        if (0)
          sorry_at (assign->location, "unhandled assignment op: %qs",
                    get_tree_code_name (op));
        const svalue *unknown_sval
          = m_mgr->get_or_create_unknown_svalue (TREE_TYPE (lhs));
        set_value (lhs_reg, unknown_sval, ctxt);
      }
      break;

    case CONSTRUCTOR:
      {
        if (TREE_CLOBBER_P (rhs1))
          {
            /* e.g. "x ={v} {CLOBBER};"  */
            clobber_region (lhs_reg);
          }
        else
          {
            /* Any CONSTRUCTOR that survives to this point is either
               just a zero-init of everything, or a vector.  */
            if (!CONSTRUCTOR_NO_CLEARING (rhs1))
              zero_fill_region (lhs_reg);
            unsigned ix;
            tree index;
            tree val;
            FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (rhs1), ix, index, val)
              {
                gcc_assert (TREE_CODE (TREE_TYPE (rhs1)) == VECTOR_TYPE);
                if (!index)
                  index = build_int_cst (integer_type_node, ix);
                gcc_assert (TREE_CODE (index) == INTEGER_CST);
                const svalue *index_sval
                  = m_mgr->get_or_create_constant_svalue (index);
                gcc_assert (index_sval);
                const region *sub_reg
                  = m_mgr->get_element_region (lhs_reg,
                                               TREE_TYPE (val),
                                               index_sval);
                const svalue *val_sval = get_rvalue (val, ctxt);
                set_value (sub_reg, val_sval, ctxt);
              }
          }
      }
      break;

    case STRING_CST:
      {
        /* e.g. "struct s2 x = {{'A', 'B', 'C', 'D'}};".  */
        const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
        m_store.set_value (m_mgr->get_store_manager (), lhs_reg, rhs_sval,
                           ctxt ? ctxt->get_uncertainty () : NULL);
      }
      break;
    }
}

/* A pending_diagnostic subclass for implementing "__analyzer_dump_path".  */

class dump_path_diagnostic
: public pending_diagnostic_subclass<dump_path_diagnostic>
{
public:
  bool emit (rich_location *richloc) FINAL OVERRIDE
  {
    inform (richloc, "path");
    return true;
  }

  const char *get_kind () const FINAL OVERRIDE
  {
    return "dump_path_diagnostic";
  }

  bool operator== (const dump_path_diagnostic &) const
  {
    return true;
  }
};

/* Handle the pre-sm-state part of STMT, modifying this object in-place.
   Write true to *OUT_TERMINATE_PATH if the path should be terminated.
   Write true to *OUT_UNKNOWN_SIDE_EFFECTS if the stmt has unknown
   side effects.  */

void
region_model::on_stmt_pre (const gimple *stmt,
                           bool *out_terminate_path,
                           bool *out_unknown_side_effects,
                           region_model_context *ctxt)
{
  switch (gimple_code (stmt))
    {
    default:
      /* No-op for now.  */
      break;

    case GIMPLE_ASSIGN:
      {
        const gassign *assign = as_a <const gassign *> (stmt);
        on_assignment (assign, ctxt);
      }
      break;

    case GIMPLE_ASM:
      {
        const gasm *asm_stmt = as_a <const gasm *> (stmt);
        on_asm_stmt (asm_stmt, ctxt);
      }
      break;

    case GIMPLE_CALL:
      {
        /* Track whether we have a gcall to a function that's not recognized
           by anything, for which we don't have a function body, or for
           which we don't know the fndecl.  */
        const gcall *call = as_a <const gcall *> (stmt);

        /* Debugging/test support.  */
        if (is_special_named_call_p (call, "__analyzer_describe", 2))
          impl_call_analyzer_describe (call, ctxt);
        else if (is_special_named_call_p (call, "__analyzer_dump_capacity", 1))
          impl_call_analyzer_dump_capacity (call, ctxt);
        else if (is_special_named_call_p (call, "__analyzer_dump_path", 0))
          {
            /* Handle the builtin "__analyzer_dump_path" by queuing a
               diagnostic at this exploded_node.  */
            ctxt->warn (new dump_path_diagnostic ());
          }
        else if (is_special_named_call_p (call, "__analyzer_dump_region_model",
                                          0))
          {
            /* Handle the builtin "__analyzer_dump_region_model" by dumping
               the region model's state to stderr.  */
            dump (false);
          }
        else if (is_special_named_call_p (call, "__analyzer_eval", 1))
          impl_call_analyzer_eval (call, ctxt);
        else if (is_special_named_call_p (call, "__analyzer_break", 0))
          {
            /* Handle the builtin "__analyzer_break" by triggering a
               breakpoint.  */
            /* TODO: is there a good cross-platform way to do this?  */
            raise (SIGINT);
          }
        else if (is_special_named_call_p (call,
                                          "__analyzer_dump_exploded_nodes",
                                          1))
          {
            /* This is handled elsewhere.  */
          }
        else
          *out_unknown_side_effects = on_call_pre (call, ctxt,
                                                   out_terminate_path);
      }
      break;

    case GIMPLE_RETURN:
      {
        const greturn *return_ = as_a <const greturn *> (stmt);
        on_return (return_, ctxt);
      }
      break;
    }
}
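
/* For example (illustrative, in the style of the analyzer's DejaGnu
   tests), a testcase can probe the model via these special functions:

     void test (int i)
     {
       if (i > 42)
         __analyzer_eval (i > 0);  // { dg-warning "TRUE" }
     }

   where __analyzer_eval reports whether the condition is known to be
   true, known to be false, or unknown at that point on the path.  */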

/* Update this model for the CALL stmt, using CTXT to report any
   diagnostics - the first half.

   Updates to the region_model that should be made *before* sm-states
   are updated are done here; other updates to the region_model are done
   in region_model::on_call_post.

   Return true if the function call has unknown side effects (it wasn't
   recognized and we don't have a body for it, or are unable to tell which
   fndecl it is).

   Write true to *OUT_TERMINATE_PATH if this execution path should be
   terminated (e.g. the function call terminates the process).  */

bool
region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
                           bool *out_terminate_path)
{
  call_details cd (call, this, ctxt);

  bool unknown_side_effects = false;

  /* Some of the cases below update the lhs of the call based on the
     return value, but not all.  Provide a default value, which may
     get overwritten below.  */
  if (tree lhs = gimple_call_lhs (call))
    {
      const region *lhs_region = get_lvalue (lhs, ctxt);
      const svalue *sval
        = m_mgr->get_or_create_conjured_svalue (TREE_TYPE (lhs), call,
                                                lhs_region);
      purge_state_involving (sval, ctxt);
      set_value (lhs_region, sval, ctxt);
    }

  if (gimple_call_internal_p (call))
    {
      switch (gimple_call_internal_fn (call))
        {
        default:
          break;
        case IFN_BUILTIN_EXPECT:
          impl_call_builtin_expect (cd);
          return false;
        case IFN_UBSAN_BOUNDS:
          return false;
        }
    }

  if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
    {
      /* The various impl_call_* member functions are implemented
         in region-model-impl-calls.cc.
         Having them split out into separate functions makes it easier
         to put breakpoints on the handling of specific functions.  */

      if (fndecl_built_in_p (callee_fndecl, BUILT_IN_NORMAL)
          && gimple_builtin_call_types_compatible_p (call, callee_fndecl))
        switch (DECL_UNCHECKED_FUNCTION_CODE (callee_fndecl))
          {
          default:
            if (!DECL_PURE_P (callee_fndecl))
              unknown_side_effects = true;
            break;
          case BUILT_IN_ALLOCA:
          case BUILT_IN_ALLOCA_WITH_ALIGN:
            impl_call_alloca (cd);
            return false;
          case BUILT_IN_CALLOC:
            impl_call_calloc (cd);
            return false;
          case BUILT_IN_EXPECT:
          case BUILT_IN_EXPECT_WITH_PROBABILITY:
            impl_call_builtin_expect (cd);
            return false;
          case BUILT_IN_FREE:
            /* Handle in "on_call_post".  */
            break;
          case BUILT_IN_MALLOC:
            impl_call_malloc (cd);
            return false;
          case BUILT_IN_MEMCPY:
          case BUILT_IN_MEMCPY_CHK:
            impl_call_memcpy (cd);
            return false;
          case BUILT_IN_MEMSET:
          case BUILT_IN_MEMSET_CHK:
            impl_call_memset (cd);
            return false;
          case BUILT_IN_REALLOC:
            return false;
          case BUILT_IN_STRCHR:
            impl_call_strchr (cd);
            return false;
          case BUILT_IN_STRCPY:
          case BUILT_IN_STRCPY_CHK:
            impl_call_strcpy (cd);
            return false;
          case BUILT_IN_STRLEN:
            impl_call_strlen (cd);
            return false;

          case BUILT_IN_STACK_SAVE:
          case BUILT_IN_STACK_RESTORE:
            return false;

          /* Stdio builtins.  */
          case BUILT_IN_FPRINTF:
          case BUILT_IN_FPRINTF_UNLOCKED:
          case BUILT_IN_PUTC:
          case BUILT_IN_PUTC_UNLOCKED:
          case BUILT_IN_FPUTC:
          case BUILT_IN_FPUTC_UNLOCKED:
          case BUILT_IN_FPUTS:
          case BUILT_IN_FPUTS_UNLOCKED:
          case BUILT_IN_FWRITE:
          case BUILT_IN_FWRITE_UNLOCKED:
          case BUILT_IN_PRINTF:
          case BUILT_IN_PRINTF_UNLOCKED:
          case BUILT_IN_PUTCHAR:
          case BUILT_IN_PUTCHAR_UNLOCKED:
          case BUILT_IN_PUTS:
          case BUILT_IN_PUTS_UNLOCKED:
          case BUILT_IN_VFPRINTF:
          case BUILT_IN_VPRINTF:
            /* These stdio builtins have external effects that are out
               of scope for the analyzer: we only want to model the effects
               on the return value.  */
            break;
          }
      else if (is_named_call_p (callee_fndecl, "malloc", call, 1))
        {
          impl_call_malloc (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "calloc", call, 2))
        {
          impl_call_calloc (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "alloca", call, 1))
        {
          impl_call_alloca (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "realloc", call, 2))
        {
          impl_call_realloc (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "error"))
        {
          if (impl_call_error (cd, 3, out_terminate_path))
            return false;
          else
            unknown_side_effects = true;
        }
      else if (is_named_call_p (callee_fndecl, "error_at_line"))
        {
          if (impl_call_error (cd, 5, out_terminate_path))
            return false;
          else
            unknown_side_effects = true;
        }
      else if (is_named_call_p (callee_fndecl, "fgets", call, 3)
               || is_named_call_p (callee_fndecl, "fgets_unlocked", call, 3))
        {
          impl_call_fgets (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "fread", call, 4))
        {
          impl_call_fread (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "getchar", call, 0))
        {
          /* No side-effects (tracking stream state is out-of-scope
             for the analyzer).  */
        }
      else if (is_named_call_p (callee_fndecl, "memset", call, 3)
               && POINTER_TYPE_P (cd.get_arg_type (0)))
        {
          impl_call_memset (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "strchr", call, 2)
               && POINTER_TYPE_P (cd.get_arg_type (0)))
        {
          impl_call_strchr (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "strlen", call, 1)
               && POINTER_TYPE_P (cd.get_arg_type (0)))
        {
          impl_call_strlen (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "operator new", call, 1))
        {
          impl_call_operator_new (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "operator new []", call, 1))
        {
          impl_call_operator_new (cd);
          return false;
        }
      else if (is_named_call_p (callee_fndecl, "operator delete", call, 1)
               || is_named_call_p (callee_fndecl, "operator delete", call, 2)
               || is_named_call_p (callee_fndecl, "operator delete []",
                                   call, 1))
        {
          /* Handle in "on_call_post".  */
        }
      else if (!fndecl_has_gimple_body_p (callee_fndecl)
               && !DECL_PURE_P (callee_fndecl)
               && !fndecl_built_in_p (callee_fndecl))
        unknown_side_effects = true;
    }
  else
    unknown_side_effects = true;

  return unknown_side_effects;
}

/* Update this model for the CALL stmt, using CTXT to report any
   diagnostics - the second half.

   Updates to the region_model that should be made *after* sm-states
   are updated are done here; other updates to the region_model are done
   in region_model::on_call_pre.

   If UNKNOWN_SIDE_EFFECTS is true, also call handle_unrecognized_call
   to purge state.  */

void
region_model::on_call_post (const gcall *call,
                            bool unknown_side_effects,
                            region_model_context *ctxt)
{
  if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
    {
      call_details cd (call, this, ctxt);
      if (is_named_call_p (callee_fndecl, "free", call, 1))
        {
          impl_call_free (cd);
          return;
        }
      if (is_named_call_p (callee_fndecl, "operator delete", call, 1)
          || is_named_call_p (callee_fndecl, "operator delete", call, 2)
          || is_named_call_p (callee_fndecl, "operator delete []", call, 1))
        {
          impl_call_operator_delete (cd);
          return;
        }
      /* Was this fndecl referenced by
         __attribute__((malloc(FOO)))?  */
      if (lookup_attribute ("*dealloc", DECL_ATTRIBUTES (callee_fndecl)))
        {
          impl_deallocation_call (cd);
          return;
        }
      if (fndecl_built_in_p (callee_fndecl, BUILT_IN_NORMAL)
          && gimple_builtin_call_types_compatible_p (call, callee_fndecl))
        switch (DECL_UNCHECKED_FUNCTION_CODE (callee_fndecl))
          {
          default:
            break;
          case BUILT_IN_REALLOC:
            impl_call_realloc (cd);
            return;
          }
    }

  if (unknown_side_effects)
    handle_unrecognized_call (call, ctxt);
}

/* Purge state involving SVAL from this region_model, using CTXT
   (if non-NULL) to purge other state in a program_state.

   For example, if we're at the def-stmt of an SSA name, then we need to
   purge any state for svalues that involve that SSA name.  This avoids
   false positives in loops, since a symbolic value referring to the
   SSA name will be referring to the previous value of that SSA name.

   For example, in:
     while ((e = hashmap_iter_next(&iter))) {
       struct oid2strbuf *e_strbuf = (struct oid2strbuf *)e;
       free (e_strbuf->value);
     }
   at the def-stmt of e_8:
     e_8 = hashmap_iter_next (&iter);
   we should purge the "freed" state of:
     INIT_VAL(CAST_REG(‘struct oid2strbuf’, (*INIT_VAL(e_8))).value)
   which is the "e_strbuf->value" value from the previous iteration,
   or we will erroneously report a double-free - the "e_8" within it
   refers to the previous value.  */

void
region_model::purge_state_involving (const svalue *sval,
                                     region_model_context *ctxt)
{
  if (!sval->can_have_associated_state_p ())
    return;
  m_store.purge_state_involving (sval, m_mgr);
  m_constraints->purge_state_involving (sval);
  m_dynamic_extents.purge_state_involving (sval);
  if (ctxt)
    ctxt->purge_state_involving (sval);
}

/* Handle a call CALL to a function with unknown behavior.

   Traverse the regions in this model, determining what regions are
   reachable from pointer arguments to CALL and from global variables,
   recursively.

   Set all reachable regions to new unknown values and purge sm-state
   from their values, and from values that point to them.  */

void
region_model::handle_unrecognized_call (const gcall *call,
                                        region_model_context *ctxt)
{
  tree fndecl = get_fndecl_for_call (call, ctxt);

  reachable_regions reachable_regs (this);

  /* Determine the reachable regions and their mutability.  */
  {
    /* Add globals and regions that already escaped in previous
       unknown calls.  */
    m_store.for_each_cluster (reachable_regions::init_cluster_cb,
                              &reachable_regs);

    /* Params that are pointers.  */
    tree iter_param_types = NULL_TREE;
    if (fndecl)
      iter_param_types = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
    for (unsigned arg_idx = 0; arg_idx < gimple_call_num_args (call);
         arg_idx++)
      {
        /* Track expected param type, where available.  */
        tree param_type = NULL_TREE;
        if (iter_param_types)
          {
            param_type = TREE_VALUE (iter_param_types);
            gcc_assert (param_type);
            iter_param_types = TREE_CHAIN (iter_param_types);
          }

        tree parm = gimple_call_arg (call, arg_idx);
        const svalue *parm_sval = get_rvalue (parm, ctxt);
        reachable_regs.handle_parm (parm_sval, param_type);
      }
  }

  uncertainty_t *uncertainty = ctxt ? ctxt->get_uncertainty () : NULL;

  /* Purge sm-state for the svalues that were reachable,
     both in non-mutable and mutable form.  */
  for (svalue_set::iterator iter
         = reachable_regs.begin_reachable_svals ();
       iter != reachable_regs.end_reachable_svals (); ++iter)
    {
      const svalue *sval = (*iter);
      if (ctxt)
        ctxt->on_unknown_change (sval, false);
    }
  for (svalue_set::iterator iter
         = reachable_regs.begin_mutable_svals ();
       iter != reachable_regs.end_mutable_svals (); ++iter)
    {
      const svalue *sval = (*iter);
      if (ctxt)
        ctxt->on_unknown_change (sval, true);
      if (uncertainty)
        uncertainty->on_mutable_sval_at_unknown_call (sval);
    }

  /* Mark any clusters that have escaped.  */
  reachable_regs.mark_escaped_clusters (ctxt);

  /* Update bindings for all clusters that have escaped, whether above,
     or previously.  */
  m_store.on_unknown_fncall (call, m_mgr->get_store_manager ());

  /* Purge dynamic extents from any regions that have escaped mutably:
     realloc could have been called on them.  */
  for (hash_set<const region *>::iterator
         iter = reachable_regs.begin_mutable_base_regs ();
       iter != reachable_regs.end_mutable_base_regs ();
       ++iter)
    {
      const region *base_reg = (*iter);
      unset_dynamic_extents (base_reg);
    }
}
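
/* For example (illustrative, not from the original sources):

     int g;
     void test (int *p)
     {
       g = 1;
       *p = 2;
       unknown_fn (p);
       // Both g (a global) and *p (reachable via a pointer argument)
       // must now be treated as having unknown values, since
       // unknown_fn could have written to either.
     }
*/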

/* Traverse the regions in this model, determining what regions are
   reachable from the store and populating *OUT.

   If EXTRA_SVAL is non-NULL, treat it as an additional "root"
   for reachability (for handling return values from functions when
   analyzing return of the only function on the stack).

   If UNCERTAINTY is non-NULL, treat any svalues that were recorded
   within it as being maybe-bound as additional "roots" for reachability.

   Find svalues that haven't leaked.  */

void
region_model::get_reachable_svalues (svalue_set *out,
                                     const svalue *extra_sval,
                                     const uncertainty_t *uncertainty)
{
  reachable_regions reachable_regs (this);

  /* Add globals and regions that already escaped in previous
     unknown calls.  */
  m_store.for_each_cluster (reachable_regions::init_cluster_cb,
                            &reachable_regs);

  if (extra_sval)
    reachable_regs.handle_sval (extra_sval);

  if (uncertainty)
    for (uncertainty_t::iterator iter
           = uncertainty->begin_maybe_bound_svals ();
         iter != uncertainty->end_maybe_bound_svals (); ++iter)
      reachable_regs.handle_sval (*iter);

  /* Get regions for locals that have explicitly bound values.  */
  for (store::cluster_map_t::iterator iter = m_store.begin ();
       iter != m_store.end (); ++iter)
    {
      const region *base_reg = (*iter).first;
      if (const region *parent = base_reg->get_parent_region ())
        if (parent->get_kind () == RK_FRAME)
          reachable_regs.add (base_reg, false);
    }

  /* Populate *OUT based on the values that were reachable.  */
  for (svalue_set::iterator iter
         = reachable_regs.begin_reachable_svals ();
       iter != reachable_regs.end_reachable_svals (); ++iter)
    out->add (*iter);
}

/* Update this model for the RETURN_STMT, using CTXT to report any
   diagnostics.  */

void
region_model::on_return (const greturn *return_stmt,
                         region_model_context *ctxt)
{
  tree callee = get_current_function ()->decl;
  tree lhs = DECL_RESULT (callee);
  tree rhs = gimple_return_retval (return_stmt);

  if (lhs && rhs)
    copy_region (get_lvalue (lhs, ctxt), get_lvalue (rhs, ctxt), ctxt);
}

/* Update this model for a call and return of setjmp/sigsetjmp at CALL within
   ENODE, using CTXT to report any diagnostics.

   This is for the initial direct invocation of setjmp/sigsetjmp (which
   returns 0), as opposed to any second return due to longjmp/siglongjmp.  */

void
region_model::on_setjmp (const gcall *call, const exploded_node *enode,
                         region_model_context *ctxt)
{
  const svalue *buf_ptr = get_rvalue (gimple_call_arg (call, 0), ctxt);
  const region *buf_reg = deref_rvalue (buf_ptr, gimple_call_arg (call, 0),
                                        ctxt);

  /* Create a setjmp_svalue for this call and store it in BUF_REG's
     region.  */
  if (buf_reg)
    {
      setjmp_record r (enode, call);
      const svalue *sval
        = m_mgr->get_or_create_setjmp_svalue (r, buf_reg->get_type ());
      set_value (buf_reg, sval, ctxt);
    }

  /* Direct calls to setjmp return 0.  */
  if (tree lhs = gimple_call_lhs (call))
    {
      const svalue *new_sval
        = m_mgr->get_or_create_int_cst (TREE_TYPE (lhs), 0);
      const region *lhs_reg = get_lvalue (lhs, ctxt);
      set_value (lhs_reg, new_sval, ctxt);
    }
}
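
/* For example (illustrative, not from the original sources):

     jmp_buf env;
     if (setjmp (env) == 0)
       ...   // on_setjmp models this direct return, which yields 0
     else
       ...   // the rewind from a longjmp is modelled by on_longjmp, below
*/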

/* Update this region_model for rewinding from a "longjmp" at LONGJMP_CALL
   to a "setjmp" at SETJMP_CALL where the final stack depth should be
   SETJMP_STACK_DEPTH.  Pop any stack frames.  Leak detection is *not*
   done, and should be done by the caller.  */

void
region_model::on_longjmp (const gcall *longjmp_call, const gcall *setjmp_call,
                          int setjmp_stack_depth, region_model_context *ctxt)
{
  /* Evaluate the val, using the frame of the "longjmp".  */
  tree fake_retval = gimple_call_arg (longjmp_call, 1);
  const svalue *fake_retval_sval = get_rvalue (fake_retval, ctxt);

  /* Pop any frames until we reach the stack depth of the function where
     setjmp was called.  */
  gcc_assert (get_stack_depth () >= setjmp_stack_depth);
  while (get_stack_depth () > setjmp_stack_depth)
    pop_frame (NULL, NULL, ctxt);

  gcc_assert (get_stack_depth () == setjmp_stack_depth);

  /* Assign to LHS of "setjmp" in new_state.  */
  if (tree lhs = gimple_call_lhs (setjmp_call))
    {
      /* Passing 0 as the val to longjmp leads to setjmp returning 1.  */
      const svalue *zero_sval
        = m_mgr->get_or_create_int_cst (TREE_TYPE (fake_retval), 0);
      tristate eq_zero = eval_condition (fake_retval_sval, EQ_EXPR, zero_sval);
      /* If we have 0, use 1.  */
      if (eq_zero.is_true ())
        {
          const svalue *one_sval
            = m_mgr->get_or_create_int_cst (TREE_TYPE (fake_retval), 1);
          fake_retval_sval = one_sval;
        }
      else
        {
          /* Otherwise note that the value is nonzero.  */
          m_constraints->add_constraint (fake_retval_sval, NE_EXPR, zero_sval);
        }

      /* Decorate the return value from setjmp as being unmergeable,
         so that we don't attempt to merge states with it as zero
         with states in which it's nonzero, leading to a clean distinction
         in the exploded_graph between the first return and the second
         return.  */
      fake_retval_sval = m_mgr->get_or_create_unmergeable (fake_retval_sval);

      const region *lhs_reg = get_lvalue (lhs, ctxt);
      set_value (lhs_reg, fake_retval_sval, ctxt);
    }
}
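
/* Hence (an illustrative consequence, not from the original sources)
   "longjmp (env, 0)" is modelled as the corresponding "setjmp (env)"
   returning 1 rather than 0, matching the C standard's requirement
   that the second return of setjmp never yields 0.  */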

/* Update this region_model for a phi stmt of the form
     LHS = PHI <...RHS...>.
   where RHS is for the appropriate edge.
   Get state from OLD_STATE so that all of the phi stmts for a basic block
   are effectively handled simultaneously.  */

void
region_model::handle_phi (const gphi *phi,
                          tree lhs, tree rhs,
                          const region_model &old_state,
                          region_model_context *ctxt)
{
  /* For now, don't bother tracking the .MEM SSA names.  */
  if (tree var = SSA_NAME_VAR (lhs))
    if (TREE_CODE (var) == VAR_DECL)
      if (VAR_DECL_IS_VIRTUAL_OPERAND (var))
        return;

  const svalue *src_sval = old_state.get_rvalue (rhs, ctxt);
  const region *dst_reg = old_state.get_lvalue (lhs, ctxt);

  set_value (dst_reg, src_sval, ctxt);

  if (ctxt)
    ctxt->on_phi (phi, rhs);
}
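
/* For example (illustrative, not from the original sources), a loop
   header that swaps two values can have phis that refer to each other:

     x_1 = PHI <0(2), y_2(3)>
     y_2 = PHI <1(2), x_1(3)>

   Reading the rhs values from OLD_STATE ensures that both phis see the
   values from before the block was entered, rather than y_2 seeing the
   freshly-updated x_1.  */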
1622
1623 /* Implementation of region_model::get_lvalue; the latter adds type-checking.
1624
1625 Get the id of the region for PV within this region_model,
1626 emitting any diagnostics to CTXT. */
1627
1628 const region *
get_lvalue_1(path_var pv,region_model_context * ctxt) const1629 region_model::get_lvalue_1 (path_var pv, region_model_context *ctxt) const
1630 {
1631 tree expr = pv.m_tree;
1632
1633 gcc_assert (expr);
1634
1635 switch (TREE_CODE (expr))
1636 {
1637 default:
1638 return m_mgr->get_region_for_unexpected_tree_code (ctxt, expr,
1639 dump_location_t ());
1640
1641 case ARRAY_REF:
1642 {
1643 tree array = TREE_OPERAND (expr, 0);
1644 tree index = TREE_OPERAND (expr, 1);
1645
1646 const region *array_reg = get_lvalue (array, ctxt);
1647 const svalue *index_sval = get_rvalue (index, ctxt);
1648 return m_mgr->get_element_region (array_reg,
1649 TREE_TYPE (TREE_TYPE (array)),
1650 index_sval);
1651 }
1652 break;
1653
1654 case MEM_REF:
1655 {
1656 tree ptr = TREE_OPERAND (expr, 0);
1657 tree offset = TREE_OPERAND (expr, 1);
1658 const svalue *ptr_sval = get_rvalue (ptr, ctxt);
1659 const svalue *offset_sval = get_rvalue (offset, ctxt);
1660 const region *star_ptr = deref_rvalue (ptr_sval, ptr, ctxt);
1661 return m_mgr->get_offset_region (star_ptr,
1662 TREE_TYPE (expr),
1663 offset_sval);
1664 }
1665 break;
1666
1667 case FUNCTION_DECL:
1668 return m_mgr->get_region_for_fndecl (expr);
1669
1670 case LABEL_DECL:
1671 return m_mgr->get_region_for_label (expr);
1672
1673 case VAR_DECL:
1674 /* Handle globals. */
1675 if (is_global_var (expr))
1676 return m_mgr->get_region_for_global (expr);
1677
1678 /* Fall through. */
1679
1680 case SSA_NAME:
1681 case PARM_DECL:
1682 case RESULT_DECL:
1683 {
1684 gcc_assert (TREE_CODE (expr) == SSA_NAME
1685 || TREE_CODE (expr) == PARM_DECL
1686 || TREE_CODE (expr) == VAR_DECL
1687 || TREE_CODE (expr) == RESULT_DECL);
1688
1689 int stack_index = pv.m_stack_depth;
1690 const frame_region *frame = get_frame_at_index (stack_index);
1691 gcc_assert (frame);
1692 return frame->get_region_for_local (m_mgr, expr);
1693 }
1694
1695 case COMPONENT_REF:
1696 {
1697 /* obj.field */
1698 tree obj = TREE_OPERAND (expr, 0);
1699 tree field = TREE_OPERAND (expr, 1);
1700 const region *obj_reg = get_lvalue (obj, ctxt);
1701 return m_mgr->get_field_region (obj_reg, field);
1702 }
1703 break;
1704
1705 case STRING_CST:
1706 return m_mgr->get_region_for_string (expr);
1707 }
1708 }
1709
1710 /* Assert that SRC_TYPE can be converted to DST_TYPE as a no-op. */
1711
1712 static void
assert_compat_types(tree src_type,tree dst_type)1713 assert_compat_types (tree src_type, tree dst_type)
1714 {
1715 if (src_type && dst_type && !VOID_TYPE_P (dst_type))
1716 {
1717 #if CHECKING_P
1718 if (!(useless_type_conversion_p (src_type, dst_type)))
1719 internal_error ("incompatible types: %qT and %qT", src_type, dst_type);
1720 #endif
1721 }
1722 }
1723
1724 /* Return true if SRC_TYPE can be converted to DST_TYPE as a no-op. */
1725
1726 bool
compat_types_p(tree src_type,tree dst_type)1727 compat_types_p (tree src_type, tree dst_type)
1728 {
1729 if (src_type && dst_type && !VOID_TYPE_P (dst_type))
1730 if (!(useless_type_conversion_p (src_type, dst_type)))
1731 return false;
1732 return true;
1733 }
1734
1735 /* Get the region for PV within this region_model,
1736 emitting any diagnostics to CTXT. */
1737
1738 const region *
get_lvalue(path_var pv,region_model_context * ctxt) const1739 region_model::get_lvalue (path_var pv, region_model_context *ctxt) const
1740 {
1741 if (pv.m_tree == NULL_TREE)
1742 return NULL;
1743
1744 const region *result_reg = get_lvalue_1 (pv, ctxt);
1745 assert_compat_types (result_reg->get_type (), TREE_TYPE (pv.m_tree));
1746 return result_reg;
1747 }
1748
1749 /* Get the region for EXPR within this region_model (assuming the most
1750 recent stack frame if it's a local). */
1751
1752 const region *
get_lvalue(tree expr,region_model_context * ctxt) const1753 region_model::get_lvalue (tree expr, region_model_context *ctxt) const
1754 {
1755 return get_lvalue (path_var (expr, get_stack_depth () - 1), ctxt);
1756 }
1757
1758 /* Implementation of region_model::get_rvalue; the latter adds type-checking.
1759
1760 Get the value of PV within this region_model,
1761 emitting any diagnostics to CTXT. */
1762
1763 const svalue *
get_rvalue_1(path_var pv,region_model_context * ctxt) const1764 region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt) const
1765 {
1766 gcc_assert (pv.m_tree);
1767
1768 switch (TREE_CODE (pv.m_tree))
1769 {
1770 default:
1771 return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (pv.m_tree));
1772
1773 case ADDR_EXPR:
1774 {
1775 /* "&EXPR". */
1776 tree expr = pv.m_tree;
1777 tree op0 = TREE_OPERAND (expr, 0);
1778 const region *expr_reg = get_lvalue (op0, ctxt);
1779 return m_mgr->get_ptr_svalue (TREE_TYPE (expr), expr_reg);
1780 }
1781 break;
1782
1783 case BIT_FIELD_REF:
1784 {
1785 tree expr = pv.m_tree;
1786 tree op0 = TREE_OPERAND (expr, 0);
1787 const region *reg = get_lvalue (op0, ctxt);
1788 tree num_bits = TREE_OPERAND (expr, 1);
1789 tree first_bit_offset = TREE_OPERAND (expr, 2);
1790 gcc_assert (TREE_CODE (num_bits) == INTEGER_CST);
1791 gcc_assert (TREE_CODE (first_bit_offset) == INTEGER_CST);
1792 bit_range bits (TREE_INT_CST_LOW (first_bit_offset),
1793 TREE_INT_CST_LOW (num_bits));
1794 return get_rvalue_for_bits (TREE_TYPE (expr), reg, bits, ctxt);
1795 }
1796
1797 case SSA_NAME:
1798 case VAR_DECL:
1799 case PARM_DECL:
1800 case RESULT_DECL:
1801 case ARRAY_REF:
1802 {
1803 const region *reg = get_lvalue (pv, ctxt);
1804 return get_store_value (reg, ctxt);
1805 }
1806
1807 case REALPART_EXPR:
1808 case IMAGPART_EXPR:
1809 case VIEW_CONVERT_EXPR:
1810 {
1811 tree expr = pv.m_tree;
1812 tree arg = TREE_OPERAND (expr, 0);
1813 const svalue *arg_sval = get_rvalue (arg, ctxt);
1814 const svalue *sval_unaryop
1815 = m_mgr->get_or_create_unaryop (TREE_TYPE (expr), TREE_CODE (expr),
1816 arg_sval);
1817 return sval_unaryop;
1818 }
1819
1820 case INTEGER_CST:
1821 case REAL_CST:
1822 case COMPLEX_CST:
1823 case VECTOR_CST:
1824 case STRING_CST:
1825 return m_mgr->get_or_create_constant_svalue (pv.m_tree);
1826
1827 case POINTER_PLUS_EXPR:
1828 {
1829 tree expr = pv.m_tree;
1830 tree ptr = TREE_OPERAND (expr, 0);
1831 tree offset = TREE_OPERAND (expr, 1);
1832 const svalue *ptr_sval = get_rvalue (ptr, ctxt);
1833 const svalue *offset_sval = get_rvalue (offset, ctxt);
1834 const svalue *sval_binop
1835 = m_mgr->get_or_create_binop (TREE_TYPE (expr), POINTER_PLUS_EXPR,
1836 ptr_sval, offset_sval);
1837 return sval_binop;
1838 }
1839
1840 /* Binary ops. */
1841 case PLUS_EXPR:
1842 case MULT_EXPR:
1843 {
1844 tree expr = pv.m_tree;
1845 tree arg0 = TREE_OPERAND (expr, 0);
1846 tree arg1 = TREE_OPERAND (expr, 1);
1847 const svalue *arg0_sval = get_rvalue (arg0, ctxt);
1848 const svalue *arg1_sval = get_rvalue (arg1, ctxt);
1849 const svalue *sval_binop
1850 = m_mgr->get_or_create_binop (TREE_TYPE (expr), TREE_CODE (expr),
1851 arg0_sval, arg1_sval);
1852 return sval_binop;
1853 }
1854
1855 case COMPONENT_REF:
1856 case MEM_REF:
1857 {
1858 const region *ref_reg = get_lvalue (pv, ctxt);
1859 return get_store_value (ref_reg, ctxt);
1860 }
1861 case OBJ_TYPE_REF:
1862 {
1863 tree expr = OBJ_TYPE_REF_EXPR (pv.m_tree);
1864 return get_rvalue (expr, ctxt);
1865 }
1866 }
1867 }
1868
1869 /* Get the value of PV within this region_model,
1870 emitting any diagnostics to CTXT. */
1871
1872 const svalue *
1873 region_model::get_rvalue (path_var pv, region_model_context *ctxt) const
1874 {
1875 if (pv.m_tree == NULL_TREE)
1876 return NULL;
1877
1878 const svalue *result_sval = get_rvalue_1 (pv, ctxt);
1879
1880 assert_compat_types (result_sval->get_type (), TREE_TYPE (pv.m_tree));
1881
1882 result_sval = check_for_poison (result_sval, pv.m_tree, ctxt);
1883
1884 return result_sval;
1885 }
1886
1887 /* Get the value of EXPR within this region_model (assuming the most
1888 recent stack frame if it's a local). */
1889
1890 const svalue *
1891 region_model::get_rvalue (tree expr, region_model_context *ctxt) const
1892 {
1893 return get_rvalue (path_var (expr, get_stack_depth () - 1), ctxt);
1894 }
1895
1896 /* Return true if this model is on a path with "main" as the entrypoint
1897 (as opposed to one in which we're merely analyzing a subset of the
1898 path through the code). */
1899
1900 bool
1901 region_model::called_from_main_p () const
1902 {
1903 if (!m_current_frame)
1904 return false;
1905 /* Determine if the oldest stack frame in this model is for "main". */
1906 const frame_region *frame0 = get_frame_at_index (0);
1907 gcc_assert (frame0);
1908 return id_equal (DECL_NAME (frame0->get_function ()->decl), "main");
1909 }
1910
1911 /* Subroutine of region_model::get_store_value for when REG is (or is within)
1912 a global variable that hasn't been touched since the start of this path
1913 (or was implicitly touched due to a call to an unknown function). */
1914
1915 const svalue *
1916 region_model::get_initial_value_for_global (const region *reg) const
1917 {
1918 /* Get the decl that REG is for (or is within). */
1919 const decl_region *base_reg
1920 = reg->get_base_region ()->dyn_cast_decl_region ();
1921 gcc_assert (base_reg);
1922 tree decl = base_reg->get_decl ();
1923
1924 /* Special-case: to avoid having to explicitly update all previously
1925 untracked globals when calling an unknown fn, they implicitly have
1926 an unknown value if an unknown call has occurred, unless the decl
1927 is static to this TU and hasn't escaped. Globals that have escaped
1928 are explicitly tracked, so we shouldn't hit this case for them. */
1929 if (m_store.called_unknown_fn_p ()
1930 && TREE_PUBLIC (decl)
1931 && !TREE_READONLY (decl))
1932 return m_mgr->get_or_create_unknown_svalue (reg->get_type ());
1933
1934 /* If we are on a path from the entrypoint of "main" and we have a
1935 global decl defined in this TU that hasn't been touched yet, then
1936 the initial value of REG can be taken from the initialization value
1937 of the decl. */
1938 if (called_from_main_p () || TREE_READONLY (decl))
1939 {
1940 /* Attempt to get the initializer value for base_reg. */
1941 if (const svalue *base_reg_init
1942 = base_reg->get_svalue_for_initializer (m_mgr))
1943 {
1944 if (reg == base_reg)
1945 return base_reg_init;
1946 else
1947 {
1948 /* Get the value for REG within base_reg_init. */
1949 binding_cluster c (base_reg);
1950 c.bind (m_mgr->get_store_manager (), base_reg, base_reg_init);
1951 const svalue *sval
1952 = c.get_any_binding (m_mgr->get_store_manager (), reg);
1953 if (sval)
1954 {
1955 if (reg->get_type ())
1956 sval = m_mgr->get_or_create_cast (reg->get_type (),
1957 sval);
1958 return sval;
1959 }
1960 }
1961 }
1962 }
1963
1964 /* Otherwise, return INIT_VAL(REG). */
1965 return m_mgr->get_or_create_initial_value (reg);
1966 }
1967
1968 /* Get a value for REG, looking it up in the store, or otherwise falling
1969 back to "initial" or "unknown" values.
1970 Use CTXT to report any warnings associated with reading from REG. */
1971
1972 const svalue *
1973 region_model::get_store_value (const region *reg,
1974 region_model_context *ctxt) const
1975 {
1976 check_region_for_read (reg, ctxt);
1977
1978 /* Special-case: handle var_decls in the constant pool. */
1979 if (const decl_region *decl_reg = reg->dyn_cast_decl_region ())
1980 if (const svalue *sval = decl_reg->maybe_get_constant_value (m_mgr))
1981 return sval;
1982
1983 const svalue *sval
1984 = m_store.get_any_binding (m_mgr->get_store_manager (), reg);
1985 if (sval)
1986 {
1987 if (reg->get_type ())
1988 sval = m_mgr->get_or_create_cast (reg->get_type (), sval);
1989 return sval;
1990 }
1991
1992 /* Special-case: read at a constant index within a STRING_CST. */
1993 if (const offset_region *offset_reg = reg->dyn_cast_offset_region ())
1994 if (tree byte_offset_cst
1995 = offset_reg->get_byte_offset ()->maybe_get_constant ())
1996 if (const string_region *str_reg
1997 = reg->get_parent_region ()->dyn_cast_string_region ())
1998 {
1999 tree string_cst = str_reg->get_string_cst ();
2000 if (const svalue *char_sval
2001 = m_mgr->maybe_get_char_from_string_cst (string_cst,
2002 byte_offset_cst))
2003 return m_mgr->get_or_create_cast (reg->get_type (), char_sval);
2004 }
2005
2006 /* Special-case: read the initial char of a STRING_CST. */
2007 if (const cast_region *cast_reg = reg->dyn_cast_cast_region ())
2008 if (const string_region *str_reg
2009 = cast_reg->get_original_region ()->dyn_cast_string_region ())
2010 {
2011 tree string_cst = str_reg->get_string_cst ();
2012 tree byte_offset_cst = build_int_cst (integer_type_node, 0);
2013 if (const svalue *char_sval
2014 = m_mgr->maybe_get_char_from_string_cst (string_cst,
2015 byte_offset_cst))
2016 return m_mgr->get_or_create_cast (reg->get_type (), char_sval);
2017 }
2018
2019 /* Otherwise we implicitly have the initial value of the region
2020 (if the cluster had been touched, binding_cluster::get_any_binding
2021 would have returned UNKNOWN, and we would already have returned
2022 that above). */
2023
2024 /* Handle globals. */
2025 if (reg->get_base_region ()->get_parent_region ()->get_kind ()
2026 == RK_GLOBALS)
2027 return get_initial_value_for_global (reg);
2028
2029 return m_mgr->get_or_create_initial_value (reg);
2030 }
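
/* For example (an illustrative sketch, not from the original source):
   reading byte 1 of the string literal "str" through an offset_region
   with constant byte offset 1 hits the STRING_CST special-case above,
   yielding the constant 't' cast to REG's type, whereas a read of an
   untouched global on a path that didn't start at "main" falls through
   to INIT_VAL(REG).  */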
2031
2032 /* Return false if REG does not exist, true if it may.
2033 This is for detecting regions within the stack that don't exist anymore
2034 after frames are popped. */
2035
2036 bool
2037 region_model::region_exists_p (const region *reg) const
2038 {
2039 /* If within a stack frame, check that the stack frame is live. */
2040 if (const frame_region *enclosing_frame = reg->maybe_get_frame_region ())
2041 {
2042 /* Check that the current frame is the enclosing frame, or is called
2043 by it. */
2044 for (const frame_region *iter_frame = get_current_frame (); iter_frame;
2045 iter_frame = iter_frame->get_calling_frame ())
2046 if (iter_frame == enclosing_frame)
2047 return true;
2048 return false;
2049 }
2050
2051 return true;
2052 }
2053
2054 /* Get a region for referencing PTR_SVAL, creating a region if need be, and
2055 potentially generating warnings via CTXT.
2056 PTR_SVAL must be of pointer type.
2057 PTR_TREE if non-NULL can be used when emitting diagnostics. */
2058
2059 const region *
2060 region_model::deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
2061 region_model_context *ctxt) const
2062 {
2063 gcc_assert (ptr_sval);
2064 gcc_assert (POINTER_TYPE_P (ptr_sval->get_type ()));
2065
2066 /* If we're dereferencing PTR_SVAL, assume that it is non-NULL; add this
2067 as a constraint. This suppresses false positives from
2068 -Wanalyzer-null-dereference for the case where we later have an
2069 if (PTR_SVAL) that would occur if we considered the false branch
2070 and transitioned the malloc state machine from start->null. */
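  /* An illustrative sketch (assumed, not from the testsuite) of the
     pattern this helps with:
       *q = 42;     deref: q is constrained to be non-NULL from here
       if (q) ...   so the "q == NULL" branch is rejected as infeasible.  */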
2071 tree null_ptr_cst = build_int_cst (ptr_sval->get_type (), 0);
2072 const svalue *null_ptr = m_mgr->get_or_create_constant_svalue (null_ptr_cst);
2073 m_constraints->add_constraint (ptr_sval, NE_EXPR, null_ptr);
2074
2075 switch (ptr_sval->get_kind ())
2076 {
2077 default:
2078 break;
2079
2080 case SK_REGION:
2081 {
2082 const region_svalue *region_sval
2083 = as_a <const region_svalue *> (ptr_sval);
2084 return region_sval->get_pointee ();
2085 }
2086
2087 case SK_BINOP:
2088 {
2089 const binop_svalue *binop_sval
2090 = as_a <const binop_svalue *> (ptr_sval);
2091 switch (binop_sval->get_op ())
2092 {
2093 case POINTER_PLUS_EXPR:
2094 {
2095 /* If we have a symbolic value expressing pointer arithmetic,
2096 try to convert it to a suitable region. */
2097 const region *parent_region
2098 = deref_rvalue (binop_sval->get_arg0 (), NULL_TREE, ctxt);
2099 const svalue *offset = binop_sval->get_arg1 ();
2100 tree type = TREE_TYPE (ptr_sval->get_type ());
2101 return m_mgr->get_offset_region (parent_region, type, offset);
2102 }
2103 default:
2104 break;
2105 }
2106 }
2107 break;
2108
2109 case SK_POISONED:
2110 {
2111 if (ctxt)
2112 {
2113 tree ptr = get_representative_tree (ptr_sval);
2114 /* If we can't get a representative tree for PTR_SVAL
2115 (e.g. if it hasn't been bound into the store), then
2116 fall back on PTR_TREE, if non-NULL. */
2117 if (!ptr)
2118 ptr = ptr_tree;
2119 if (ptr)
2120 {
2121 const poisoned_svalue *poisoned_sval
2122 = as_a <const poisoned_svalue *> (ptr_sval);
2123 enum poison_kind pkind = poisoned_sval->get_poison_kind ();
2124 ctxt->warn (new poisoned_value_diagnostic (ptr, pkind));
2125 }
2126 }
2127 }
2128 break;
2129 }
2130
2131 return m_mgr->get_symbolic_region (ptr_sval);
2132 }
2133
2134 /* Attempt to get BITS within any value of REG, as TYPE.
2135 In particular, extract values from compound_svalues for the case
2136 where there's a concrete binding at BITS.
2137 Return an unknown svalue if we can't handle the given case.
2138 Use CTXT to report any warnings associated with reading from REG. */
2139
2140 const svalue *
2141 region_model::get_rvalue_for_bits (tree type,
2142 const region *reg,
2143 const bit_range &bits,
2144 region_model_context *ctxt) const
2145 {
2146 const svalue *sval = get_store_value (reg, ctxt);
2147 return m_mgr->get_or_create_bits_within (type, bits, sval);
2148 }
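
/* For example (an illustrative sketch, not from the original source):
   for the BIT_FIELD_REF case in get_rvalue_1 above, a read of 8 bits
   at bit offset 16 gives BITS of [16, 24), and thus a "bits within"
   svalue selecting that range of whatever REG currently holds.  */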
2149
2150 /* A subclass of pending_diagnostic for complaining about writes to
2151 constant regions of memory. */
2152
2153 class write_to_const_diagnostic
2154 : public pending_diagnostic_subclass<write_to_const_diagnostic>
2155 {
2156 public:
2157 write_to_const_diagnostic (const region *reg, tree decl)
2158 : m_reg (reg), m_decl (decl)
2159 {}
2160
2161 const char *get_kind () const FINAL OVERRIDE
2162 {
2163 return "write_to_const_diagnostic";
2164 }
2165
2166 bool operator== (const write_to_const_diagnostic &other) const
2167 {
2168 return (m_reg == other.m_reg
2169 && m_decl == other.m_decl);
2170 }
2171
2172 bool emit (rich_location *rich_loc) FINAL OVERRIDE
2173 {
2174 auto_diagnostic_group d;
2175 bool warned;
2176 switch (m_reg->get_kind ())
2177 {
2178 default:
2179 warned = warning_at (rich_loc, OPT_Wanalyzer_write_to_const,
2180 "write to %<const%> object %qE", m_decl);
2181 break;
2182 case RK_FUNCTION:
2183 warned = warning_at (rich_loc, OPT_Wanalyzer_write_to_const,
2184 "write to function %qE", m_decl);
2185 break;
2186 case RK_LABEL:
2187 warned = warning_at (rich_loc, OPT_Wanalyzer_write_to_const,
2188 "write to label %qE", m_decl);
2189 break;
2190 }
2191 if (warned)
2192 inform (DECL_SOURCE_LOCATION (m_decl), "declared here");
2193 return warned;
2194 }
2195
2196 label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
2197 {
2198 switch (m_reg->get_kind ())
2199 {
2200 default:
2201 return ev.formatted_print ("write to %<const%> object %qE here", m_decl);
2202 case RK_FUNCTION:
2203 return ev.formatted_print ("write to function %qE here", m_decl);
2204 case RK_LABEL:
2205 return ev.formatted_print ("write to label %qE here", m_decl);
2206 }
2207 }
2208
2209 private:
2210 const region *m_reg;
2211 tree m_decl;
2212 };
2213
2214 /* A subclass of pending_diagnostic for complaining about writes to
2215 string literals. */
2216
2217 class write_to_string_literal_diagnostic
2218 : public pending_diagnostic_subclass<write_to_string_literal_diagnostic>
2219 {
2220 public:
2221 write_to_string_literal_diagnostic (const region *reg)
2222 : m_reg (reg)
2223 {}
2224
2225 const char *get_kind () const FINAL OVERRIDE
2226 {
2227 return "write_to_string_literal_diagnostic";
2228 }
2229
2230 bool operator== (const write_to_string_literal_diagnostic &other) const
2231 {
2232 return m_reg == other.m_reg;
2233 }
2234
2235 bool emit (rich_location *rich_loc) FINAL OVERRIDE
2236 {
2237 return warning_at (rich_loc, OPT_Wanalyzer_write_to_string_literal,
2238 "write to string literal");
2239 /* Ideally we would show the location of the STRING_CST as well,
2240 but it is not available at this point. */
2241 }
2242
2243 label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
2244 {
2245 return ev.formatted_print ("write to string literal here");
2246 }
2247
2248 private:
2249 const region *m_reg;
2250 };
2251
2252 /* Use CTXT to warn if DEST_REG is a region that shouldn't be written to. */
2253
2254 void
2255 region_model::check_for_writable_region (const region *dest_reg,
2256 region_model_context *ctxt) const
2257 {
2258 /* Fail gracefully if CTXT is NULL. */
2259 if (!ctxt)
2260 return;
2261
2262 const region *base_reg = dest_reg->get_base_region ();
2263 switch (base_reg->get_kind ())
2264 {
2265 default:
2266 break;
2267 case RK_FUNCTION:
2268 {
2269 const function_region *func_reg = as_a <const function_region *> (base_reg);
2270 tree fndecl = func_reg->get_fndecl ();
2271 ctxt->warn (new write_to_const_diagnostic (func_reg, fndecl));
2272 }
2273 break;
2274 case RK_LABEL:
2275 {
2276 const label_region *label_reg = as_a <const label_region *> (base_reg);
2277 tree label = label_reg->get_label ();
2278 ctxt->warn (new write_to_const_diagnostic (label_reg, label));
2279 }
2280 break;
2281 case RK_DECL:
2282 {
2283 const decl_region *decl_reg = as_a <const decl_region *> (base_reg);
2284 tree decl = decl_reg->get_decl ();
2285 /* Warn about writes to const globals.
2286 Don't warn for writes to const locals, and params in particular,
2287 since we would warn in push_frame when setting them up (e.g. the
2288 "this" param is "T* const"). */
2289 if (TREE_READONLY (decl)
2290 && is_global_var (decl))
2291 ctxt->warn (new write_to_const_diagnostic (dest_reg, decl));
2292 }
2293 break;
2294 case RK_STRING:
2295 ctxt->warn (new write_to_string_literal_diagnostic (dest_reg));
2296 break;
2297 }
2298 }
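
/* Illustrative examples of code the above aims to catch (a sketch,
   not taken from the testsuite):

     const int g = 42;
     void f1 (void) { *(int *)&g = 1; }            -Wanalyzer-write-to-const
     void f2 (void) { ((char *)"str")[0] = 'S'; }  -Wanalyzer-write-to-string-literal
*/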
2299
2300 /* Get the capacity of REG in bytes. */
2301
2302 const svalue *
2303 region_model::get_capacity (const region *reg) const
2304 {
2305 switch (reg->get_kind ())
2306 {
2307 default:
2308 break;
2309 case RK_DECL:
2310 {
2311 const decl_region *decl_reg = as_a <const decl_region *> (reg);
2312 tree decl = decl_reg->get_decl ();
2313 if (TREE_CODE (decl) == SSA_NAME)
2314 {
2315 tree type = TREE_TYPE (decl);
2316 tree size = TYPE_SIZE (type);
2317 return get_rvalue (size, NULL);
2318 }
2319 else
2320 {
2321 tree size = decl_init_size (decl, false);
2322 if (size)
2323 return get_rvalue (size, NULL);
2324 }
2325 }
2326 break;
2327 case RK_SIZED:
2328 /* Look through sized regions to get at the capacity
2329 of the underlying regions. */
2330 return get_capacity (reg->get_parent_region ());
2331 }
2332
2333 if (const svalue *recorded = get_dynamic_extents (reg))
2334 return recorded;
2335
2336 return m_mgr->get_or_create_unknown_svalue (sizetype);
2337 }
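
/* For example (an illustrative sketch, not from the original source):
   for "char buf[16];" the capacity is the constant 16 from the decl's
   size; for a heap allocation whose size was recorded via
   set_dynamic_extents, the recorded svalue is returned; otherwise the
   capacity is unknown.  */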
2338
2339 /* If CTXT is non-NULL, use it to warn about any problems accessing REG,
2340 using DIR to determine if this access is a read or write. */
2341
2342 void
2343 region_model::check_region_access (const region *reg,
2344 enum access_direction dir,
2345 region_model_context *ctxt) const
2346 {
2347 /* Fail gracefully if CTXT is NULL. */
2348 if (!ctxt)
2349 return;
2350
2351 check_region_for_taint (reg, dir, ctxt);
2352
2353 switch (dir)
2354 {
2355 default:
2356 gcc_unreachable ();
2357 case DIR_READ:
2358 /* Currently a no-op. */
2359 break;
2360 case DIR_WRITE:
2361 check_for_writable_region (reg, ctxt);
2362 break;
2363 }
2364 }
2365
2366 /* If CTXT is non-NULL, use it to warn about any problems writing to REG. */
2367
2368 void
2369 region_model::check_region_for_write (const region *dest_reg,
2370 region_model_context *ctxt) const
2371 {
2372 check_region_access (dest_reg, DIR_WRITE, ctxt);
2373 }
2374
2375 /* If CTXT is non-NULL, use it to warn about any problems reading from REG. */
2376
2377 void
2378 region_model::check_region_for_read (const region *src_reg,
2379 region_model_context *ctxt) const
2380 {
2381 check_region_access (src_reg, DIR_READ, ctxt);
2382 }
2383
2384 /* Set the value of the region given by LHS_REG to the value given
2385 by RHS_SVAL.
2386 Use CTXT to report any warnings associated with writing to LHS_REG. */
2387
2388 void
2389 region_model::set_value (const region *lhs_reg, const svalue *rhs_sval,
2390 region_model_context *ctxt)
2391 {
2392 gcc_assert (lhs_reg);
2393 gcc_assert (rhs_sval);
2394
2395 check_region_for_write (lhs_reg, ctxt);
2396
2397 m_store.set_value (m_mgr->get_store_manager (), lhs_reg, rhs_sval,
2398 ctxt ? ctxt->get_uncertainty () : NULL);
2399 }
2400
2401 /* Set the value of the region given by LHS to the value given by RHS. */
2402
2403 void
2404 region_model::set_value (tree lhs, tree rhs, region_model_context *ctxt)
2405 {
2406 const region *lhs_reg = get_lvalue (lhs, ctxt);
2407 const svalue *rhs_sval = get_rvalue (rhs, ctxt);
2408 gcc_assert (lhs_reg);
2409 gcc_assert (rhs_sval);
2410 set_value (lhs_reg, rhs_sval, ctxt);
2411 }
2412
2413 /* Remove all bindings overlapping REG within the store. */
2414
2415 void
2416 region_model::clobber_region (const region *reg)
2417 {
2418 m_store.clobber_region (m_mgr->get_store_manager (), reg);
2419 }
2420
2421 /* Remove any bindings for REG within the store. */
2422
2423 void
2424 region_model::purge_region (const region *reg)
2425 {
2426 m_store.purge_region (m_mgr->get_store_manager (), reg);
2427 }
2428
2429 /* Fill REG with SVAL. */
2430
2431 void
2432 region_model::fill_region (const region *reg, const svalue *sval)
2433 {
2434 m_store.fill_region (m_mgr->get_store_manager (), reg, sval);
2435 }
2436
2437 /* Zero-fill REG. */
2438
2439 void
2440 region_model::zero_fill_region (const region *reg)
2441 {
2442 m_store.zero_fill_region (m_mgr->get_store_manager (), reg);
2443 }
2444
2445 /* Mark REG as having unknown content. */
2446
2447 void
2448 region_model::mark_region_as_unknown (const region *reg,
2449 uncertainty_t *uncertainty)
2450 {
2451 m_store.mark_region_as_unknown (m_mgr->get_store_manager (), reg,
2452 uncertainty);
2453 }
2454
2455 /* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
2456 this model. */
2457
2458 tristate
2459 region_model::eval_condition (const svalue *lhs,
2460 enum tree_code op,
2461 const svalue *rhs) const
2462 {
2463 /* For now, make no attempt to capture constraints on floating-point
2464 values. */
2465 if ((lhs->get_type () && FLOAT_TYPE_P (lhs->get_type ()))
2466 || (rhs->get_type () && FLOAT_TYPE_P (rhs->get_type ())))
2467 return tristate::unknown ();
2468
2469 tristate ts = eval_condition_without_cm (lhs, op, rhs);
2470 if (ts.is_known ())
2471 return ts;
2472
2473 /* Otherwise, try constraints. */
2474 return m_constraints->eval_condition (lhs, op, rhs);
2475 }
2476
2477 /* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
2478 this model, without resorting to the constraint_manager.
2479
2480 This is exposed so that impl_region_model_context::on_state_leak can
2481 check for equality part-way through region_model::purge_unused_svalues
2482 without risking creating new ECs. */
2483
2484 tristate
2485 region_model::eval_condition_without_cm (const svalue *lhs,
2486 enum tree_code op,
2487 const svalue *rhs) const
2488 {
2489 gcc_assert (lhs);
2490 gcc_assert (rhs);
2491
2492 /* See what we know based on the values. */
2493
2494 /* For now, make no attempt to capture constraints on floating-point
2495 values. */
2496 if ((lhs->get_type () && FLOAT_TYPE_P (lhs->get_type ()))
2497 || (rhs->get_type () && FLOAT_TYPE_P (rhs->get_type ())))
2498 return tristate::unknown ();
2499
2500 /* Unwrap any unmergeable values. */
2501 lhs = lhs->unwrap_any_unmergeable ();
2502 rhs = rhs->unwrap_any_unmergeable ();
2503
2504 if (lhs == rhs)
2505 {
2506 /* If we have the same svalue, then we have equality
2507 (apart from NaN-handling).
2508 TODO: should this definitely be the case for poisoned values? */
2509 /* Poisoned and unknown values are "unknowable". */
2510 if (lhs->get_kind () == SK_POISONED
2511 || lhs->get_kind () == SK_UNKNOWN)
2512 return tristate::TS_UNKNOWN;
2513
2514 switch (op)
2515 {
2516 case EQ_EXPR:
2517 case GE_EXPR:
2518 case LE_EXPR:
2519 return tristate::TS_TRUE;
2520
2521 case NE_EXPR:
2522 case GT_EXPR:
2523 case LT_EXPR:
2524 return tristate::TS_FALSE;
2525
2526 default:
2527 /* For other ops, use the logic below. */
2528 break;
2529 }
2530 }
2531
2532 /* If we have a pair of region_svalues, compare them. */
2533 if (const region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
2534 if (const region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
2535 {
2536 tristate res = region_svalue::eval_condition (lhs_ptr, op, rhs_ptr);
2537 if (res.is_known ())
2538 return res;
2539 /* Otherwise, only known through constraints. */
2540 }
2541
2542 /* If we have a pair of constants, compare them. */
2543 if (const constant_svalue *cst_lhs = lhs->dyn_cast_constant_svalue ())
2544 if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
2545 return constant_svalue::eval_condition (cst_lhs, op, cst_rhs);
2546
2547 /* Handle comparison against zero. */
2548 if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
2549 if (zerop (cst_rhs->get_constant ()))
2550 {
2551 if (const region_svalue *ptr = lhs->dyn_cast_region_svalue ())
2552 {
2553 /* A region_svalue is a non-NULL pointer, except in certain
2554 special cases (see the comment for region::non_null_p). */
2555 const region *pointee = ptr->get_pointee ();
2556 if (pointee->non_null_p ())
2557 {
2558 switch (op)
2559 {
2560 default:
2561 gcc_unreachable ();
2562
2563 case EQ_EXPR:
2564 case GE_EXPR:
2565 case LE_EXPR:
2566 return tristate::TS_FALSE;
2567
2568 case NE_EXPR:
2569 case GT_EXPR:
2570 case LT_EXPR:
2571 return tristate::TS_TRUE;
2572 }
2573 }
2574 }
2575 else if (const binop_svalue *binop = lhs->dyn_cast_binop_svalue ())
2576 {
2577 /* Treat offsets from a non-NULL pointer as being non-NULL. This
2578 isn't strictly true, in that eventually ptr++ will wrap
2579 around and be NULL, but it won't occur in practice and thus
2580 can be used to suppress reports that would effectively be
2581 false positives. */
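	  /* E.g. (an illustrative sketch): for "q = p + 4" where p
	     points at a decl, comparing "q == 0" recurses to comparing
	     p against zero, which is known to be false.  */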
2582 if (binop->get_op () == POINTER_PLUS_EXPR)
2583 {
2584 tristate lhs_ts
2585 = eval_condition_without_cm (binop->get_arg0 (),
2586 op, rhs);
2587 if (lhs_ts.is_known ())
2588 return lhs_ts;
2589 }
2590 }
2591 }
2592
2593 /* Handle rejection of equality for comparisons of the initial values of
2594 "external" values (such as params) with the address of locals. */
2595 if (const initial_svalue *init_lhs = lhs->dyn_cast_initial_svalue ())
2596 if (const region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
2597 {
2598 tristate res = compare_initial_and_pointer (init_lhs, rhs_ptr);
2599 if (res.is_known ())
2600 return res;
2601 }
2602 if (const initial_svalue *init_rhs = rhs->dyn_cast_initial_svalue ())
2603 if (const region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
2604 {
2605 tristate res = compare_initial_and_pointer (init_rhs, lhs_ptr);
2606 if (res.is_known ())
2607 return res;
2608 }
2609
2610 if (const widening_svalue *widen_lhs = lhs->dyn_cast_widening_svalue ())
2611 if (tree rhs_cst = rhs->maybe_get_constant ())
2612 {
2613 tristate res = widen_lhs->eval_condition_without_cm (op, rhs_cst);
2614 if (res.is_known ())
2615 return res;
2616 }
2617
2618 return tristate::TS_UNKNOWN;
2619 }
2620
2621 /* Subroutine of region_model::eval_condition_without_cm, for rejecting
2622 equality of INIT_VAL(PARM) with &LOCAL. */
2623
2624 tristate
2625 region_model::compare_initial_and_pointer (const initial_svalue *init,
2626 const region_svalue *ptr) const
2627 {
2628 const region *pointee = ptr->get_pointee ();
2629
2630 /* If we have a pointer to something within a stack frame, it can't be the
2631 initial value of a param. */
2632 if (pointee->maybe_get_frame_region ())
2633 if (init->initial_value_of_param_p ())
2634 return tristate::TS_FALSE;
2635
2636 return tristate::TS_UNKNOWN;
2637 }
2638
2639 /* Handle various constraints of the form:
2640 LHS: ((bool)INNER_LHS INNER_OP INNER_RHS)
2641 OP : == or !=
2642 RHS: zero
2643 and (with a cast):
2644 LHS: CAST([long]int, ((bool)INNER_LHS INNER_OP INNER_RHS))
2645 OP : == or !=
2646 RHS: zero
2647 by adding constraints for INNER_LHS INNER_OP INNER_RHS.
2648
2649 Return true if this function can fully handle the constraint; if
2650 so, add the implied constraint(s) and write true to *OUT if they
2651 are consistent with existing constraints, or write false to *OUT
2652 if they contradict existing constraints.
2653
2654 Return false for cases that this function doesn't know how to handle.
2655
2656 For example, if we're checking a stored conditional, we'll have
2657 something like:
2658 LHS: CAST(long int, (&HEAP_ALLOCATED_REGION(8)!=(int *)0B))
2659 OP : NE_EXPR
2660 RHS: zero
2661 which this function can turn into an add_constraint of:
2662 (&HEAP_ALLOCATED_REGION(8) != (int *)0B)
2663
2664 Similarly, optimized && and || conditionals lead to e.g.
2665 if (p && q)
2666 becoming gimple like this:
2667 _1 = p_6 == 0B;
2668 _2 = q_8 == 0B;
2669 _3 = _1 | _2;
2670 On the "_3 is false" branch we can have constraints of the form:
2671 ((&HEAP_ALLOCATED_REGION(8)!=(int *)0B)
2672 | (&HEAP_ALLOCATED_REGION(10)!=(int *)0B))
2673 == 0
2674 which implies that both _1 and _2 are false,
2675 which this function can turn into a pair of add_constraints of
2676 (&HEAP_ALLOCATED_REGION(8)!=(int *)0B)
2677 and:
2678 (&HEAP_ALLOCATED_REGION(10)!=(int *)0B). */
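
/* A sketch of C source that could give rise to the first example above
   (assumed for illustration, not taken from the testsuite):

     int *p = (int *)malloc (8);
     long cond = (p != NULL);
     ...
     if (cond)
       ...
*/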
2679
2680 bool
2681 region_model::add_constraints_from_binop (const svalue *outer_lhs,
2682 enum tree_code outer_op,
2683 const svalue *outer_rhs,
2684 bool *out,
2685 region_model_context *ctxt)
2686 {
2687 while (const svalue *cast = outer_lhs->maybe_undo_cast ())
2688 outer_lhs = cast;
2689 const binop_svalue *binop_sval = outer_lhs->dyn_cast_binop_svalue ();
2690 if (!binop_sval)
2691 return false;
2692 if (!outer_rhs->all_zeroes_p ())
2693 return false;
2694
2695 const svalue *inner_lhs = binop_sval->get_arg0 ();
2696 enum tree_code inner_op = binop_sval->get_op ();
2697 const svalue *inner_rhs = binop_sval->get_arg1 ();
2698
2699 if (outer_op != NE_EXPR && outer_op != EQ_EXPR)
2700 return false;
2701
2702 /* We have either
2703 - "OUTER_LHS != false" (i.e. OUTER is true), or
2704 - "OUTER_LHS == false" (i.e. OUTER is false). */
2705 bool is_true = outer_op == NE_EXPR;
2706
2707 switch (inner_op)
2708 {
2709 default:
2710 return false;
2711
2712 case EQ_EXPR:
2713 case NE_EXPR:
2714 {
2715 /* ...and "(inner_lhs OP inner_rhs) == 0"
2716 then (inner_lhs OP inner_rhs) must have the same
2717 logical value as LHS. */
2718 if (!is_true)
2719 inner_op = invert_tree_comparison (inner_op, false /* honor_nans */);
2720 *out = add_constraint (inner_lhs, inner_op, inner_rhs, ctxt);
2721 return true;
2722 }
2723 break;
2724
2725 case BIT_AND_EXPR:
2726 if (is_true)
2727 {
2728 /* ...and "(inner_lhs & inner_rhs) != 0"
2729 then both inner_lhs and inner_rhs must be true. */
2730 const svalue *false_sval
2731 = m_mgr->get_or_create_constant_svalue (boolean_false_node);
2732 bool sat1 = add_constraint (inner_lhs, NE_EXPR, false_sval, ctxt);
2733 bool sat2 = add_constraint (inner_rhs, NE_EXPR, false_sval, ctxt);
2734 *out = sat1 && sat2;
2735 return true;
2736 }
2737 return false;
2738
2739 case BIT_IOR_EXPR:
2740 if (!is_true)
2741 {
2742 /* ...and "(inner_lhs | inner_rhs) == 0"
2743 i.e. "(inner_lhs | inner_rhs)" is false
2744 then both inner_lhs and inner_rhs must be false. */
2745 const svalue *false_sval
2746 = m_mgr->get_or_create_constant_svalue (boolean_false_node);
2747 bool sat1 = add_constraint (inner_lhs, EQ_EXPR, false_sval, ctxt);
2748 bool sat2 = add_constraint (inner_rhs, EQ_EXPR, false_sval, ctxt);
2749 *out = sat1 && sat2;
2750 return true;
2751 }
2752 return false;
2753 }
2754 }
2755
2756 /* Attempt to add the constraint "LHS OP RHS" to this region_model.
2757 If it is consistent with existing constraints, add it, and return true.
2758 Return false if it contradicts existing constraints.
2759 Use CTXT for reporting any diagnostics associated with the accesses. */
2760
2761 bool
2762 region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
2763 region_model_context *ctxt)
2764 {
2765 /* For now, make no attempt to capture constraints on floating-point
2766 values. */
2767 if (FLOAT_TYPE_P (TREE_TYPE (lhs)) || FLOAT_TYPE_P (TREE_TYPE (rhs)))
2768 return true;
2769
2770 const svalue *lhs_sval = get_rvalue (lhs, ctxt);
2771 const svalue *rhs_sval = get_rvalue (rhs, ctxt);
2772
2773 return add_constraint (lhs_sval, op, rhs_sval, ctxt);
2774 }
2775
2776 /* Attempt to add the constraint "LHS OP RHS" to this region_model.
2777 If it is consistent with existing constraints, add it, and return true.
2778 Return false if it contradicts existing constraints.
2779 Use CTXT for reporting any diagnostics associated with the accesses. */
2780
2781 bool
2782 region_model::add_constraint (const svalue *lhs,
2783 enum tree_code op,
2784 const svalue *rhs,
2785 region_model_context *ctxt)
2786 {
2787 tristate t_cond = eval_condition (lhs, op, rhs);
2788
2789 /* If we already have the condition, do nothing. */
2790 if (t_cond.is_true ())
2791 return true;
2792
2793 /* Reject a constraint that would contradict existing knowledge, as
2794 unsatisfiable. */
2795 if (t_cond.is_false ())
2796 return false;
2797
2798 bool out;
2799 if (add_constraints_from_binop (lhs, op, rhs, &out, ctxt))
2800 return out;
2801
2802 /* Store the constraint. */
2803 m_constraints->add_constraint (lhs, op, rhs);
2804
2805 /* Notify the context, if any. This exists so that the state machines
2806 in a program_state can be notified about the condition, and so can
2807 set sm-state for e.g. unchecked->checked, both for cfg-edges, and
2808 when synthesizing constraints as above. */
2809 if (ctxt)
2810 ctxt->on_condition (lhs, op, rhs);
2811
2812 /* If we have &REGION == NULL, then drop dynamic extents for REGION (for
2813 the case where REGION is heap-allocated and thus could be NULL). */
2814 if (tree rhs_cst = rhs->maybe_get_constant ())
2815 if (op == EQ_EXPR && zerop (rhs_cst))
2816 if (const region_svalue *region_sval = lhs->dyn_cast_region_svalue ())
2817 unset_dynamic_extents (region_sval->get_pointee ());
2818
2819 return true;
2820 }
2821
2822 /* As above, but when returning false, if OUT is non-NULL, write a
2823 new rejected_constraint to *OUT. */
2824
2825 bool
2826 region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
2827 region_model_context *ctxt,
2828 rejected_constraint **out)
2829 {
2830 bool sat = add_constraint (lhs, op, rhs, ctxt);
2831 if (!sat && out)
2832 *out = new rejected_op_constraint (*this, lhs, op, rhs);
2833 return sat;
2834 }
2835
2836 /* Determine what is known about the condition "LHS OP RHS" within
2837 this model.
2838 Use CTXT for reporting any diagnostics associated with the accesses. */
2839
2840 tristate
2841 region_model::eval_condition (tree lhs,
2842 enum tree_code op,
2843 tree rhs,
2844 region_model_context *ctxt)
2845 {
2846 /* For now, make no attempt to model constraints on floating-point
2847 values. */
2848 if (FLOAT_TYPE_P (TREE_TYPE (lhs)) || FLOAT_TYPE_P (TREE_TYPE (rhs)))
2849 return tristate::unknown ();
2850
2851 return eval_condition (get_rvalue (lhs, ctxt), op, get_rvalue (rhs, ctxt));
2852 }
2853
2854 /* Implementation of region_model::get_representative_path_var.
2855 Attempt to return a path_var that represents SVAL, or return NULL_TREE.
2856 Use VISITED to prevent infinite mutual recursion with the overload for
2857 regions. */
2858
2859 path_var
2860 region_model::get_representative_path_var_1 (const svalue *sval,
2861 svalue_set *visited) const
2862 {
2863 gcc_assert (sval);
2864
2865 /* Prevent infinite recursion. */
2866 if (visited->contains (sval))
2867 return path_var (NULL_TREE, 0);
2868 visited->add (sval);
2869
2870 /* Handle casts by recursion into get_representative_path_var. */
2871 if (const svalue *cast_sval = sval->maybe_undo_cast ())
2872 {
2873 path_var result = get_representative_path_var (cast_sval, visited);
2874 tree orig_type = sval->get_type ();
2875 /* If necessary, wrap the result in a cast. */
2876 if (result.m_tree && orig_type)
2877 result.m_tree = build1 (NOP_EXPR, orig_type, result.m_tree);
2878 return result;
2879 }
2880
2881 auto_vec<path_var> pvs;
2882 m_store.get_representative_path_vars (this, visited, sval, &pvs);
2883
2884 if (tree cst = sval->maybe_get_constant ())
2885 pvs.safe_push (path_var (cst, 0));
2886
2887 /* Handle string literals and various other pointers. */
2888 if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
2889 {
2890 const region *reg = ptr_sval->get_pointee ();
2891 if (path_var pv = get_representative_path_var (reg, visited))
2892 return path_var (build1 (ADDR_EXPR,
2893 sval->get_type (),
2894 pv.m_tree),
2895 pv.m_stack_depth);
2896 }
2897
2898 /* If we have a sub_svalue, look for ways to represent the parent. */
2899 if (const sub_svalue *sub_sval = sval->dyn_cast_sub_svalue ())
2900 {
2901 const svalue *parent_sval = sub_sval->get_parent ();
2902 const region *subreg = sub_sval->get_subregion ();
2903 if (path_var parent_pv
2904 = get_representative_path_var (parent_sval, visited))
2905 if (const field_region *field_reg = subreg->dyn_cast_field_region ())
2906 return path_var (build3 (COMPONENT_REF,
2907 sval->get_type (),
2908 parent_pv.m_tree,
2909 field_reg->get_field (),
2910 NULL_TREE),
2911 parent_pv.m_stack_depth);
2912 }
2913
2914 /* Handle binops. */
2915 if (const binop_svalue *binop_sval = sval->dyn_cast_binop_svalue ())
2916 if (path_var lhs_pv
2917 = get_representative_path_var (binop_sval->get_arg0 (), visited))
2918 if (path_var rhs_pv
2919 = get_representative_path_var (binop_sval->get_arg1 (), visited))
2920 return path_var (build2 (binop_sval->get_op (),
2921 sval->get_type (),
2922 lhs_pv.m_tree, rhs_pv.m_tree),
2923 lhs_pv.m_stack_depth);
2924
2925 if (pvs.length () < 1)
2926 return path_var (NULL_TREE, 0);
2927
2928 pvs.qsort (readability_comparator);
2929 return pvs[0];
2930 }
2931
2932 /* Attempt to return a path_var that represents SVAL, or return NULL_TREE.
2933 Use VISITED to prevent infinite mutual recursion with the overload for
2934 regions.
2935
2936 This function defers to get_representative_path_var_1 to do the work;
2937 it adds verification that get_representative_path_var_1 returned a tree
2938 of the correct type. */
2939
2940 path_var
2941 region_model::get_representative_path_var (const svalue *sval,
2942 svalue_set *visited) const
2943 {
2944 if (sval == NULL)
2945 return path_var (NULL_TREE, 0);
2946
2947 tree orig_type = sval->get_type ();
2948
2949 path_var result = get_representative_path_var_1 (sval, visited);
2950
2951 /* Verify that the result has the same type as SVAL, if any. */
2952 if (result.m_tree && orig_type)
2953 gcc_assert (TREE_TYPE (result.m_tree) == orig_type);
2954
2955 return result;
2956 }
2957
2958 /* Attempt to return a tree that represents SVAL, or return NULL_TREE.
2959
2960 Strip off any top-level cast, to avoid messages like
2961 double-free of '(void *)ptr'
2962 from analyzer diagnostics. */
2963
2964 tree
2965 region_model::get_representative_tree (const svalue *sval) const
2966 {
2967 svalue_set visited;
2968 tree expr = get_representative_path_var (sval, &visited).m_tree;
2969
2970 /* Strip off any top-level cast. */
2971 if (expr && TREE_CODE (expr) == NOP_EXPR)
2972 expr = TREE_OPERAND (expr, 0);
2973
2974 return fixup_tree_for_diagnostic (expr);
2975 }
2976
2977 /* Implementation of region_model::get_representative_path_var.
2978
2979 Attempt to return a path_var that represents REG, or return
2980 the NULL path_var.
2981 For example, a region for a field of a local would be a path_var
2982 wrapping a COMPONENT_REF.
2983 Use VISITED to prevent infinite mutual recursion with the overload for
2984 svalues. */
2985
2986 path_var
2987 region_model::get_representative_path_var_1 (const region *reg,
2988 svalue_set *visited) const
2989 {
2990 switch (reg->get_kind ())
2991 {
2992 default:
2993 gcc_unreachable ();
2994
2995 case RK_FRAME:
2996 case RK_GLOBALS:
2997 case RK_CODE:
2998 case RK_HEAP:
2999 case RK_STACK:
3000 case RK_ROOT:
3001 /* Regions that represent memory spaces are not expressible as trees. */
3002 return path_var (NULL_TREE, 0);
3003
3004 case RK_FUNCTION:
3005 {
3006 const function_region *function_reg
3007 = as_a <const function_region *> (reg);
3008 return path_var (function_reg->get_fndecl (), 0);
3009 }
3010 case RK_LABEL:
3011 {
3012 const label_region *label_reg = as_a <const label_region *> (reg);
3013 return path_var (label_reg->get_label (), 0);
3014 }
3015
3016 case RK_SYMBOLIC:
3017 {
3018 const symbolic_region *symbolic_reg
3019 = as_a <const symbolic_region *> (reg);
3020 const svalue *pointer = symbolic_reg->get_pointer ();
3021 path_var pointer_pv = get_representative_path_var (pointer, visited);
3022 if (!pointer_pv)
3023 return path_var (NULL_TREE, 0);
3024 tree offset = build_int_cst (pointer->get_type (), 0);
3025 return path_var (build2 (MEM_REF,
3026 reg->get_type (),
3027 pointer_pv.m_tree,
3028 offset),
3029 pointer_pv.m_stack_depth);
3030 }
3031 case RK_DECL:
3032 {
3033 const decl_region *decl_reg = as_a <const decl_region *> (reg);
3034 return path_var (decl_reg->get_decl (), decl_reg->get_stack_depth ());
3035 }
3036 case RK_FIELD:
3037 {
3038 const field_region *field_reg = as_a <const field_region *> (reg);
3039 path_var parent_pv
3040 = get_representative_path_var (reg->get_parent_region (), visited);
3041 if (!parent_pv)
3042 return path_var (NULL_TREE, 0);
3043 return path_var (build3 (COMPONENT_REF,
3044 reg->get_type (),
3045 parent_pv.m_tree,
3046 field_reg->get_field (),
3047 NULL_TREE),
3048 parent_pv.m_stack_depth);
3049 }
3050
3051 case RK_ELEMENT:
3052 {
3053 const element_region *element_reg
3054 = as_a <const element_region *> (reg);
3055 path_var parent_pv
3056 = get_representative_path_var (reg->get_parent_region (), visited);
3057 if (!parent_pv)
3058 return path_var (NULL_TREE, 0);
3059 path_var index_pv
3060 = get_representative_path_var (element_reg->get_index (), visited);
3061 if (!index_pv)
3062 return path_var (NULL_TREE, 0);
3063 return path_var (build4 (ARRAY_REF,
3064 reg->get_type (),
3065 parent_pv.m_tree, index_pv.m_tree,
3066 NULL_TREE, NULL_TREE),
3067 parent_pv.m_stack_depth);
3068 }
3069
3070 case RK_OFFSET:
3071 {
3072 const offset_region *offset_reg
3073 = as_a <const offset_region *> (reg);
3074 path_var parent_pv
3075 = get_representative_path_var (reg->get_parent_region (), visited);
3076 if (!parent_pv)
3077 return path_var (NULL_TREE, 0);
3078 path_var offset_pv
3079 = get_representative_path_var (offset_reg->get_byte_offset (),
3080 visited);
3081 if (!offset_pv || TREE_CODE (offset_pv.m_tree) != INTEGER_CST)
3082 return path_var (NULL_TREE, 0);
3083 tree addr_parent = build1 (ADDR_EXPR,
3084 build_pointer_type (reg->get_type ()),
3085 parent_pv.m_tree);
3086 return path_var (build2 (MEM_REF,
3087 reg->get_type (),
3088 addr_parent, offset_pv.m_tree),
3089 parent_pv.m_stack_depth);
3090 }
3091
3092 case RK_SIZED:
3093 return path_var (NULL_TREE, 0);
3094
3095 case RK_CAST:
3096 {
3097 path_var parent_pv
3098 = get_representative_path_var (reg->get_parent_region (), visited);
3099 if (!parent_pv)
3100 return path_var (NULL_TREE, 0);
3101 return path_var (build1 (NOP_EXPR,
3102 reg->get_type (),
3103 parent_pv.m_tree),
3104 parent_pv.m_stack_depth);
3105 }
3106
3107 case RK_HEAP_ALLOCATED:
3108 case RK_ALLOCA:
3109 /* No good way to express heap-allocated/alloca regions as trees. */
3110 return path_var (NULL_TREE, 0);
3111
3112 case RK_STRING:
3113 {
3114 const string_region *string_reg = as_a <const string_region *> (reg);
3115 return path_var (string_reg->get_string_cst (), 0);
3116 }
3117
3118 case RK_UNKNOWN:
3119 return path_var (NULL_TREE, 0);
3120 }
3121 }
3122
3123 /* Attempt to return a path_var that represents REG, or return
3124 the NULL path_var.
3125 For example, a region for a field of a local would be a path_var
3126 wrapping a COMPONENT_REF.
3127 Use VISITED to prevent infinite mutual recursion with the overload for
3128 svalues.
3129
3130 This function defers to get_representative_path_var_1 to do the work;
3131 it adds verification that get_representative_path_var_1 returned a tree
3132 of the correct type. */
3133
3134 path_var
3135 region_model::get_representative_path_var (const region *reg,
3136 svalue_set *visited) const
3137 {
3138 path_var result = get_representative_path_var_1 (reg, visited);
3139
3140 /* Verify that the result has the same type as REG, if any. */
3141 if (result.m_tree && reg->get_type ())
3142 gcc_assert (TREE_TYPE (result.m_tree) == reg->get_type ());
3143
3144 return result;
3145 }
3146
3147 /* Update this model for any phis in SNODE, assuming we came from
3148 LAST_CFG_SUPEREDGE. */
3149
3150 void
3151 region_model::update_for_phis (const supernode *snode,
3152 const cfg_superedge *last_cfg_superedge,
3153 region_model_context *ctxt)
3154 {
3155 gcc_assert (last_cfg_superedge);
3156
3157 /* Copy this state and pass it to handle_phi so that all of the phi stmts
3158 are effectively handled simultaneously. */
3159 const region_model old_state (*this);
3160
3161 for (gphi_iterator gpi = const_cast<supernode *>(snode)->start_phis ();
3162 !gsi_end_p (gpi); gsi_next (&gpi))
3163 {
3164 gphi *phi = gpi.phi ();
3165
3166 tree src = last_cfg_superedge->get_phi_arg (phi);
3167 tree lhs = gimple_phi_result (phi);
3168
3169 /* Update next_state based on phi and old_state. */
3170 handle_phi (phi, lhs, src, old_state, ctxt);
3171 }
3172 }
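
/* For example (an illustrative sketch, not from the original source):
   given the phi nodes
     a_2 = PHI <b_1, ...>
     b_3 = PHI <a_1, ...>
   both a_1 and b_1 are read from OLD_STATE, so the update of a_2 is
   not observed when computing b_3: the phis act as a parallel
   assignment, matching gimple semantics.  */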
3173
3174 /* Attempt to update this model for taking EDGE (where the last statement
3175 was LAST_STMT), returning true if the edge can be taken, false
3176 otherwise.
3177 When returning false, if OUT is non-NULL, write a new rejected_constraint
3178 to it.
3179
3180 For CFG superedges where LAST_STMT is a conditional or a switch
3181 statement, attempt to add the relevant conditions for EDGE to this
3182 model, returning true if they are feasible, or false if they are
3183 impossible.
3184
3185 For call superedges, push frame information and store arguments
3186 into parameters.
3187
3188 For return superedges, pop frame information and store return
3189 values into any lhs.
3190
3191 Rejection of call/return superedges happens elsewhere, in
3192 program_point::on_edge (i.e. based on program point, rather
3193 than program state). */
3194
3195 bool
3196 region_model::maybe_update_for_edge (const superedge &edge,
3197 const gimple *last_stmt,
3198 region_model_context *ctxt,
3199 rejected_constraint **out)
3200 {
3201 /* Handle frame updates for interprocedural edges. */
3202 switch (edge.m_kind)
3203 {
3204 default:
3205 break;
3206
3207 case SUPEREDGE_CALL:
3208 {
3209 const call_superedge *call_edge = as_a <const call_superedge *> (&edge);
3210 update_for_call_superedge (*call_edge, ctxt);
3211 }
3212 break;
3213
3214 case SUPEREDGE_RETURN:
3215 {
3216 const return_superedge *return_edge
3217 = as_a <const return_superedge *> (&edge);
3218 update_for_return_superedge (*return_edge, ctxt);
3219 }
3220 break;
3221
3222 case SUPEREDGE_INTRAPROCEDURAL_CALL:
3223 {
3224 const callgraph_superedge *cg_sedge
3225 = as_a <const callgraph_superedge *> (&edge);
3226 update_for_call_summary (*cg_sedge, ctxt);
3227 }
3228 break;
3229 }
3230
3231 if (last_stmt == NULL)
3232 return true;
3233
3234 /* Apply any constraints for conditionals/switch statements. */
3235
3236 if (const gcond *cond_stmt = dyn_cast <const gcond *> (last_stmt))
3237 {
3238 const cfg_superedge *cfg_sedge = as_a <const cfg_superedge *> (&edge);
3239 return apply_constraints_for_gcond (*cfg_sedge, cond_stmt, ctxt, out);
3240 }
3241
3242 if (const gswitch *switch_stmt = dyn_cast <const gswitch *> (last_stmt))
3243 {
3244 const switch_cfg_superedge *switch_sedge
3245 = as_a <const switch_cfg_superedge *> (&edge);
3246 return apply_constraints_for_gswitch (*switch_sedge, switch_stmt,
3247 ctxt, out);
3248 }
3249
3250 /* Apply any constraints due to an exception being thrown. */
3251 if (const cfg_superedge *cfg_sedge = dyn_cast <const cfg_superedge *> (&edge))
3252 if (cfg_sedge->get_flags () & EDGE_EH)
3253 return apply_constraints_for_exception (last_stmt, ctxt, out);
3254
3255 return true;
3256 }
3257
3258 /* Push a new frame_region on to the stack region.
3259 Populate the frame_region with child regions for the function call's
3260 parameters, using values from the arguments at the callsite in the
3261 caller's frame. */
3262
3263 void
3264 region_model::update_for_gcall (const gcall *call_stmt,
3265 region_model_context *ctxt,
3266 function *callee)
3267 {
3268 /* Build a vec of argument svalues, using the current top
3269 frame for resolving tree expressions. */
3270 auto_vec<const svalue *> arg_svals (gimple_call_num_args (call_stmt));
3271
3272 for (unsigned i = 0; i < gimple_call_num_args (call_stmt); i++)
3273 {
3274 tree arg = gimple_call_arg (call_stmt, i);
3275 arg_svals.quick_push (get_rvalue (arg, ctxt));
3276 }
3277
3278 if (!callee)
3279 {
3280 /* Get the function * from the gcall. */
3281 tree fn_decl = get_fndecl_for_call (call_stmt, ctxt);
3282 callee = DECL_STRUCT_FUNCTION (fn_decl);
3283 }
3284
3285 push_frame (callee, &arg_svals, ctxt);
3286 }
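
/* For example (an illustrative sketch, not from the original source):
   for a call stmt "r = foo (x, y)", the loop above evaluates x and y
   to svalues in the caller's (still current) frame; push_frame then
   creates foo's frame and binds foo's parameters (or their default-def
   SSA names) to those svalues.  */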
3287
3288 /* Pop the top-most frame_region from the stack, and copy the return
3289 region's values (if any) into the region for the lvalue of the LHS of
3290 the call (if any). */
3291
3292 void
3293 region_model::update_for_return_gcall (const gcall *call_stmt,
3294 region_model_context *ctxt)
3295 {
3296 /* Get the region for the result of the call, within the caller frame. */
3297 const region *result_dst_reg = NULL;
3298 tree lhs = gimple_call_lhs (call_stmt);
3299 if (lhs)
3300 {
3301 /* Normally we access the top-level frame, which is:
3302 path_var (expr, get_stack_depth () - 1)
3303 whereas here we need the caller frame, hence "- 2" here. */
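      /* E.g. (an illustrative sketch): with frames main -> f -> g on
	 the stack when g returns, get_stack_depth () is 3; g's frame is
	 at index 2 (the usual "- 1") and its caller f's frame, where
	 LHS lives, is at index 1, hence the "- 2".  */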
3304 gcc_assert (get_stack_depth () >= 2);
3305 result_dst_reg = get_lvalue (path_var (lhs, get_stack_depth () - 2),
3306 ctxt);
3307 }
3308
3309 pop_frame (result_dst_reg, NULL, ctxt);
3310 }
3311
3312 /* Extract calling information from the superedge and update the model for the
3313 call. */
3314
3315 void
3316 region_model::update_for_call_superedge (const call_superedge &call_edge,
3317 region_model_context *ctxt)
3318 {
3319 const gcall *call_stmt = call_edge.get_call_stmt ();
3320 update_for_gcall (call_stmt, ctxt, call_edge.get_callee_function ());
3321 }
3322
3323 /* Extract calling information from the return superedge and update the model
3324 for the returning call. */
3325
3326 void
3327 region_model::update_for_return_superedge (const return_superedge &return_edge,
3328 region_model_context *ctxt)
3329 {
3330 const gcall *call_stmt = return_edge.get_call_stmt ();
3331 update_for_return_gcall (call_stmt, ctxt);
3332 }
3333
3334 /* Update this region_model with a summary of the effect of calling
3335 and returning from CG_SEDGE.
3336
3337 TODO: Currently this is extremely simplistic: we merely set the
3338 return value to "unknown". A proper implementation would e.g. update
3339 sm-state, and presumably be reworked to support multiple outcomes. */
3340
3341 void
3342 region_model::update_for_call_summary (const callgraph_superedge &cg_sedge,
3343 region_model_context *ctxt)
3344 {
3345 /* For now, set any return value to "unknown". */
3346 const gcall *call_stmt = cg_sedge.get_call_stmt ();
3347 tree lhs = gimple_call_lhs (call_stmt);
3348 if (lhs)
3349 mark_region_as_unknown (get_lvalue (lhs, ctxt),
3350 ctxt ? ctxt->get_uncertainty () : NULL);
3351
3352 // TODO: actually implement some kind of summary here
3353 }
3354
3355 /* Given a true or false edge guarded by conditional statement COND_STMT,
3356 determine appropriate constraints for the edge to be taken.
3357
3358 If they are feasible, add the constraints and return true.
3359
3360 Return false if the constraints contradict existing knowledge
3361 (and so the edge should not be taken).
3362 When returning false, if OUT is non-NULL, write a new rejected_constraint
3363 to it. */
3364
3365 bool
3366 region_model::apply_constraints_for_gcond (const cfg_superedge &sedge,
3367 const gcond *cond_stmt,
3368 region_model_context *ctxt,
3369 rejected_constraint **out)
3370 {
3371 ::edge cfg_edge = sedge.get_cfg_edge ();
3372 gcc_assert (cfg_edge != NULL);
3373 gcc_assert (cfg_edge->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE));
3374
3375 enum tree_code op = gimple_cond_code (cond_stmt);
3376 tree lhs = gimple_cond_lhs (cond_stmt);
3377 tree rhs = gimple_cond_rhs (cond_stmt);
3378 if (cfg_edge->flags & EDGE_FALSE_VALUE)
3379 op = invert_tree_comparison (op, false /* honor_nans */);
3380 return add_constraint (lhs, op, rhs, ctxt, out);
3381 }
3382
3383 /* Given an EDGE guarded by SWITCH_STMT, determine appropriate constraints
3384 for the edge to be taken.
3385
3386 If they are feasible, add the constraints and return true.
3387
3388 Return false if the constraints contradict existing knowledge
3389 (and so the edge should not be taken).
3390 When returning false, if OUT is non-NULL, write a new rejected_constraint
3391 to it. */
3392
3393 bool
3394 region_model::apply_constraints_for_gswitch (const switch_cfg_superedge &edge,
3395 const gswitch *switch_stmt,
3396 region_model_context *ctxt,
3397 rejected_constraint **out)
3398 {
3399 bounded_ranges_manager *ranges_mgr = get_range_manager ();
3400 const bounded_ranges *all_cases_ranges
3401 = ranges_mgr->get_or_create_ranges_for_switch (&edge, switch_stmt);
3402 tree index = gimple_switch_index (switch_stmt);
3403 const svalue *index_sval = get_rvalue (index, ctxt);
3404 bool sat = m_constraints->add_bounded_ranges (index_sval, all_cases_ranges);
3405 if (!sat && out)
3406 *out = new rejected_ranges_constraint (*this, index, all_cases_ranges);
3407 return sat;
3408 }
3409
3410 /* Apply any constraints due to an exception being thrown at LAST_STMT.
3411
3412 If they are feasible, add the constraints and return true.
3413
3414 Return false if the constraints contradict existing knowledge
3415 (and so the edge should not be taken).
3416 When returning false, if OUT is non-NULL, write a new rejected_constraint
3417 to it. */
3418
3419 bool
3420 region_model::apply_constraints_for_exception (const gimple *last_stmt,
3421 region_model_context *ctxt,
3422 rejected_constraint **out)
3423 {
3424 gcc_assert (last_stmt);
3425 if (const gcall *call = dyn_cast <const gcall *> (last_stmt))
3426 if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
3427 if (is_named_call_p (callee_fndecl, "operator new", call, 1)
3428 || is_named_call_p (callee_fndecl, "operator new []", call, 1))
3429 {
3430 /* We have an exception thrown from operator new.
3431 Add a constraint that the result was NULL, to avoid a false
3432 leak report due to the result being lost when following
3433 the EH edge. */
3434 if (tree lhs = gimple_call_lhs (call))
3435 return add_constraint (lhs, EQ_EXPR, null_pointer_node, ctxt, out);
3436 return true;
3437 }
3438 return true;
3439 }
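
/* Example (illustrative sketch, not compiled): for C++ code such as

     p = new widget;   // EH edge taken if the allocation throws

   following the exception edge adds "p == NULL".  This is not a claim
   about C++ semantics; it simply prevents the analyzer from reporting
   a leak of the result, which is otherwise lost when control follows
   the EH edge.  */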
3440
3441 /* For use with push_frame when handling a top-level call within the analysis.
3442 PARAM has a defined but unknown initial value.
3443 Anything it points to has escaped, since the calling context "knows"
3444 the pointer, and thus calls to unknown functions could read/write into
3445 the region. */
3446
3447 void
3448 region_model::on_top_level_param (tree param,
3449 region_model_context *ctxt)
3450 {
3451 if (POINTER_TYPE_P (TREE_TYPE (param)))
3452 {
3453 const region *param_reg = get_lvalue (param, ctxt);
3454 const svalue *init_ptr_sval
3455 = m_mgr->get_or_create_initial_value (param_reg);
3456 const region *pointee_reg = m_mgr->get_symbolic_region (init_ptr_sval);
3457 m_store.mark_as_escaped (pointee_reg);
3458 }
3459 }
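
/* Example (illustrative sketch, not compiled): when analyzing

     int fn (int *p);

   as a top-level entry point, "p" is given its initial_svalue, and the
   symbolic region it points to is marked as escaped, since the
   (unanalyzed) caller holds the pointer and could pass it to arbitrary
   code that reads or writes the pointee.  */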
3460
3461 /* Update this region_model to reflect pushing a frame onto the stack
3462 for a call to FUN.
3463
3464 If ARG_SVALS is non-NULL, use it to populate the parameters
3465 in the new frame.
3466 Otherwise, the params have their initial_svalues.
3467
3468 Return the frame_region for the new frame. */
3469
3470 const region *
3471 region_model::push_frame (function *fun, const vec<const svalue *> *arg_svals,
3472 region_model_context *ctxt)
3473 {
3474 m_current_frame = m_mgr->get_frame_region (m_current_frame, fun);
3475 if (arg_svals)
3476 {
3477 /* Arguments supplied from a caller frame. */
3478 tree fndecl = fun->decl;
3479 unsigned idx = 0;
3480 for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
3481 iter_parm = DECL_CHAIN (iter_parm), ++idx)
3482 {
3483 /* If there's a mismatching declaration, the call stmt might
3484 not have enough args. Handle this case by leaving the
3485 rest of the params as uninitialized. */
3486 if (idx >= arg_svals->length ())
3487 break;
3488 tree parm_lval = iter_parm;
3489 if (tree parm_default_ssa = ssa_default_def (fun, iter_parm))
3490 parm_lval = parm_default_ssa;
3491 const region *parm_reg = get_lvalue (parm_lval, ctxt);
3492 const svalue *arg_sval = (*arg_svals)[idx];
3493 set_value (parm_reg, arg_sval, ctxt);
3494 }
3495 }
3496 else
3497 {
3498 /* Otherwise we have a top-level call within the analysis. The params
3499 have defined but unknown initial values.
3500 Anything they point to has escaped. */
3501 tree fndecl = fun->decl;
3502 for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
3503 iter_parm = DECL_CHAIN (iter_parm))
3504 {
3505 if (tree parm_default_ssa = ssa_default_def (fun, iter_parm))
3506 on_top_level_param (parm_default_ssa, ctxt);
3507 else
3508 on_top_level_param (iter_parm, ctxt);
3509 }
3510 }
3511
3512 return m_current_frame;
3513 }
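
/* Example (illustrative sketch, not compiled; follows the selftest
   conventions later in this file, and assumes FNDECL is a
   FUNCTION_DECL with an allocated struct function):

     region_model_manager mgr;
     region_model model (&mgr);
     const region *frame
       = model.push_frame (DECL_STRUCT_FUNCTION (fndecl), NULL, NULL);
     gcc_assert (model.get_current_frame () == frame);

   Passing NULL for ARG_SVALS takes the "top-level call" path above,
   giving each parameter its initial_svalue.  */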
3514
3515 /* Get the function of the top-most frame in this region_model's stack.
3516 There must be such a frame. */
3517
3518 function *
3519 region_model::get_current_function () const
3520 {
3521 const frame_region *frame = get_current_frame ();
3522 gcc_assert (frame);
3523 return frame->get_function ();
3524 }
3525
3526 /* Pop the topmost frame_region from this region_model's stack.
3527
3528 If RESULT_DST_REG is non-null, copy any return value from the frame
3529 into RESULT_DST_REG's region.
3530 If OUT_RESULT is non-null, copy any return value from the frame
3531 into *OUT_RESULT.
3532
3533 Purge the frame region and all its descendent regions.
3534 Convert any pointers that point into such regions into
3535 POISON_KIND_POPPED_STACK svalues. */
3536
3537 void
3538 region_model::pop_frame (const region *result_dst_reg,
3539 const svalue **out_result,
3540 region_model_context *ctxt)
3541 {
3542 gcc_assert (m_current_frame);
3543
3544 /* Evaluate the result, within the callee frame. */
3545 const frame_region *frame_reg = m_current_frame;
3546 tree fndecl = m_current_frame->get_function ()->decl;
3547 tree result = DECL_RESULT (fndecl);
3548 if (result && TREE_TYPE (result) != void_type_node)
3549 {
3550 if (result_dst_reg)
3551 {
3552 /* Copy the result to RESULT_DST_REG. */
3553 copy_region (result_dst_reg,
3554 get_lvalue (result, ctxt),
3555 ctxt);
3556 }
3557 if (out_result)
3558 *out_result = get_rvalue (result, ctxt);
3559 }
3560
3561 /* Pop the frame. */
3562 m_current_frame = m_current_frame->get_calling_frame ();
3563
3564 unbind_region_and_descendents (frame_reg, POISON_KIND_POPPED_STACK);
3565 }
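
/* Example (illustrative sketch, not compiled): continuing the
   push_frame sketch above,

     const svalue *result_sval = NULL;
     model.pop_frame (NULL, &result_sval, NULL);

   captures any return value via OUT_RESULT; afterwards, any pointer
   that still aims at a local of the popped frame evaluates to a
   POISON_KIND_POPPED_STACK svalue (see test_stack_frames below).  */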
3566
3567 /* Get the number of frames in this region_model's stack. */
3568
3569 int
3570 region_model::get_stack_depth () const
3571 {
3572 const frame_region *frame = get_current_frame ();
3573 if (frame)
3574 return frame->get_stack_depth ();
3575 else
3576 return 0;
3577 }
3578
3579 /* Get the frame_region with the given index within the stack.
3580 The frame_region must exist. */
3581
3582 const frame_region *
3583 region_model::get_frame_at_index (int index) const
3584 {
3585 const frame_region *frame = get_current_frame ();
3586 gcc_assert (frame);
3587 gcc_assert (index >= 0);
3588 gcc_assert (index <= frame->get_index ());
3589 while (index != frame->get_index ())
3590 {
3591 frame = frame->get_calling_frame ();
3592 gcc_assert (frame);
3593 }
3594 return frame;
3595 }
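
/* Example (illustrative sketch, not compiled): frame indices count up
   from the base of the stack, so with a stack of depth N,
   get_frame_at_index (0) is the outermost frame and
   get_frame_at_index (get_stack_depth () - 1) is the current frame;
   the loop above simply follows calling-frame links until the
   requested index is reached.  */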
3596
3597 /* Unbind svalues for any regions in REG and below.
3598 Find any pointers to such regions; convert them to
3599 poisoned values of kind PKIND.
3600 Also purge any dynamic extents. */
3601
3602 void
3603 region_model::unbind_region_and_descendents (const region *reg,
3604 enum poison_kind pkind)
3605 {
3606 /* Gather a set of base regions to be unbound. */
3607 hash_set<const region *> base_regs;
3608 for (store::cluster_map_t::iterator iter = m_store.begin ();
3609 iter != m_store.end (); ++iter)
3610 {
3611 const region *iter_base_reg = (*iter).first;
3612 if (iter_base_reg->descendent_of_p (reg))
3613 base_regs.add (iter_base_reg);
3614 }
3615 for (hash_set<const region *>::iterator iter = base_regs.begin ();
3616 iter != base_regs.end (); ++iter)
3617 m_store.purge_cluster (*iter);
3618
3619 /* Find any pointers to REG or its descendents; convert to poisoned. */
3620 poison_any_pointers_to_descendents (reg, pkind);
3621
3622 /* Purge dynamic extents of any base regions in REG and below
3623 (e.g. VLAs and alloca stack regions). */
3624 for (auto iter : m_dynamic_extents)
3625 {
3626 const region *iter_reg = iter.first;
3627 if (iter_reg->descendent_of_p (reg))
3628 unset_dynamic_extents (iter_reg);
3629 }
3630 }
3631
3632 /* Implementation of BindingVisitor.
3633 Update the bound svalues for regions below REG to use poisoned
3634 values instead. */
3635
3636 struct bad_pointer_finder
3637 {
3638 bad_pointer_finder (const region *reg, enum poison_kind pkind,
3639 region_model_manager *mgr)
3640 : m_reg (reg), m_pkind (pkind), m_mgr (mgr), m_count (0)
3641 {}
3642
3643 void on_binding (const binding_key *, const svalue *&sval)
3644 {
3645 if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
3646 {
3647 const region *ptr_dst = ptr_sval->get_pointee ();
3648 /* Poison ptrs to descendents of REG, but not to REG itself,
3649 otherwise double-free detection doesn't work (since sm-state
3650 for "free" is stored on the original ptr svalue). */
3651 if (ptr_dst->descendent_of_p (m_reg)
3652 && ptr_dst != m_reg)
3653 {
3654 sval = m_mgr->get_or_create_poisoned_svalue (m_pkind,
3655 sval->get_type ());
3656 ++m_count;
3657 }
3658 }
3659 }
3660
3661 const region *m_reg;
3662 enum poison_kind m_pkind;
3663 region_model_manager *const m_mgr;
3664 int m_count;
3665 };
3666
3667 /* Find any pointers to REG or its descendents; convert them to
3668 poisoned values of kind PKIND.
3669 Return the number of pointers that were poisoned. */
3670
3671 int
3672 region_model::poison_any_pointers_to_descendents (const region *reg,
3673 enum poison_kind pkind)
3674 {
3675 bad_pointer_finder bv (reg, pkind, m_mgr);
3676 m_store.for_each_binding (bv);
3677 return bv.m_count;
3678 }
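
/* Example (illustrative sketch, not compiled): given

     int *p = &local;

   where "local" belongs to a frame being popped, the binding for "p"
   is rewritten from a pointer svalue to a poisoned_svalue of kind
   PKIND.  Pointers to REG itself are deliberately left intact, so that
   sm-state attached to the original pointer svalue (e.g. malloc's
   "freed" state) is preserved for double-free detection.  */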
3679
3680 /* Attempt to merge THIS with OTHER_MODEL, writing the result
3681 to OUT_MODEL. Use POINT to distinguish values created as a
3682 result of merging. */
3683
3684 bool
3685 region_model::can_merge_with_p (const region_model &other_model,
3686 const program_point &point,
3687 region_model *out_model,
3688 const extrinsic_state *ext_state,
3689 const program_state *state_a,
3690 const program_state *state_b) const
3691 {
3692 gcc_assert (out_model);
3693 gcc_assert (m_mgr == other_model.m_mgr);
3694 gcc_assert (m_mgr == out_model->m_mgr);
3695
3696 if (m_current_frame != other_model.m_current_frame)
3697 return false;
3698 out_model->m_current_frame = m_current_frame;
3699
3700 model_merger m (this, &other_model, point, out_model,
3701 ext_state, state_a, state_b);
3702
3703 if (!store::can_merge_p (&m_store, &other_model.m_store,
3704 &out_model->m_store, m_mgr->get_store_manager (),
3705 &m))
3706 return false;
3707
3708 if (!m_dynamic_extents.can_merge_with_p (other_model.m_dynamic_extents,
3709 &out_model->m_dynamic_extents))
3710 return false;
3711
3712 /* Merge constraints. */
3713 constraint_manager::merge (*m_constraints,
3714 *other_model.m_constraints,
3715 out_model->m_constraints);
3716
3717 return true;
3718 }
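
/* Example (illustrative sketch, not compiled; follows the selftest
   conventions later in this file):

     program_point point (program_point::origin ());
     region_model merged (&mgr);
     if (model0.can_merge_with_p (model1, point, &merged))
       ... use "merged" ...

   Merging fails fast on a stack-frame mismatch; otherwise the stores
   and the dynamic extents are merged (either of which can veto the
   merge), and finally the constraints are merged, keeping only the
   facts common to both models.  */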
3719
3720 /* Attempt to get the fndecl used at CALL, if known, or NULL_TREE
3721 otherwise. */
3722
3723 tree
3724 region_model::get_fndecl_for_call (const gcall *call,
3725 region_model_context *ctxt)
3726 {
3727 tree fn_ptr = gimple_call_fn (call);
3728 if (fn_ptr == NULL_TREE)
3729 return NULL_TREE;
3730 const svalue *fn_ptr_sval = get_rvalue (fn_ptr, ctxt);
3731 if (const region_svalue *fn_ptr_ptr
3732 = fn_ptr_sval->dyn_cast_region_svalue ())
3733 {
3734 const region *reg = fn_ptr_ptr->get_pointee ();
3735 if (const function_region *fn_reg = reg->dyn_cast_function_region ())
3736 {
3737 tree fn_decl = fn_reg->get_fndecl ();
3738 cgraph_node *node = cgraph_node::get (fn_decl);
3739 if (!node)
3740 return NULL_TREE;
3741 const cgraph_node *ultimate_node = node->ultimate_alias_target ();
3742 if (ultimate_node)
3743 return ultimate_node->decl;
3744 }
3745 }
3746
3747 return NULL_TREE;
3748 }
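
/* Example (illustrative sketch, not compiled): for a call through a
   function pointer with a known value,

     void (*fp) (void) = some_fn;
     fp ();

   the rvalue of "fp" is a region_svalue pointing at the
   function_region for "some_fn", so this returns some_fn's
   FUNCTION_DECL (resolved through any aliases); for a genuinely
   unknown function pointer it returns NULL_TREE.  */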
3749
3750 /* Would be much simpler to use a lambda here, if it were supported. */
3751
3752 struct append_ssa_names_cb_data
3753 {
3754 const region_model *model;
3755 auto_vec<const decl_region *> *out;
3756 };
3757
3758 /* Populate *OUT with all decl_regions for SSA names in the current
3759 frame that have clusters within the store. */
3760
3761 void
3762 region_model::
3763 get_ssa_name_regions_for_current_frame (auto_vec<const decl_region *> *out)
3764 const
3765 {
3766 append_ssa_names_cb_data data;
3767 data.model = this;
3768 data.out = out;
3769 m_store.for_each_cluster (append_ssa_names_cb, &data);
3770 }
3771
3772 /* Implementation detail of get_ssa_name_regions_for_current_frame. */
3773
3774 void
3775 region_model::append_ssa_names_cb (const region *base_reg,
3776 append_ssa_names_cb_data *cb_data)
3777 {
3778 if (base_reg->get_parent_region () != cb_data->model->m_current_frame)
3779 return;
3780 if (const decl_region *decl_reg = base_reg->dyn_cast_decl_region ())
3781 {
3782 if (TREE_CODE (decl_reg->get_decl ()) == SSA_NAME)
3783 cb_data->out->safe_push (decl_reg);
3784 }
3785 }
3786
3787 /* Return a new region describing a heap-allocated block of memory.
3788 Use CTXT to complain about tainted sizes. */
3789
3790 const region *
3791 region_model::create_region_for_heap_alloc (const svalue *size_in_bytes,
3792 region_model_context *ctxt)
3793 {
3794 const region *reg = m_mgr->create_region_for_heap_alloc ();
3795 if (compat_types_p (size_in_bytes->get_type (), size_type_node))
3796 set_dynamic_extents (reg, size_in_bytes, ctxt);
3797 return reg;
3798 }
3799
3800 /* Return a new region describing a block of memory allocated within the
3801 current frame.
3802 Use CTXT to complain about tainted sizes. */
3803
3804 const region *
3805 region_model::create_region_for_alloca (const svalue *size_in_bytes,
3806 region_model_context *ctxt)
3807 {
3808 const region *reg = m_mgr->create_region_for_alloca (m_current_frame);
3809 if (compat_types_p (size_in_bytes->get_type (), size_type_node))
3810 set_dynamic_extents (reg, size_in_bytes, ctxt);
3811 return reg;
3812 }
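
/* Example (illustrative sketch, not compiled; "size_expr" is a
   placeholder name): handling something like "p = malloc (n);":

     const svalue *size_sval = model.get_rvalue (size_expr, ctxt);
     const region *new_reg
       = model.create_region_for_heap_alloc (size_sval, ctxt);

   The size becomes the new region's dynamic extent (when its type is
   compatible with size_type_node); create_region_for_alloca differs
   only in parenting the new region within the current frame, so that
   it is reclaimed when the frame is popped.  */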
3813
3814 /* Record that the size of REG is SIZE_IN_BYTES.
3815 Use CTXT to complain about tainted sizes. */
3816
3817 void
3818 region_model::set_dynamic_extents (const region *reg,
3819 const svalue *size_in_bytes,
3820 region_model_context *ctxt)
3821 {
3822 assert_compat_types (size_in_bytes->get_type (), size_type_node);
3823 if (ctxt)
3824 check_dynamic_size_for_taint (reg->get_memory_space (), size_in_bytes,
3825 ctxt);
3826 m_dynamic_extents.put (reg, size_in_bytes);
3827 }
3828
3829 /* Get the size recorded for REG in bytes, or NULL if no dynamic size
3830 was recorded. */
3831
3832 const svalue *
3833 region_model::get_dynamic_extents (const region *reg) const
3834 {
3835 if (const svalue * const *slot = m_dynamic_extents.get (reg))
3836 return *slot;
3837 return NULL;
3838 }
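
/* Example (illustrative sketch, not compiled): continuing the
   heap-alloc sketch above,

     const svalue *size_sval = model.get_dynamic_extents (new_reg);

   retrieves the recorded size svalue, whereas it returns NULL for
   regions (e.g. ordinary decls) that never had a dynamic extent
   recorded.  */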
3839
3840 /* Unset any recorded dynamic size of REG. */
3841
3842 void
3843 region_model::unset_dynamic_extents (const region *reg)
3844 {
3845 m_dynamic_extents.remove (reg);
3846 }
3847
3848 /* class noop_region_model_context : public region_model_context. */
3849
3850 void
3851 noop_region_model_context::bifurcate (custom_edge_info *info)
3852 {
3853 delete info;
3854 }
3855
3856 void
3857 noop_region_model_context::terminate_path ()
3858 {
3859 }
3860
3861 /* struct model_merger. */
3862
3863 /* Dump a multiline representation of this merger to PP. */
3864
3865 void
3866 model_merger::dump_to_pp (pretty_printer *pp, bool simple) const
3867 {
3868 pp_string (pp, "model A:");
3869 pp_newline (pp);
3870 m_model_a->dump_to_pp (pp, simple, true);
3871 pp_newline (pp);
3872
3873 pp_string (pp, "model B:");
3874 pp_newline (pp);
3875 m_model_b->dump_to_pp (pp, simple, true);
3876 pp_newline (pp);
3877
3878 pp_string (pp, "merged model:");
3879 pp_newline (pp);
3880 m_merged_model->dump_to_pp (pp, simple, true);
3881 pp_newline (pp);
3882 }
3883
3884 /* Dump a multiline representation of this merger to FILE. */
3885
3886 void
3887 model_merger::dump (FILE *fp, bool simple) const
3888 {
3889 pretty_printer pp;
3890 pp_format_decoder (&pp) = default_tree_printer;
3891 pp_show_color (&pp) = pp_show_color (global_dc->printer);
3892 pp.buffer->stream = fp;
3893 dump_to_pp (&pp, simple);
3894 pp_flush (&pp);
3895 }
3896
3897 /* Dump a multiline representation of this merger to stderr. */
3898
3899 DEBUG_FUNCTION void
3900 model_merger::dump (bool simple) const
3901 {
3902 dump (stderr, simple);
3903 }
3904
3905 /* Return true if it's OK to merge SVAL with other svalues. */
3906
3907 bool
3908 model_merger::mergeable_svalue_p (const svalue *sval) const
3909 {
3910 if (m_ext_state)
3911 {
3912 /* Reject merging svalues that have non-purgable sm-state,
3913 to avoid falsely reporting memory leaks by merging them
3914 with something else. For example, given a local var "p",
3915 reject the merger of a:
3916 store_a mapping "p" to a malloc-ed ptr
3917 with:
3918 store_b mapping "p" to a NULL ptr. */
3919 if (m_state_a)
3920 if (!m_state_a->can_purge_p (*m_ext_state, sval))
3921 return false;
3922 if (m_state_b)
3923 if (!m_state_b->can_purge_p (*m_ext_state, sval))
3924 return false;
3925 }
3926 return true;
3927 }
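
/* Example (illustrative sketch, not compiled): with the malloc state
   machine, consider merging

     state A: "p" bound to the result of malloc (non-purgable sm-state)
     state B: "p" bound to a NULL pointer

   Merging the two svalues would lose track of the allocation and later
   look like a leak, so mergeable_svalue_p returns false for A's
   pointer svalue, and the two program states are kept apart.  */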
3928
3929 } // namespace ana
3930
3931 /* Dump RMODEL fully to stderr (i.e. without summarization). */
3932
3933 DEBUG_FUNCTION void
3934 debug (const region_model &rmodel)
3935 {
3936 rmodel.dump (false);
3937 }
3938
3939 /* class rejected_op_constraint : public rejected_constraint. */
3940
3941 void
3942 rejected_op_constraint::dump_to_pp (pretty_printer *pp) const
3943 {
3944 region_model m (m_model);
3945 const svalue *lhs_sval = m.get_rvalue (m_lhs, NULL);
3946 const svalue *rhs_sval = m.get_rvalue (m_rhs, NULL);
3947 lhs_sval->dump_to_pp (pp, true);
3948 pp_printf (pp, " %s ", op_symbol_code (m_op));
3949 rhs_sval->dump_to_pp (pp, true);
3950 }
3951
3952 /* class rejected_ranges_constraint : public rejected_constraint. */
3953
3954 void
3955 rejected_ranges_constraint::dump_to_pp (pretty_printer *pp) const
3956 {
3957 region_model m (m_model);
3958 const svalue *sval = m.get_rvalue (m_expr, NULL);
3959 sval->dump_to_pp (pp, true);
3960 pp_string (pp, " in ");
3961 m_ranges->dump_to_pp (pp, true);
3962 }
3963
3964 /* class engine. */
3965
3966 /* Dump the managed objects by class to LOGGER, and the per-class totals. */
3967
3968 void
3969 engine::log_stats (logger *logger) const
3970 {
3971 m_mgr.log_stats (logger, true);
3972 }
3973
3974 namespace ana {
3975
3976 #if CHECKING_P
3977
3978 namespace selftest {
3979
3980 /* Build a constant tree of the given type from STR. */
3981
3982 static tree
3983 build_real_cst_from_string (tree type, const char *str)
3984 {
3985 REAL_VALUE_TYPE real;
3986 real_from_string (&real, str);
3987 return build_real (type, real);
3988 }
3989
3990 /* Append various "interesting" constants to OUT (e.g. NaN). */
3991
3992 static void
3993 append_interesting_constants (auto_vec<tree> *out)
3994 {
3995 out->safe_push (build_int_cst (integer_type_node, 0));
3996 out->safe_push (build_int_cst (integer_type_node, 42));
3997 out->safe_push (build_int_cst (unsigned_type_node, 0));
3998 out->safe_push (build_int_cst (unsigned_type_node, 42));
3999 out->safe_push (build_real_cst_from_string (float_type_node, "QNaN"));
4000 out->safe_push (build_real_cst_from_string (float_type_node, "-QNaN"));
4001 out->safe_push (build_real_cst_from_string (float_type_node, "SNaN"));
4002 out->safe_push (build_real_cst_from_string (float_type_node, "-SNaN"));
4003 out->safe_push (build_real_cst_from_string (float_type_node, "0.0"));
4004 out->safe_push (build_real_cst_from_string (float_type_node, "-0.0"));
4005 out->safe_push (build_real_cst_from_string (float_type_node, "Inf"));
4006 out->safe_push (build_real_cst_from_string (float_type_node, "-Inf"));
4007 }
4008
4009 /* Verify that tree_cmp is a well-behaved comparator for qsort, even
4010 if the underlying constants aren't comparable. */
4011
4012 static void
4013 test_tree_cmp_on_constants ()
4014 {
4015 auto_vec<tree> csts;
4016 append_interesting_constants (&csts);
4017
4018 /* Try sorting every triple. */
4019 const unsigned num = csts.length ();
4020 for (unsigned i = 0; i < num; i++)
4021 for (unsigned j = 0; j < num; j++)
4022 for (unsigned k = 0; k < num; k++)
4023 {
4024 auto_vec<tree> v (3);
4025 v.quick_push (csts[i]);
4026 v.quick_push (csts[j]);
4027 v.quick_push (csts[k]);
4028 v.qsort (tree_cmp);
4029 }
4030 }
4031
4032 /* Implementation detail of the ASSERT_CONDITION_* macros. */
4033
4034 void
4035 assert_condition (const location &loc,
4036 region_model &model,
4037 const svalue *lhs, tree_code op, const svalue *rhs,
4038 tristate expected)
4039 {
4040 tristate actual = model.eval_condition (lhs, op, rhs);
4041 ASSERT_EQ_AT (loc, actual, expected);
4042 }
4043
4044 /* Implementation detail of the ASSERT_CONDITION_* macros. */
4045
4046 void
4047 assert_condition (const location &loc,
4048 region_model &model,
4049 tree lhs, tree_code op, tree rhs,
4050 tristate expected)
4051 {
4052 tristate actual = model.eval_condition (lhs, op, rhs, NULL);
4053 ASSERT_EQ_AT (loc, actual, expected);
4054 }
4055
4056 /* Implementation detail of ASSERT_DUMP_TREE_EQ. */
4057
4058 static void
4059 assert_dump_tree_eq (const location &loc, tree t, const char *expected)
4060 {
4061 auto_fix_quotes sentinel;
4062 pretty_printer pp;
4063 pp_format_decoder (&pp) = default_tree_printer;
4064 dump_tree (&pp, t);
4065 ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
4066 }
4067
4068 /* Assert that dump_tree (T) is EXPECTED. */
4069
4070 #define ASSERT_DUMP_TREE_EQ(T, EXPECTED) \
4071 SELFTEST_BEGIN_STMT \
4072 assert_dump_tree_eq ((SELFTEST_LOCATION), (T), (EXPECTED)); \
4073 SELFTEST_END_STMT
4074
4075 /* Implementation detail of ASSERT_DUMP_EQ. */
4076
4077 static void
4078 assert_dump_eq (const location &loc,
4079 const region_model &model,
4080 bool summarize,
4081 const char *expected)
4082 {
4083 auto_fix_quotes sentinel;
4084 pretty_printer pp;
4085 pp_format_decoder (&pp) = default_tree_printer;
4086
4087 model.dump_to_pp (&pp, summarize, true);
4088 ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
4089 }
4090
4091 /* Assert that MODEL.dump_to_pp (SUMMARIZE) is EXPECTED. */
4092
4093 #define ASSERT_DUMP_EQ(MODEL, SUMMARIZE, EXPECTED) \
4094 SELFTEST_BEGIN_STMT \
4095 assert_dump_eq ((SELFTEST_LOCATION), (MODEL), (SUMMARIZE), (EXPECTED)); \
4096 SELFTEST_END_STMT
4097
4098 /* Smoketest for region_model::dump_to_pp. */
4099
4100 static void
4101 test_dump ()
4102 {
4103 region_model_manager mgr;
4104 region_model model (&mgr);
4105
4106 ASSERT_DUMP_EQ (model, false,
4107 "stack depth: 0\n"
4108 "m_called_unknown_fn: FALSE\n"
4109 "constraint_manager:\n"
4110 " equiv classes:\n"
4111 " constraints:\n");
4112 ASSERT_DUMP_EQ (model, true,
4113 "stack depth: 0\n"
4114 "m_called_unknown_fn: FALSE\n"
4115 "constraint_manager:\n"
4116 " equiv classes:\n"
4117 " constraints:\n");
4118 }
4119
4120 /* Helper function for selftests. Create a struct or union type named NAME,
4121 with the fields given by the FIELD_DECLS in FIELDS.
4122 If IS_STRUCT is true create a RECORD_TYPE (aka a struct), otherwise
4123 create a UNION_TYPE. */
4124
4125 static tree
4126 make_test_compound_type (const char *name, bool is_struct,
4127 const auto_vec<tree> *fields)
4128 {
4129 tree t = make_node (is_struct ? RECORD_TYPE : UNION_TYPE);
4130 TYPE_NAME (t) = get_identifier (name);
4131 TYPE_SIZE (t) = 0;
4132
4133 tree fieldlist = NULL;
4134 int i;
4135 tree field;
4136 FOR_EACH_VEC_ELT (*fields, i, field)
4137 {
4138 gcc_assert (TREE_CODE (field) == FIELD_DECL);
4139 DECL_CONTEXT (field) = t;
4140 fieldlist = chainon (field, fieldlist);
4141 }
4142 fieldlist = nreverse (fieldlist);
4143 TYPE_FIELDS (t) = fieldlist;
4144
4145 layout_type (t);
4146 return t;
4147 }
4148
4149 /* Selftest fixture for creating the type "struct coord {int x; int y; };". */
4150
4151 struct coord_test
4152 {
4153 coord_test ()
4154 {
4155 auto_vec<tree> fields;
4156 m_x_field = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
4157 get_identifier ("x"), integer_type_node);
4158 fields.safe_push (m_x_field);
4159 m_y_field = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
4160 get_identifier ("y"), integer_type_node);
4161 fields.safe_push (m_y_field);
4162 m_coord_type = make_test_compound_type ("coord", true, &fields);
4163 }
4164
4165 tree m_x_field;
4166 tree m_y_field;
4167 tree m_coord_type;
4168 };
4169
4170 /* Verify usage of a struct. */
4171
4172 static void
4173 test_struct ()
4174 {
4175 coord_test ct;
4176
4177 tree c = build_global_decl ("c", ct.m_coord_type);
4178 tree c_x = build3 (COMPONENT_REF, TREE_TYPE (ct.m_x_field),
4179 c, ct.m_x_field, NULL_TREE);
4180 tree c_y = build3 (COMPONENT_REF, TREE_TYPE (ct.m_y_field),
4181 c, ct.m_y_field, NULL_TREE);
4182
4183 tree int_17 = build_int_cst (integer_type_node, 17);
4184 tree int_m3 = build_int_cst (integer_type_node, -3);
4185
4186 region_model_manager mgr;
4187 region_model model (&mgr);
4188 model.set_value (c_x, int_17, NULL);
4189 model.set_value (c_y, int_m3, NULL);
4190
4191 /* Verify get_offset for "c.x". */
4192 {
4193 const region *c_x_reg = model.get_lvalue (c_x, NULL);
4194 region_offset offset = c_x_reg->get_offset ();
4195 ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, NULL));
4196 ASSERT_EQ (offset.get_bit_offset (), 0);
4197 }
4198
4199 /* Verify get_offset for "c.y". */
4200 {
4201 const region *c_y_reg = model.get_lvalue (c_y, NULL);
4202 region_offset offset = c_y_reg->get_offset ();
4203 ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, NULL));
4204 ASSERT_EQ (offset.get_bit_offset (), INT_TYPE_SIZE);
4205 }
4206 }
4207
4208 /* Verify usage of an array element. */
4209
4210 static void
4211 test_array_1 ()
4212 {
4213 tree tlen = size_int (10);
4214 tree arr_type = build_array_type (char_type_node, build_index_type (tlen));
4215
4216 tree a = build_global_decl ("a", arr_type);
4217
4218 region_model_manager mgr;
4219 region_model model (&mgr);
4220 tree int_0 = build_int_cst (integer_type_node, 0);
4221 tree a_0 = build4 (ARRAY_REF, char_type_node,
4222 a, int_0, NULL_TREE, NULL_TREE);
4223 tree char_A = build_int_cst (char_type_node, 'A');
4224 model.set_value (a_0, char_A, NULL);
4225 }
4226
4227 /* Verify that region_model::get_representative_tree works as expected. */
4228
4229 static void
4230 test_get_representative_tree ()
4231 {
4232 region_model_manager mgr;
4233
4234 /* STRING_CST. */
4235 {
4236 tree string_cst = build_string (4, "foo");
4237 region_model m (&mgr);
4238 const svalue *str_sval = m.get_rvalue (string_cst, NULL);
4239 tree rep = m.get_representative_tree (str_sval);
4240 ASSERT_EQ (rep, string_cst);
4241 }
4242
4243 /* String literal. */
4244 {
4245 tree string_cst_ptr = build_string_literal (4, "foo");
4246 region_model m (&mgr);
4247 const svalue *str_sval = m.get_rvalue (string_cst_ptr, NULL);
4248 tree rep = m.get_representative_tree (str_sval);
4249 ASSERT_DUMP_TREE_EQ (rep, "&\"foo\"[0]");
4250 }
4251
4252 /* Value of an element within an array. */
4253 {
4254 tree tlen = size_int (10);
4255 tree arr_type = build_array_type (char_type_node, build_index_type (tlen));
4256 tree a = build_global_decl ("a", arr_type);
4257 placeholder_svalue test_sval (char_type_node, "test value");
4258
4259 /* Value of a[3]. */
4260 {
4261 test_region_model_context ctxt;
4262 region_model model (&mgr);
4263 tree int_3 = build_int_cst (integer_type_node, 3);
4264 tree a_3 = build4 (ARRAY_REF, char_type_node,
4265 a, int_3, NULL_TREE, NULL_TREE);
4266 const region *a_3_reg = model.get_lvalue (a_3, &ctxt);
4267 model.set_value (a_3_reg, &test_sval, &ctxt);
4268 tree rep = model.get_representative_tree (&test_sval);
4269 ASSERT_DUMP_TREE_EQ (rep, "a[3]");
4270 }
4271
4272 /* Value of a[0]. */
4273 {
4274 test_region_model_context ctxt;
4275 region_model model (&mgr);
4276 tree idx = build_int_cst (integer_type_node, 0);
4277 tree a_0 = build4 (ARRAY_REF, char_type_node,
4278 a, idx, NULL_TREE, NULL_TREE);
4279 const region *a_0_reg = model.get_lvalue (a_0, &ctxt);
4280 model.set_value (a_0_reg, &test_sval, &ctxt);
4281 tree rep = model.get_representative_tree (&test_sval);
4282 ASSERT_DUMP_TREE_EQ (rep, "a[0]");
4283 }
4284 }
4285
4286 /* Value of a field within a struct. */
4287 {
4288 coord_test ct;
4289
4290 tree c = build_global_decl ("c", ct.m_coord_type);
4291 tree c_x = build3 (COMPONENT_REF, TREE_TYPE (ct.m_x_field),
4292 c, ct.m_x_field, NULL_TREE);
4293 tree c_y = build3 (COMPONENT_REF, TREE_TYPE (ct.m_y_field),
4294 c, ct.m_y_field, NULL_TREE);
4295
4296 test_region_model_context ctxt;
4297
4298 /* Value of initial field. */
4299 {
4300 region_model m (&mgr);
4301 const region *c_x_reg = m.get_lvalue (c_x, &ctxt);
4302 placeholder_svalue test_sval_x (integer_type_node, "test x val");
4303 m.set_value (c_x_reg, &test_sval_x, &ctxt);
4304 tree rep = m.get_representative_tree (&test_sval_x);
4305 ASSERT_DUMP_TREE_EQ (rep, "c.x");
4306 }
4307
4308 /* Value of non-initial field. */
4309 {
4310 region_model m (&mgr);
4311 const region *c_y_reg = m.get_lvalue (c_y, &ctxt);
4312 placeholder_svalue test_sval_y (integer_type_node, "test y val");
4313 m.set_value (c_y_reg, &test_sval_y, &ctxt);
4314 tree rep = m.get_representative_tree (&test_sval_y);
4315 ASSERT_DUMP_TREE_EQ (rep, "c.y");
4316 }
4317 }
4318 }
4319
4320 /* Verify that calling region_model::get_rvalue repeatedly on the same
4321 tree constant retrieves the same svalue *. */
4322
4323 static void
4324 test_unique_constants ()
4325 {
4326 tree int_0 = build_int_cst (integer_type_node, 0);
4327 tree int_42 = build_int_cst (integer_type_node, 42);
4328
4329 test_region_model_context ctxt;
4330 region_model_manager mgr;
4331 region_model model (&mgr);
4332 ASSERT_EQ (model.get_rvalue (int_0, &ctxt), model.get_rvalue (int_0, &ctxt));
4333 ASSERT_EQ (model.get_rvalue (int_42, &ctxt),
4334 model.get_rvalue (int_42, &ctxt));
4335 ASSERT_NE (model.get_rvalue (int_0, &ctxt), model.get_rvalue (int_42, &ctxt));
4336 ASSERT_EQ (ctxt.get_num_diagnostics (), 0);
4337
4338 /* A "(const int)42" will be a different tree from "(int)42)"... */
4339 tree const_int_type_node
4340 = build_qualified_type (integer_type_node, TYPE_QUAL_CONST);
4341 tree const_int_42 = build_int_cst (const_int_type_node, 42);
4342 ASSERT_NE (int_42, const_int_42);
4343 /* It should have a different const_svalue. */
4344 const svalue *int_42_sval = model.get_rvalue (int_42, &ctxt);
4345 const svalue *const_int_42_sval = model.get_rvalue (const_int_42, &ctxt);
4346 ASSERT_NE (int_42_sval, const_int_42_sval);
4347 /* But they should compare as equal. */
4348 ASSERT_CONDITION_TRUE (model, int_42_sval, EQ_EXPR, const_int_42_sval);
4349 ASSERT_CONDITION_FALSE (model, int_42_sval, NE_EXPR, const_int_42_sval);
4350 }
4351
4352 /* Verify that each type gets its own singleton unknown_svalue within a
4353 region_model_manager, and that NULL_TREE gets its own singleton. */
4354
4355 static void
4356 test_unique_unknowns ()
4357 {
4358 region_model_manager mgr;
4359 const svalue *unknown_int
4360 = mgr.get_or_create_unknown_svalue (integer_type_node);
4361 /* Repeated calls with the same type should get the same "unknown"
4362 svalue. */
4363 const svalue *unknown_int_2
4364 = mgr.get_or_create_unknown_svalue (integer_type_node);
4365 ASSERT_EQ (unknown_int, unknown_int_2);
4366
4367 /* Different types (or the NULL type) should have different
4368 unknown_svalues. */
4369 const svalue *unknown_NULL_type = mgr.get_or_create_unknown_svalue (NULL);
4370 ASSERT_NE (unknown_NULL_type, unknown_int);
4371
4372 /* Repeated calls with NULL for the type should get the same "unknown"
4373 svalue. */
4374 const svalue *unknown_NULL_type_2 = mgr.get_or_create_unknown_svalue (NULL);
4375 ASSERT_EQ (unknown_NULL_type, unknown_NULL_type_2);
4376 }
4377
4378 /* Verify that initial_svalue are handled as expected. */
4379
4380 static void
4381 test_initial_svalue_folding ()
4382 {
4383 region_model_manager mgr;
4384 tree x = build_global_decl ("x", integer_type_node);
4385 tree y = build_global_decl ("y", integer_type_node);
4386
4387 test_region_model_context ctxt;
4388 region_model model (&mgr);
4389 const svalue *x_init = model.get_rvalue (x, &ctxt);
4390 const svalue *y_init = model.get_rvalue (y, &ctxt);
4391 ASSERT_NE (x_init, y_init);
4392 const region *x_reg = model.get_lvalue (x, &ctxt);
4393 ASSERT_EQ (x_init, mgr.get_or_create_initial_value (x_reg));
4395 }
4396
4397 /* Verify that unary ops are folded as expected. */
4398
4399 static void
4400 test_unaryop_svalue_folding ()
4401 {
4402 region_model_manager mgr;
4403 tree x = build_global_decl ("x", integer_type_node);
4404 tree y = build_global_decl ("y", integer_type_node);
4405
4406 test_region_model_context ctxt;
4407 region_model model (&mgr);
4408 const svalue *x_init = model.get_rvalue (x, &ctxt);
4409 const svalue *y_init = model.get_rvalue (y, &ctxt);
4410 const region *x_reg = model.get_lvalue (x, &ctxt);
4411 ASSERT_EQ (x_init, mgr.get_or_create_initial_value (x_reg));
4412
4413 /* "(int)x" -> "x". */
4414 ASSERT_EQ (x_init, mgr.get_or_create_cast (integer_type_node, x_init));
4415
4416 /* "(void *)x" -> something other than "x". */
4417 ASSERT_NE (x_init, mgr.get_or_create_cast (ptr_type_node, x_init));
4418
4419 /* "!(x == y)" -> "x != y". */
4420 ASSERT_EQ (mgr.get_or_create_unaryop
4421 (boolean_type_node, TRUTH_NOT_EXPR,
4422 mgr.get_or_create_binop (boolean_type_node, EQ_EXPR,
4423 x_init, y_init)),
4424 mgr.get_or_create_binop (boolean_type_node, NE_EXPR,
4425 x_init, y_init));
4426 /* "!(x > y)" -> "x <= y". */
4427 ASSERT_EQ (mgr.get_or_create_unaryop
4428 (boolean_type_node, TRUTH_NOT_EXPR,
4429 mgr.get_or_create_binop (boolean_type_node, GT_EXPR,
4430 x_init, y_init)),
4431 mgr.get_or_create_binop (boolean_type_node, LE_EXPR,
4432 x_init, y_init));
4433 }
4434
4435 /* Verify that binops on constant svalues are folded. */
4436
4437 static void
4438 test_binop_svalue_folding ()
4439 {
4440 #define NUM_CSTS 10
4441 tree cst_int[NUM_CSTS];
4442 region_model_manager mgr;
4443 const svalue *cst_sval[NUM_CSTS];
4444 for (int i = 0; i < NUM_CSTS; i++)
4445 {
4446 cst_int[i] = build_int_cst (integer_type_node, i);
4447 cst_sval[i] = mgr.get_or_create_constant_svalue (cst_int[i]);
4448 ASSERT_EQ (cst_sval[i]->get_kind (), SK_CONSTANT);
4449 ASSERT_EQ (cst_sval[i]->maybe_get_constant (), cst_int[i]);
4450 }
4451
4452 for (int i = 0; i < NUM_CSTS; i++)
4453 for (int j = 0; j < NUM_CSTS; j++)
4454 {
4455 if (i != j)
4456 ASSERT_NE (cst_sval[i], cst_sval[j]);
4457 if (i + j < NUM_CSTS)
4458 {
4459 const svalue *sum
4460 = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
4461 cst_sval[i], cst_sval[j]);
4462 ASSERT_EQ (sum, cst_sval[i + j]);
4463 }
4464 if (i - j >= 0)
4465 {
4466 const svalue *difference
4467 = mgr.get_or_create_binop (integer_type_node, MINUS_EXPR,
4468 cst_sval[i], cst_sval[j]);
4469 ASSERT_EQ (difference, cst_sval[i - j]);
4470 }
4471 if (i * j < NUM_CSTS)
4472 {
4473 const svalue *product
4474 = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
4475 cst_sval[i], cst_sval[j]);
4476 ASSERT_EQ (product, cst_sval[i * j]);
4477 }
4478 const svalue *eq = mgr.get_or_create_binop (integer_type_node, EQ_EXPR,
4479 cst_sval[i], cst_sval[j]);
4480 ASSERT_EQ (eq, i == j ? cst_sval[1] : cst_sval[0]);
4481 const svalue *neq = mgr.get_or_create_binop (integer_type_node, NE_EXPR,
4482 cst_sval[i], cst_sval[j]);
4483 ASSERT_EQ (neq, i != j ? cst_sval[1] : cst_sval[0]);
4484 // etc
4485 }
4486
4487 tree x = build_global_decl ("x", integer_type_node);
4488
4489 test_region_model_context ctxt;
4490 region_model model (&mgr);
4491 const svalue *x_init = model.get_rvalue (x, &ctxt);
4492
4493 /* PLUS_EXPR folding. */
4494 const svalue *x_init_plus_zero
4495 = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
4496 x_init, cst_sval[0]);
4497 ASSERT_EQ (x_init_plus_zero, x_init);
4498 const svalue *zero_plus_x_init
4499 = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
4500 cst_sval[0], x_init);
4501 ASSERT_EQ (zero_plus_x_init, x_init);
4502
4503 /* MULT_EXPR folding. */
4504 const svalue *x_init_times_zero
4505 = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
4506 x_init, cst_sval[0]);
4507 ASSERT_EQ (x_init_times_zero, cst_sval[0]);
4508 const svalue *zero_times_x_init
4509 = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
4510 cst_sval[0], x_init);
4511 ASSERT_EQ (zero_times_x_init, cst_sval[0]);
4512
4513 const svalue *x_init_times_one
4514 = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
4515 x_init, cst_sval[1]);
4516 ASSERT_EQ (x_init_times_one, x_init);
4517 const svalue *one_times_x_init
4518 = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
4519 cst_sval[1], x_init);
4520 ASSERT_EQ (one_times_x_init, x_init);
4521
4522 // etc
4523 // TODO: do we want to use the match-and-simplify DSL for this?
4524
4525 /* Verify that binops put any constants on the RHS. */
4526 const svalue *four_times_x_init
4527 = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
4528 cst_sval[4], x_init);
4529 const svalue *x_init_times_four
4530 = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
4531 x_init, cst_sval[4]);
4532 ASSERT_EQ (four_times_x_init, x_init_times_four);
4533 const binop_svalue *binop = four_times_x_init->dyn_cast_binop_svalue ();
4534 ASSERT_EQ (binop->get_op (), MULT_EXPR);
4535 ASSERT_EQ (binop->get_arg0 (), x_init);
4536 ASSERT_EQ (binop->get_arg1 (), cst_sval[4]);
4537
4538 /* Verify that ((x + 1) + 1) == (x + 2). */
4539 const svalue *x_init_plus_one
4540 = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
4541 x_init, cst_sval[1]);
4542 const svalue *x_init_plus_two
4543 = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
4544 x_init, cst_sval[2]);
4545 const svalue *x_init_plus_one_plus_one
4546 = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
4547 x_init_plus_one, cst_sval[1]);
4548 ASSERT_EQ (x_init_plus_one_plus_one, x_init_plus_two);
4549 }
4550
4551 /* Verify that sub_svalues are folded as expected. */
4552
4553 static void
4554 test_sub_svalue_folding ()
4555 {
4556 coord_test ct;
4557 tree c = build_global_decl ("c", ct.m_coord_type);
4558 tree c_x = build3 (COMPONENT_REF, TREE_TYPE (ct.m_x_field),
4559 c, ct.m_x_field, NULL_TREE);
4560
4561 region_model_manager mgr;
4562 region_model model (&mgr);
4563 test_region_model_context ctxt;
4564 const region *c_x_reg = model.get_lvalue (c_x, &ctxt);
4565
4566 /* Verify that sub_svalue of "unknown" simply
4567 yields an unknown. */
4568
4569 const svalue *unknown = mgr.get_or_create_unknown_svalue (ct.m_coord_type);
4570 const svalue *sub = mgr.get_or_create_sub_svalue (TREE_TYPE (ct.m_x_field),
4571 unknown, c_x_reg);
4572 ASSERT_EQ (sub->get_kind (), SK_UNKNOWN);
4573 ASSERT_EQ (sub->get_type (), TREE_TYPE (ct.m_x_field));
4574 }
4575
4576 /* Test that region::descendent_of_p works as expected. */
4577
4578 static void
4579 test_descendent_of_p ()
4580 {
4581 region_model_manager mgr;
4582 const region *stack = mgr.get_stack_region ();
4583 const region *heap = mgr.get_heap_region ();
4584 const region *code = mgr.get_code_region ();
4585 const region *globals = mgr.get_globals_region ();
4586
4587 /* descendent_of_p should return true when used on the region itself. */
4588 ASSERT_TRUE (stack->descendent_of_p (stack));
4589 ASSERT_FALSE (stack->descendent_of_p (heap));
4590 ASSERT_FALSE (stack->descendent_of_p (code));
4591 ASSERT_FALSE (stack->descendent_of_p (globals));
4592
4593 tree x = build_global_decl ("x", integer_type_node);
4594 const region *x_reg = mgr.get_region_for_global (x);
4595 ASSERT_TRUE (x_reg->descendent_of_p (globals));
4596
4597 /* A cast_region should be a descendent of the original region. */
4598 const region *cast_reg = mgr.get_cast_region (x_reg, ptr_type_node);
4599 ASSERT_TRUE (cast_reg->descendent_of_p (x_reg));
4600 }
4601
4602 /* Verify that simple assignments work as expected. */
4603
4604 static void
4605 test_assignment ()
4606 {
4607 tree int_0 = build_int_cst (integer_type_node, 0);
4608 tree x = build_global_decl ("x", integer_type_node);
4609 tree y = build_global_decl ("y", integer_type_node);
4610
4611 /* "x == 0", then use of y, then "y = 0;". */
4612 region_model_manager mgr;
4613 region_model model (&mgr);
4614 ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
4615 ASSERT_CONDITION_UNKNOWN (model, y, EQ_EXPR, int_0);
4616 model.set_value (model.get_lvalue (y, NULL),
4617 model.get_rvalue (int_0, NULL),
4618 NULL);
4619 ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, int_0);
4620 ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, x);
4621 }
4622
4623 /* Verify that compound assignments work as expected. */
4624
4625 static void
4626 test_compound_assignment ()
4627 {
4628 coord_test ct;
4629
4630 tree c = build_global_decl ("c", ct.m_coord_type);
4631 tree c_x = build3 (COMPONENT_REF, TREE_TYPE (ct.m_x_field),
4632 c, ct.m_x_field, NULL_TREE);
4633 tree c_y = build3 (COMPONENT_REF, TREE_TYPE (ct.m_y_field),
4634 c, ct.m_y_field, NULL_TREE);
4635 tree d = build_global_decl ("d", ct.m_coord_type);
4636 tree d_x = build3 (COMPONENT_REF, TREE_TYPE (ct.m_x_field),
4637 d, ct.m_x_field, NULL_TREE);
4638 tree d_y = build3 (COMPONENT_REF, TREE_TYPE (ct.m_y_field),
4639 d, ct.m_y_field, NULL_TREE);
4640
4641 tree int_17 = build_int_cst (integer_type_node, 17);
4642 tree int_m3 = build_int_cst (integer_type_node, -3);
4643
4644 region_model_manager mgr;
4645 region_model model (&mgr);
4646 model.set_value (c_x, int_17, NULL);
4647 model.set_value (c_y, int_m3, NULL);
4648
4649 /* Copy c to d. */
4650 model.copy_region (model.get_lvalue (d, NULL), model.get_lvalue (c, NULL),
4651 NULL);
4652 /* Check that the fields have the same svalues. */
4653 ASSERT_EQ (model.get_rvalue (c_x, NULL), model.get_rvalue (d_x, NULL));
4654 ASSERT_EQ (model.get_rvalue (c_y, NULL), model.get_rvalue (d_y, NULL));
4655 }
4656
4657 /* Verify the details of pushing and popping stack frames. */
4658
4659 static void
4660 test_stack_frames ()
4661 {
4662 tree int_42 = build_int_cst (integer_type_node, 42);
4663 tree int_10 = build_int_cst (integer_type_node, 10);
4664 tree int_5 = build_int_cst (integer_type_node, 5);
4665 tree int_0 = build_int_cst (integer_type_node, 0);
4666
4667 auto_vec <tree> param_types;
4668 tree parent_fndecl = make_fndecl (integer_type_node,
4669 "parent_fn",
4670 param_types);
4671 allocate_struct_function (parent_fndecl, true);
4672
4673 tree child_fndecl = make_fndecl (integer_type_node,
4674 "child_fn",
4675 param_types);
4676 allocate_struct_function (child_fndecl, true);
4677
4678 /* "a" and "b" in the parent frame. */
4679 tree a = build_decl (UNKNOWN_LOCATION, PARM_DECL,
4680 get_identifier ("a"),
4681 integer_type_node);
4682 tree b = build_decl (UNKNOWN_LOCATION, PARM_DECL,
4683 get_identifier ("b"),
4684 integer_type_node);
4685 /* "x" and "y" in a child frame. */
4686 tree x = build_decl (UNKNOWN_LOCATION, PARM_DECL,
4687 get_identifier ("x"),
4688 integer_type_node);
4689 tree y = build_decl (UNKNOWN_LOCATION, PARM_DECL,
4690 get_identifier ("y"),
4691 integer_type_node);
4692
4693 /* "p" global. */
4694 tree p = build_global_decl ("p", ptr_type_node);
4695
4696 /* "q" global. */
4697 tree q = build_global_decl ("q", ptr_type_node);
4698
4699 region_model_manager mgr;
4700 test_region_model_context ctxt;
4701 region_model model (&mgr);
4702
4703 /* Push stack frame for "parent_fn". */
4704 const region *parent_frame_reg
4705 = model.push_frame (DECL_STRUCT_FUNCTION (parent_fndecl),
4706 NULL, &ctxt);
4707 ASSERT_EQ (model.get_current_frame (), parent_frame_reg);
4708 ASSERT_TRUE (model.region_exists_p (parent_frame_reg));
4709 const region *a_in_parent_reg = model.get_lvalue (a, &ctxt);
4710 model.set_value (a_in_parent_reg,
4711 model.get_rvalue (int_42, &ctxt),
4712 &ctxt);
4713 ASSERT_EQ (a_in_parent_reg->maybe_get_frame_region (), parent_frame_reg);
4714
4715 model.add_constraint (b, LT_EXPR, int_10, &ctxt);
4716 ASSERT_EQ (model.eval_condition (b, LT_EXPR, int_10, &ctxt),
4717 tristate (tristate::TS_TRUE));
4718
4719 /* Push stack frame for "child_fn". */
4720 const region *child_frame_reg
4721 = model.push_frame (DECL_STRUCT_FUNCTION (child_fndecl), NULL, &ctxt);
4722 ASSERT_EQ (model.get_current_frame (), child_frame_reg);
4723 ASSERT_TRUE (model.region_exists_p (child_frame_reg));
4724 const region *x_in_child_reg = model.get_lvalue (x, &ctxt);
4725 model.set_value (x_in_child_reg,
4726 model.get_rvalue (int_0, &ctxt),
4727 &ctxt);
4728 ASSERT_EQ (x_in_child_reg->maybe_get_frame_region (), child_frame_reg);
4729
4730 model.add_constraint (y, NE_EXPR, int_5, &ctxt);
4731 ASSERT_EQ (model.eval_condition (y, NE_EXPR, int_5, &ctxt),
4732 tristate (tristate::TS_TRUE));
4733
4734 /* Point a global pointer at a local in the child frame: p = &x. */
4735 const region *p_in_globals_reg = model.get_lvalue (p, &ctxt);
4736 model.set_value (p_in_globals_reg,
4737 mgr.get_ptr_svalue (ptr_type_node, x_in_child_reg),
4738 &ctxt);
4739 ASSERT_EQ (p_in_globals_reg->maybe_get_frame_region (), NULL);
4740
4741 /* Point another global pointer at p: q = &p. */
4742 const region *q_in_globals_reg = model.get_lvalue (q, &ctxt);
4743 model.set_value (q_in_globals_reg,
4744 mgr.get_ptr_svalue (ptr_type_node, p_in_globals_reg),
4745 &ctxt);
4746
4747 /* Test region::descendent_of_p. */
4748 ASSERT_TRUE (child_frame_reg->descendent_of_p (child_frame_reg));
4749 ASSERT_TRUE (x_in_child_reg->descendent_of_p (child_frame_reg));
4750 ASSERT_FALSE (a_in_parent_reg->descendent_of_p (child_frame_reg));
4751
4752 /* Pop the "child_fn" frame from the stack. */
4753 model.pop_frame (NULL, NULL, &ctxt);
4754 ASSERT_FALSE (model.region_exists_p (child_frame_reg));
4755 ASSERT_TRUE (model.region_exists_p (parent_frame_reg));
4756
4757 /* Verify that p (which was pointing at the local "x" in the popped
4758 frame) has been poisoned. */
4759 const svalue *new_p_sval = model.get_rvalue (p, NULL);
4760 ASSERT_EQ (new_p_sval->get_kind (), SK_POISONED);
4761 ASSERT_EQ (new_p_sval->dyn_cast_poisoned_svalue ()->get_poison_kind (),
4762 POISON_KIND_POPPED_STACK);
4763
4764 /* Verify that q still points to p, in spite of the region
4765 renumbering. */
4766 const svalue *new_q_sval = model.get_rvalue (q, &ctxt);
4767 ASSERT_EQ (new_q_sval->get_kind (), SK_REGION);
4768 ASSERT_EQ (new_q_sval->maybe_get_region (),
4769 model.get_lvalue (p, &ctxt));
4770
4771 /* Verify that top of stack has been updated. */
4772 ASSERT_EQ (model.get_current_frame (), parent_frame_reg);
4773
4774 /* Verify locals in parent frame. */
4775 /* Verify "a" still has its value. */
4776 const svalue *new_a_sval = model.get_rvalue (a, &ctxt);
4777 ASSERT_EQ (new_a_sval->get_kind (), SK_CONSTANT);
4778 ASSERT_EQ (new_a_sval->dyn_cast_constant_svalue ()->get_constant (),
4779 int_42);
4780 /* Verify "b" still has its constraint. */
4781 ASSERT_EQ (model.eval_condition (b, LT_EXPR, int_10, &ctxt),
4782 tristate (tristate::TS_TRUE));
4783 }
4784
4785 /* Verify that get_representative_path_var works as expected, that
4786 we can map from regions to parms and back within a recursive call
4787 stack. */
4788
4789 static void
4790 test_get_representative_path_var ()
4791 {
4792 auto_vec <tree> param_types;
4793 tree fndecl = make_fndecl (integer_type_node,
4794 "factorial",
4795 param_types);
4796 allocate_struct_function (fndecl, true);
4797
4798 /* Parm "n". */
4799 tree n = build_decl (UNKNOWN_LOCATION, PARM_DECL,
4800 get_identifier ("n"),
4801 integer_type_node);
4802
4803 region_model_manager mgr;
4804 test_region_model_context ctxt;
4805 region_model model (&mgr);
4806
4807 /* Push 5 stack frames for "factorial", each with a param. */
4808 auto_vec<const region *> parm_regs;
4809 auto_vec<const svalue *> parm_svals;
4810 for (int depth = 0; depth < 5; depth++)
4811 {
4812 const region *frame_n_reg
4813 = model.push_frame (DECL_STRUCT_FUNCTION (fndecl), NULL, &ctxt);
4814 const region *parm_n_reg = model.get_lvalue (path_var (n, depth), &ctxt);
4815 parm_regs.safe_push (parm_n_reg);
4816
4817 ASSERT_EQ (parm_n_reg->get_parent_region (), frame_n_reg);
4818 const svalue *sval_n = mgr.get_or_create_initial_value (parm_n_reg);
4819 parm_svals.safe_push (sval_n);
4820 }
4821
4822 /* Verify that we can recognize that the regions are the parms,
4823 at every depth. */
4824 for (int depth = 0; depth < 5; depth++)
4825 {
4826 {
4827 svalue_set visited;
4828 ASSERT_EQ (model.get_representative_path_var (parm_regs[depth],
4829 &visited),
4830 path_var (n, depth + 1));
4831 }
4832 /* ...and that we can lookup lvalues for locals for all frames,
4833 not just the top. */
4834 ASSERT_EQ (model.get_lvalue (path_var (n, depth), NULL),
4835 parm_regs[depth]);
4836 /* ...and that we can locate the svalues. */
4837 {
4838 svalue_set visited;
4839 ASSERT_EQ (model.get_representative_path_var (parm_svals[depth],
4840 &visited),
4841 path_var (n, depth + 1));
4842 }
4843 }
4844 }
4845
4846 /* Ensure that region_model::operator== works as expected. */
4847
4848 static void
4849 test_equality_1 ()
4850 {
4851 tree int_42 = build_int_cst (integer_type_node, 42);
4852 tree int_17 = build_int_cst (integer_type_node, 17);
4853
4854 /* Verify that "empty" region_model instances are equal to each other. */
4855 region_model_manager mgr;
4856 region_model model0 (&mgr);
4857 region_model model1 (&mgr);
4858 ASSERT_EQ (model0, model1);
4859
4860 /* Verify that setting state in model1 makes the models non-equal. */
4861 tree x = build_global_decl ("x", integer_type_node);
4862 model0.set_value (x, int_42, NULL);
4863 ASSERT_EQ (model0.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
4864 ASSERT_NE (model0, model1);
4865
4866 /* Verify the copy-ctor. */
4867 region_model model2 (model0);
4868 ASSERT_EQ (model0, model2);
4869 ASSERT_EQ (model2.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
4870 ASSERT_NE (model1, model2);
4871
4872 /* Verify that models obtained from copy-ctor are independently editable
4873 w/o affecting the original model. */
4874 model2.set_value (x, int_17, NULL);
4875 ASSERT_NE (model0, model2);
4876 ASSERT_EQ (model2.get_rvalue (x, NULL)->maybe_get_constant (), int_17);
4877 ASSERT_EQ (model0.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
4878 }
4879
4880 /* Verify that region models for
4881 x = 42; y = 113;
4882 and
4883 y = 113; x = 42;
4884 are equal. */
4885
4886 static void
4887 test_canonicalization_2 ()
4888 {
4889 tree int_42 = build_int_cst (integer_type_node, 42);
4890 tree int_113 = build_int_cst (integer_type_node, 113);
4891 tree x = build_global_decl ("x", integer_type_node);
4892 tree y = build_global_decl ("y", integer_type_node);
4893
4894 region_model_manager mgr;
4895 region_model model0 (&mgr);
4896 model0.set_value (model0.get_lvalue (x, NULL),
4897 model0.get_rvalue (int_42, NULL),
4898 NULL);
4899 model0.set_value (model0.get_lvalue (y, NULL),
4900 model0.get_rvalue (int_113, NULL),
4901 NULL);
4902
4903 region_model model1 (&mgr);
4904 model1.set_value (model1.get_lvalue (y, NULL),
4905 model1.get_rvalue (int_113, NULL),
4906 NULL);
4907 model1.set_value (model1.get_lvalue (x, NULL),
4908 model1.get_rvalue (int_42, NULL),
4909 NULL);
4910
4911 ASSERT_EQ (model0, model1);
4912 }
4913
4914 /* Verify that constraints for
4915 x > 3 && y > 42
4916 and
4917 y > 42 && x > 3
4918 are equal after canonicalization. */
4919
4920 static void
4921 test_canonicalization_3 ()
4922 {
4923 tree int_3 = build_int_cst (integer_type_node, 3);
4924 tree int_42 = build_int_cst (integer_type_node, 42);
4925 tree x = build_global_decl ("x", integer_type_node);
4926 tree y = build_global_decl ("y", integer_type_node);
4927
4928 region_model_manager mgr;
4929 region_model model0 (&mgr);
4930 model0.add_constraint (x, GT_EXPR, int_3, NULL);
4931 model0.add_constraint (y, GT_EXPR, int_42, NULL);
4932
4933 region_model model1 (&mgr);
4934 model1.add_constraint (y, GT_EXPR, int_42, NULL);
4935 model1.add_constraint (x, GT_EXPR, int_3, NULL);
4936
4937 model0.canonicalize ();
4938 model1.canonicalize ();
4939 ASSERT_EQ (model0, model1);
4940 }
4941
4942 /* Verify that we can canonicalize a model containing NaN and other real
4943 constants. */
4944
4945 static void
4946 test_canonicalization_4 ()
4947 {
4948 auto_vec<tree> csts;
4949 append_interesting_constants (&csts);
4950
4951 region_model_manager mgr;
4952 region_model model (&mgr);
4953
4954 for (tree cst : csts)
4955 model.get_rvalue (cst, NULL);
4956
4957 model.canonicalize ();
4958 }
4959
4960 /* Assert that if we have two region_model instances
4961 with values VAL_A and VAL_B for EXPR that they are
4962 mergable. Write the merged model to *OUT_MERGED_MODEL,
4963 and the merged svalue ptr to *OUT_MERGED_SVALUE.
4964 If VAL_A or VAL_B are NULL_TREE, don't populate EXPR
4965 for that region_model. */
4966
4967 static void
4968 assert_region_models_merge (tree expr, tree val_a, tree val_b,
4969 region_model *out_merged_model,
4970 const svalue **out_merged_svalue)
4971 {
4972 program_point point (program_point::origin ());
4973 test_region_model_context ctxt;
4974 region_model_manager *mgr = out_merged_model->get_manager ();
4975 region_model model0 (mgr);
4976 region_model model1 (mgr);
4977 if (val_a)
4978 model0.set_value (model0.get_lvalue (expr, &ctxt),
4979 model0.get_rvalue (val_a, &ctxt),
4980 &ctxt);
4981 if (val_b)
4982 model1.set_value (model1.get_lvalue (expr, &ctxt),
4983 model1.get_rvalue (val_b, &ctxt),
4984 &ctxt);
4985
4986 /* They should be mergeable. */
4987 ASSERT_TRUE (model0.can_merge_with_p (model1, point, out_merged_model));
4988 *out_merged_svalue = out_merged_model->get_rvalue (expr, &ctxt);
4989 }
4990
4991 /* Verify that we can merge region_model instances. */
4992
4993 static void
4994 test_state_merging ()
4995 {
4996 tree int_42 = build_int_cst (integer_type_node, 42);
4997 tree int_113 = build_int_cst (integer_type_node, 113);
4998 tree x = build_global_decl ("x", integer_type_node);
4999 tree y = build_global_decl ("y", integer_type_node);
5000 tree z = build_global_decl ("z", integer_type_node);
5001 tree p = build_global_decl ("p", ptr_type_node);
5002
5003 tree addr_of_y = build1 (ADDR_EXPR, ptr_type_node, y);
5004 tree addr_of_z = build1 (ADDR_EXPR, ptr_type_node, z);
5005
5006 auto_vec <tree> param_types;
5007 tree test_fndecl = make_fndecl (integer_type_node, "test_fn", param_types);
5008 allocate_struct_function (test_fndecl, true);
5009
5010 /* Param "a". */
5011 tree a = build_decl (UNKNOWN_LOCATION, PARM_DECL,
5012 get_identifier ("a"),
5013 integer_type_node);
5014 tree addr_of_a = build1 (ADDR_EXPR, ptr_type_node, a);
5015
5016 /* Param "q", a pointer. */
5017 tree q = build_decl (UNKNOWN_LOCATION, PARM_DECL,
5018 get_identifier ("q"),
5019 ptr_type_node);
5020
5021 program_point point (program_point::origin ());
5022 region_model_manager mgr;
5023
5024 {
5025 region_model model0 (&mgr);
5026 region_model model1 (&mgr);
5027 region_model merged (&mgr);
5028 /* Verify empty models can be merged. */
5029 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5030 ASSERT_EQ (model0, merged);
5031 }
5032
5033 /* Verify that we can merge two contradictory constraints on the
5034 value for a global. */
5035 /* TODO: verify that the merged model doesn't have a value for
5036 the global. */
5037 {
5038 region_model model0 (&mgr);
5039 region_model model1 (&mgr);
5040 region_model merged (&mgr);
5041 test_region_model_context ctxt;
5042 model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
5043 model1.add_constraint (x, EQ_EXPR, int_113, &ctxt);
5044 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5045 ASSERT_NE (model0, merged);
5046 ASSERT_NE (model1, merged);
5047 }
5048
5049 /* Verify handling of a PARM_DECL. */
5050 {
5051 test_region_model_context ctxt;
5052 region_model model0 (&mgr);
5053 region_model model1 (&mgr);
5054 ASSERT_EQ (model0.get_stack_depth (), 0);
5055 model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, &ctxt);
5056 ASSERT_EQ (model0.get_stack_depth (), 1);
5057 model1.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, &ctxt);
5058
5059 placeholder_svalue test_sval (integer_type_node, "test sval");
5060 model0.set_value (model0.get_lvalue (a, &ctxt), &test_sval, &ctxt);
5061 model1.set_value (model1.get_lvalue (a, &ctxt), &test_sval, &ctxt);
5062 ASSERT_EQ (model0, model1);
5063
5064 /* They should be mergeable, and the result should be the same. */
5065 region_model merged (&mgr);
5066 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5067 ASSERT_EQ (model0, merged);
5068 /* In particular, "a" should have the placeholder value. */
5069 ASSERT_EQ (merged.get_rvalue (a, &ctxt), &test_sval);
5070 }
5071
5072 /* Verify handling of a global. */
5073 {
5074 test_region_model_context ctxt;
5075 region_model model0 (&mgr);
5076 region_model model1 (&mgr);
5077
5078 placeholder_svalue test_sval (integer_type_node, "test sval");
5079 model0.set_value (model0.get_lvalue (x, &ctxt), &test_sval, &ctxt);
5080 model1.set_value (model1.get_lvalue (x, &ctxt), &test_sval, &ctxt);
5081 ASSERT_EQ (model0, model1);
5082
5083 /* They should be mergeable, and the result should be the same. */
5084 region_model merged (&mgr);
5085 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5086 ASSERT_EQ (model0, merged);
5087 /* In particular, "x" should have the placeholder value. */
5088 ASSERT_EQ (merged.get_rvalue (x, &ctxt), &test_sval);
5089 }
5090
5091 /* Use global-handling to verify various combinations of values. */
5092
5093 /* Two equal constant values. */
5094 {
5095 region_model merged (&mgr);
5096 const svalue *merged_x_sval;
5097 assert_region_models_merge (x, int_42, int_42, &merged, &merged_x_sval);
5098
5099 /* In particular, there should be a constant value for "x". */
5100 ASSERT_EQ (merged_x_sval->get_kind (), SK_CONSTANT);
5101 ASSERT_EQ (merged_x_sval->dyn_cast_constant_svalue ()->get_constant (),
5102 int_42);
5103 }
5104
5105 /* Two non-equal constant values. */
5106 {
5107 region_model merged (&mgr);
5108 const svalue *merged_x_sval;
5109 assert_region_models_merge (x, int_42, int_113, &merged, &merged_x_sval);
5110
5111 /* In particular, there should be a "widening" value for "x". */
5112 ASSERT_EQ (merged_x_sval->get_kind (), SK_WIDENING);
5113 }
5114
5115 /* Initial and constant. */
5116 {
5117 region_model merged (&mgr);
5118 const svalue *merged_x_sval;
5119 assert_region_models_merge (x, NULL_TREE, int_113, &merged, &merged_x_sval);
5120
5121 /* In particular, there should be an unknown value for "x". */
5122 ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
5123 }
5124
5125 /* Constant and initial. */
5126 {
5127 region_model merged (&mgr);
5128 const svalue *merged_x_sval;
5129 assert_region_models_merge (x, int_42, NULL_TREE, &merged, &merged_x_sval);
5130
5131 /* In particular, there should be an unknown value for "x". */
5132 ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
5133 }
5134
5135 /* Unknown and constant. */
5136 // TODO
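  /* A hedged sketch for the TODO above, assuming the merger collapses
     an unknown value merged with a constant to an unknown value.  */
  {
    test_region_model_context ctxt;
    region_model model0 (&mgr);
    region_model model1 (&mgr);
    model0.set_value (model0.get_lvalue (x, &ctxt),
		      mgr.get_or_create_unknown_svalue (integer_type_node),
		      &ctxt);
    model1.set_value (model1.get_lvalue (x, &ctxt),
		      model1.get_rvalue (int_42, &ctxt), &ctxt);
    region_model merged (&mgr);
    ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
    ASSERT_EQ (merged.get_rvalue (x, &ctxt)->get_kind (), SK_UNKNOWN);
  }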
5137
5138 /* Pointers: NULL and NULL. */
5139 // TODO
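  /* A hedged sketch for the TODO above: merging two models in which
     "p" is NULL in both should presumably keep the constant, as in
     the equal-integer case above.  */
  {
    region_model merged (&mgr);
    const svalue *merged_p_sval;
    tree null_ptr = build_int_cst (ptr_type_node, 0);
    assert_region_models_merge (p, null_ptr, null_ptr, &merged,
				&merged_p_sval);
    ASSERT_EQ (merged_p_sval->get_kind (), SK_CONSTANT);
  }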
5140
5141 /* Pointers: NULL and non-NULL. */
5142 // TODO
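  /* A hedged sketch for the TODO above: merging NULL with a non-NULL
     pointer should at least succeed; the kind of the merged value is
     deliberately left unasserted here, since it depends on the
     merger's widening heuristics.  */
  {
    region_model merged (&mgr);
    const svalue *merged_p_sval;
    tree null_ptr = build_int_cst (ptr_type_node, 0);
    assert_region_models_merge (p, null_ptr, addr_of_y, &merged,
				&merged_p_sval);
  }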
5143
5144 /* Pointers: non-NULL and non-NULL: ptr to a local. */
5145 {
5146 region_model model0 (&mgr);
5147 model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
5148 model0.set_value (model0.get_lvalue (p, NULL),
5149 model0.get_rvalue (addr_of_a, NULL), NULL);
5150
5151 region_model model1 (model0);
5152 ASSERT_EQ (model0, model1);
5153
5154 /* They should be mergeable, and the result should be the same. */
5155 region_model merged (&mgr);
5156 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5157 ASSERT_EQ (model0, merged);
5158 }
5159
5160 /* Pointers: non-NULL and non-NULL: ptr to a global. */
5161 {
5162 region_model merged (&mgr);
5163 /* p == &y in both input models. */
5164 const svalue *merged_p_sval;
5165 assert_region_models_merge (p, addr_of_y, addr_of_y, &merged,
5166 &merged_p_sval);
5167
5168 /* We should get p == &y in the merged model. */
5169 ASSERT_EQ (merged_p_sval->get_kind (), SK_REGION);
5170 const region_svalue *merged_p_ptr
5171 = merged_p_sval->dyn_cast_region_svalue ();
5172 const region *merged_p_star_reg = merged_p_ptr->get_pointee ();
5173 ASSERT_EQ (merged_p_star_reg, merged.get_lvalue (y, NULL));
5174 }
5175
5176 /* Pointers: non-NULL ptrs to different globals: should be unknown. */
5177 {
5178 region_model merged (&mgr);
5179 /* x == &y vs x == &z in the input models; these are actually casts
5180 of the ptrs to "int". */
5181 const svalue *merged_x_sval;
5182 // TODO:
5183 assert_region_models_merge (x, addr_of_y, addr_of_z, &merged,
5184 &merged_x_sval);
5185
5186 /* We should get x == unknown in the merged model. */
5187 ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
5188 }
5189
5190 /* Pointers: non-NULL and non-NULL: ptr to a heap region. */
5191 {
5192 test_region_model_context ctxt;
5193 region_model model0 (&mgr);
5194 tree size = build_int_cst (size_type_node, 1024);
5195 const svalue *size_sval = mgr.get_or_create_constant_svalue (size);
5196 const region *new_reg
5197 = model0.create_region_for_heap_alloc (size_sval, &ctxt);
5198 const svalue *ptr_sval = mgr.get_ptr_svalue (ptr_type_node, new_reg);
5199 model0.set_value (model0.get_lvalue (p, &ctxt),
5200 ptr_sval, &ctxt);
5201
5202 region_model model1 (model0);
5203
5204 ASSERT_EQ (model0, model1);
5205
5206 region_model merged (&mgr);
5207 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5208
5209 /* The merged model ought to be identical. */
5210 ASSERT_EQ (model0, merged);
5211 }
5212
5213 /* Two regions sharing the same placeholder svalue should continue sharing
5214 it after self-merger. */
5215 {
5216 test_region_model_context ctxt;
5217 region_model model0 (&mgr);
5218 placeholder_svalue placeholder_sval (integer_type_node, "test");
5219 model0.set_value (model0.get_lvalue (x, &ctxt),
5220 &placeholder_sval, &ctxt);
5221 model0.set_value (model0.get_lvalue (y, &ctxt), &placeholder_sval, &ctxt);
5222 region_model model1 (model0);
5223
5224 /* They should be mergeable, and the result should be the same. */
5225 region_model merged (&mgr);
5226 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5227 ASSERT_EQ (model0, merged);
5228
5229 /* In particular, we should have x == y. */
5230 ASSERT_EQ (merged.eval_condition (x, EQ_EXPR, y, &ctxt),
5231 tristate (tristate::TS_TRUE));
5232 }
5233
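  /* Verify that a model with (x == 42) can be merged with one
     with (x != 42).  */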
5234 {
5235 region_model model0 (&mgr);
5236 region_model model1 (&mgr);
5237 test_region_model_context ctxt;
5238 model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
5239 model1.add_constraint (x, NE_EXPR, int_42, &ctxt);
5240 region_model merged (&mgr);
5241 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5242 }
5243
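  /* Verify that (x == 42) can be merged with (x != 42 && x == 113).  */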
5244 {
5245 region_model model0 (&mgr);
5246 region_model model1 (&mgr);
5247 test_region_model_context ctxt;
5248 model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
5249 model1.add_constraint (x, NE_EXPR, int_42, &ctxt);
5250 model1.add_constraint (x, EQ_EXPR, int_113, &ctxt);
5251 region_model merged (&mgr);
5252 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5253 }
5254
5255 // TODO: what can't we merge? need at least one such test
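  /* A hedged sketch of one presumably non-mergeable case, assuming
     that models whose stack depths differ cannot be merged (there
     would be no consistent current frame for the merged model).  */
  {
    region_model model0 (&mgr);
    region_model model1 (&mgr);
    model1.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
    region_model merged (&mgr);
    ASSERT_FALSE (model0.can_merge_with_p (model1, point, &merged));
  }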
5256
5257 /* TODO: various things
5258 - heap regions
5259 - value merging:
5260 - every combination, but in particular
5261 - pairs of regions
5262 */
5263
5264 /* Views. */
5265 {
5266 test_region_model_context ctxt;
5267 region_model model0 (&mgr);
5268
5269 const region *x_reg = model0.get_lvalue (x, &ctxt);
5270 const region *x_as_ptr = mgr.get_cast_region (x_reg, ptr_type_node);
5271 model0.set_value (x_as_ptr, model0.get_rvalue (addr_of_y, &ctxt), &ctxt);
5272
5273 region_model model1 (model0);
5274 ASSERT_EQ (model1, model0);
5275
5276 /* They should be mergeable, and the result should be the same. */
5277 region_model merged (&mgr);
5278 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5279 }
5280
5281 /* Verify that we can merge a model in which a local in an older stack
5282 frame points to a local in a more recent stack frame. */
5283 {
5284 region_model model0 (&mgr);
5285 model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
5286 const region *q_in_first_frame = model0.get_lvalue (q, NULL);
5287
5288 /* Push a second frame. */
5289 const region *reg_2nd_frame
5290 = model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
5291
5292 /* Have a pointer in the older frame point to a local in the
5293 more recent frame. */
5294 const svalue *sval_ptr = model0.get_rvalue (addr_of_a, NULL);
5295 model0.set_value (q_in_first_frame, sval_ptr, NULL);
5296
5297 /* Verify that it's pointing at the newer frame. */
5298 const region *reg_pointee = sval_ptr->maybe_get_region ();
5299 ASSERT_EQ (reg_pointee->get_parent_region (), reg_2nd_frame);
5300
5301 model0.canonicalize ();
5302
5303 region_model model1 (model0);
5304 ASSERT_EQ (model0, model1);
5305
5306 /* They should be mergeable, and the result should be the same
5307 (after canonicalization, at least). */
5308 region_model merged (&mgr);
5309 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5310 merged.canonicalize ();
5311 ASSERT_EQ (model0, merged);
5312 }
5313
5314 /* Verify that we can merge a model in which a local points to a global. */
5315 {
5316 region_model model0 (&mgr);
5317 model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
5318 model0.set_value (model0.get_lvalue (q, NULL),
5319 model0.get_rvalue (addr_of_y, NULL), NULL);
5320
5321 region_model model1 (model0);
5322 ASSERT_EQ (model0, model1);
5323
5324 /* They should be mergeable, and the result should be the same
5325 (after canonicalization, at least). */
5326 region_model merged (&mgr);
5327 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5328 ASSERT_EQ (model0, merged);
5329 }
5330 }
5331
5332 /* Verify that constraints are correctly merged when merging region_model
5333 instances. */
5334
5335 static void
5336 test_constraint_merging ()
5337 {
5338 tree int_0 = build_int_cst (integer_type_node, 0);
5339 tree int_5 = build_int_cst (integer_type_node, 5);
5340 tree x = build_global_decl ("x", integer_type_node);
5341 tree y = build_global_decl ("y", integer_type_node);
5342 tree z = build_global_decl ("z", integer_type_node);
5343 tree n = build_global_decl ("n", integer_type_node);
5344
5345 region_model_manager mgr;
5346 test_region_model_context ctxt;
5347
5348 /* model0: 0 <= (x == y) < n. */
5349 region_model model0 (&mgr);
5350 model0.add_constraint (x, EQ_EXPR, y, &ctxt);
5351 model0.add_constraint (x, GE_EXPR, int_0, NULL);
5352 model0.add_constraint (x, LT_EXPR, n, NULL);
5353
5354 /* model1: z != 5 && (0 <= x < n). */
5355 region_model model1 (&mgr);
5356 model1.add_constraint (z, NE_EXPR, int_5, NULL);
5357 model1.add_constraint (x, GE_EXPR, int_0, NULL);
5358 model1.add_constraint (x, LT_EXPR, n, NULL);
5359
5360 /* They should be mergeable; the merged constraints should
5361 be: (0 <= x < n). */
5362 program_point point (program_point::origin ());
5363 region_model merged (&mgr);
5364 ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
5365
5366 ASSERT_EQ (merged.eval_condition (x, GE_EXPR, int_0, &ctxt),
5367 tristate (tristate::TS_TRUE));
5368 ASSERT_EQ (merged.eval_condition (x, LT_EXPR, n, &ctxt),
5369 tristate (tristate::TS_TRUE));
5370
5371 ASSERT_EQ (merged.eval_condition (z, NE_EXPR, int_5, &ctxt),
5372 tristate (tristate::TS_UNKNOWN));
5373 ASSERT_EQ (merged.eval_condition (x, LT_EXPR, y, &ctxt),
5374 tristate (tristate::TS_UNKNOWN));
5375 }
5376
5377 /* Verify that widening_svalue::eval_condition_without_cm works as
5378 expected. */
5379
5380 static void
5381 test_widening_constraints ()
5382 {
5383 program_point point (program_point::origin ());
5384 tree int_0 = build_int_cst (integer_type_node, 0);
5385 tree int_m1 = build_int_cst (integer_type_node, -1);
5386 tree int_1 = build_int_cst (integer_type_node, 1);
5387 tree int_256 = build_int_cst (integer_type_node, 256);
5388 region_model_manager mgr;
5389 test_region_model_context ctxt;
5390 const svalue *int_0_sval = mgr.get_or_create_constant_svalue (int_0);
5391 const svalue *int_1_sval = mgr.get_or_create_constant_svalue (int_1);
5392 const svalue *w_zero_then_one_sval
5393 = mgr.get_or_create_widening_svalue (integer_type_node, point,
5394 int_0_sval, int_1_sval);
5395 const widening_svalue *w_zero_then_one
5396 = w_zero_then_one_sval->dyn_cast_widening_svalue ();
5397 ASSERT_EQ (w_zero_then_one->get_direction (),
5398 widening_svalue::DIR_ASCENDING);
5399 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_m1),
5400 tristate::TS_FALSE);
5401 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_0),
5402 tristate::TS_FALSE);
5403 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_1),
5404 tristate::TS_UNKNOWN);
5405 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_256),
5406 tristate::TS_UNKNOWN);
5407
5408 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_m1),
5409 tristate::TS_FALSE);
5410 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_0),
5411 tristate::TS_UNKNOWN);
5412 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_1),
5413 tristate::TS_UNKNOWN);
5414 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_256),
5415 tristate::TS_UNKNOWN);
5416
5417 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_m1),
5418 tristate::TS_TRUE);
5419 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_0),
5420 tristate::TS_UNKNOWN);
5421 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_1),
5422 tristate::TS_UNKNOWN);
5423 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_256),
5424 tristate::TS_UNKNOWN);
5425
5426 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_m1),
5427 tristate::TS_TRUE);
5428 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_0),
5429 tristate::TS_TRUE);
5430 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_1),
5431 tristate::TS_UNKNOWN);
5432 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_256),
5433 tristate::TS_UNKNOWN);
5434
5435 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_m1),
5436 tristate::TS_FALSE);
5437 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_0),
5438 tristate::TS_UNKNOWN);
5439 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_1),
5440 tristate::TS_UNKNOWN);
5441 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_256),
5442 tristate::TS_UNKNOWN);
5443
5444 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_m1),
5445 tristate::TS_TRUE);
5446 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_0),
5447 tristate::TS_UNKNOWN);
5448 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_1),
5449 tristate::TS_UNKNOWN);
5450 ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_256),
5451 tristate::TS_UNKNOWN);
5452 }
5453
5454 /* Verify merging constraints for states simulating successive iterations
5455 of a loop.
5456 Simulate:
5457 for (i = 0; i < 256; i++)
5458 [...body...]
5459    i.e. this gimple:
5460 i_15 = 0;
5461 goto <bb 4>;
5462
5463 <bb 4> :
5464 i_11 = PHI <i_15(2), i_23(3)>
5465 if (i_11 <= 255)
5466 goto <bb 3>;
5467 else
5468 goto [AFTER LOOP]
5469
5470 <bb 3> :
5471 [LOOP BODY]
5472 i_23 = i_11 + 1;
5473
5474 and thus these ops (and resultant states):
5475 i_11 = PHI()
5476 {i_11: 0}
5477 add_constraint (i_11 <= 255) [for the true edge]
5478 {i_11: 0} [constraint was a no-op]
5479 i_23 = i_11 + 1;
5480      {i_23: 1}
5481 i_11 = PHI()
5482 {i_11: WIDENED (at phi, 0, 1)}
5483 add_constraint (i_11 <= 255) [for the true edge]
5484 {i_11: WIDENED (at phi, 0, 1); WIDENED <= 255}
5485 i_23 = i_11 + 1;
5486 {i_23: (WIDENED (at phi, 0, 1) + 1); WIDENED <= 255}
5487 i_11 = PHI(); merge with state at phi above
5488 {i_11: WIDENED (at phi, 0, 1); WIDENED <= 256}
5489 [changing meaning of "WIDENED" here]
5490 if (i_11 <= 255)
5491 T: {i_11: WIDENED (at phi, 0, 1); WIDENED <= 255}; cache hit
5492 F: {i_11: 256}
5493 */
5494
5495 static void
5496 test_iteration_1 ()
5497 {
5498 program_point point (program_point::origin ());
5499
5500 tree int_0 = build_int_cst (integer_type_node, 0);
5501 tree int_1 = build_int_cst (integer_type_node, 1);
5502 tree int_256 = build_int_cst (integer_type_node, 256);
5503 tree int_257 = build_int_cst (integer_type_node, 257);
5504 tree i = build_global_decl ("i", integer_type_node);
5505
5506 region_model_manager mgr;
5507 test_region_model_context ctxt;
5508
5509 /* model0: i: 0. */
5510 region_model model0 (&mgr);
5511 model0.set_value (i, int_0, &ctxt);
5512
5513 /* model1: i: 1. */
5514 region_model model1 (&mgr);
5515 model1.set_value (i, int_1, &ctxt);
5516
5517 /* Should merge "i" to a widened value. */
5518 region_model model2 (&mgr);
5519 ASSERT_TRUE (model1.can_merge_with_p (model0, point, &model2));
5520 const svalue *merged_i = model2.get_rvalue (i, &ctxt);
5521 ASSERT_EQ (merged_i->get_kind (), SK_WIDENING);
5522 const widening_svalue *w = merged_i->dyn_cast_widening_svalue ();
5523 ASSERT_EQ (w->get_direction (), widening_svalue::DIR_ASCENDING);
5524
5525 /* Add constraint: i < 256 */
5526 model2.add_constraint (i, LT_EXPR, int_256, &ctxt);
5527 ASSERT_EQ (model2.eval_condition (i, LT_EXPR, int_256, &ctxt),
5528 tristate (tristate::TS_TRUE));
5529 ASSERT_EQ (model2.eval_condition (i, GE_EXPR, int_0, &ctxt),
5530 tristate (tristate::TS_TRUE));
5531
5532 /* Try merging with the initial state. */
5533 region_model model3 (&mgr);
5534 ASSERT_TRUE (model2.can_merge_with_p (model0, point, &model3));
5535 /* Merging the merged value with the initial value should be idempotent,
5536 so that the analysis converges. */
5537 ASSERT_EQ (model3.get_rvalue (i, &ctxt), merged_i);
5538 /* Merger of 0 and a widening value with constraint < CST
5539 should retain the constraint, even though it was implicit
5540 for the 0 case. */
5541 ASSERT_EQ (model3.eval_condition (i, LT_EXPR, int_256, &ctxt),
5542 tristate (tristate::TS_TRUE));
5543 /* ...and we should have equality: the analysis should have converged. */
5544 ASSERT_EQ (model3, model2);
5545
5546 /* "i_23 = i_11 + 1;" */
5547 region_model model4 (model3);
5548 ASSERT_EQ (model4, model2);
5549 model4.set_value (i, build2 (PLUS_EXPR, integer_type_node, i, int_1), &ctxt);
5550 const svalue *plus_one = model4.get_rvalue (i, &ctxt);
5551 ASSERT_EQ (plus_one->get_kind (), SK_BINOP);
5552
5553 /* Try merging with the "i: 1" state. */
5554 region_model model5 (&mgr);
5555 ASSERT_TRUE (model4.can_merge_with_p (model1, point, &model5));
5556 ASSERT_EQ (model5.get_rvalue (i, &ctxt), plus_one);
5557 ASSERT_EQ (model5, model4);
5558
5559 /* "i_11 = PHI();" merge with state at phi above.
5560 For i, we should have a merger of WIDENING with WIDENING + 1,
5561 and this should be WIDENING again. */
5562 region_model model6 (&mgr);
5563 ASSERT_TRUE (model5.can_merge_with_p (model2, point, &model6));
5564 const svalue *merged_widening = model6.get_rvalue (i, &ctxt);
5565 ASSERT_EQ (merged_widening->get_kind (), SK_WIDENING);
5566
5567 ASSERT_CONDITION_TRUE (model6, i, LT_EXPR, int_257);
5568 }
5569
5570 /* Verify that if we mark a pointer to a malloc-ed region as non-NULL,
5571 all cast pointers to that region are also known to be non-NULL. */
5572
5573 static void
5574 test_malloc_constraints ()
5575 {
5576 region_model_manager mgr;
5577 region_model model (&mgr);
5578 tree p = build_global_decl ("p", ptr_type_node);
5579 tree char_star = build_pointer_type (char_type_node);
5580 tree q = build_global_decl ("q", char_star);
5581 tree null_ptr = build_int_cst (ptr_type_node, 0);
5582
5583 const svalue *size_in_bytes
5584 = mgr.get_or_create_unknown_svalue (size_type_node);
5585 const region *reg = model.create_region_for_heap_alloc (size_in_bytes, NULL);
5586 const svalue *sval = mgr.get_ptr_svalue (ptr_type_node, reg);
5587 model.set_value (model.get_lvalue (p, NULL), sval, NULL);
5588 model.set_value (q, p, NULL);
5589
5590 ASSERT_CONDITION_UNKNOWN (model, p, NE_EXPR, null_ptr);
5591 ASSERT_CONDITION_UNKNOWN (model, p, EQ_EXPR, null_ptr);
5592 ASSERT_CONDITION_UNKNOWN (model, q, NE_EXPR, null_ptr);
5593 ASSERT_CONDITION_UNKNOWN (model, q, EQ_EXPR, null_ptr);
5594
5595 model.add_constraint (p, NE_EXPR, null_ptr, NULL);
5596
5597 ASSERT_CONDITION_TRUE (model, p, NE_EXPR, null_ptr);
5598 ASSERT_CONDITION_FALSE (model, p, EQ_EXPR, null_ptr);
5599 ASSERT_CONDITION_TRUE (model, q, NE_EXPR, null_ptr);
5600 ASSERT_CONDITION_FALSE (model, q, EQ_EXPR, null_ptr);
5601 }
5602
5603 /* Smoketest of getting and setting the value of a variable. */
5604
5605 static void
5606 test_var ()
5607 {
5608 /* "int i;" */
5609 tree i = build_global_decl ("i", integer_type_node);
5610
5611 tree int_17 = build_int_cst (integer_type_node, 17);
5612 tree int_m3 = build_int_cst (integer_type_node, -3);
5613
5614 region_model_manager mgr;
5615 region_model model (&mgr);
5616
5617 const region *i_reg = model.get_lvalue (i, NULL);
5618 ASSERT_EQ (i_reg->get_kind (), RK_DECL);
5619
5620 /* Reading "i" should give a symbolic "initial value". */
5621 const svalue *sval_init = model.get_rvalue (i, NULL);
5622 ASSERT_EQ (sval_init->get_kind (), SK_INITIAL);
5623 ASSERT_EQ (sval_init->dyn_cast_initial_svalue ()->get_region (), i_reg);
5624   /* ...and doing it again should give the same "initial value".  */
5625 ASSERT_EQ (model.get_rvalue (i, NULL), sval_init);
5626
5627 /* "i = 17;". */
5628 model.set_value (i, int_17, NULL);
5629 ASSERT_EQ (model.get_rvalue (i, NULL),
5630 model.get_rvalue (int_17, NULL));
5631
5632 /* "i = -3;". */
5633 model.set_value (i, int_m3, NULL);
5634 ASSERT_EQ (model.get_rvalue (i, NULL),
5635 model.get_rvalue (int_m3, NULL));
5636
5637 /* Verify get_offset for "i". */
5638 {
5639 region_offset offset = i_reg->get_offset ();
5640 ASSERT_EQ (offset.get_base_region (), i_reg);
5641 ASSERT_EQ (offset.get_bit_offset (), 0);
5642 }
5643 }
5644
5645 static void
5646 test_array_2 ()
5647 {
5648 /* "int arr[10];" */
5649 tree tlen = size_int (10);
5650 tree arr_type
5651 = build_array_type (integer_type_node, build_index_type (tlen));
5652 tree arr = build_global_decl ("arr", arr_type);
5653
5654 /* "int i;" */
5655 tree i = build_global_decl ("i", integer_type_node);
5656
5657 tree int_0 = build_int_cst (integer_type_node, 0);
5658 tree int_1 = build_int_cst (integer_type_node, 1);
5659
5660 tree arr_0 = build4 (ARRAY_REF, integer_type_node,
5661 arr, int_0, NULL_TREE, NULL_TREE);
5662 tree arr_1 = build4 (ARRAY_REF, integer_type_node,
5663 arr, int_1, NULL_TREE, NULL_TREE);
5664 tree arr_i = build4 (ARRAY_REF, integer_type_node,
5665 arr, i, NULL_TREE, NULL_TREE);
5666
5667 tree int_17 = build_int_cst (integer_type_node, 17);
5668 tree int_42 = build_int_cst (integer_type_node, 42);
5669 tree int_m3 = build_int_cst (integer_type_node, -3);
5670
5671 region_model_manager mgr;
5672 region_model model (&mgr);
5673 /* "arr[0] = 17;". */
5674 model.set_value (arr_0, int_17, NULL);
5675 /* "arr[1] = -3;". */
5676 model.set_value (arr_1, int_m3, NULL);
5677
5678 ASSERT_EQ (model.get_rvalue (arr_0, NULL), model.get_rvalue (int_17, NULL));
5679 ASSERT_EQ (model.get_rvalue (arr_1, NULL), model.get_rvalue (int_m3, NULL));
5680
5681 /* Overwrite a pre-existing binding: "arr[1] = 42;". */
5682 model.set_value (arr_1, int_42, NULL);
5683 ASSERT_EQ (model.get_rvalue (arr_1, NULL), model.get_rvalue (int_42, NULL));
5684
5685 /* Verify get_offset for "arr[0]". */
5686 {
5687 const region *arr_0_reg = model.get_lvalue (arr_0, NULL);
5688 region_offset offset = arr_0_reg->get_offset ();
5689 ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, NULL));
5690 ASSERT_EQ (offset.get_bit_offset (), 0);
5691 }
5692
5693 /* Verify get_offset for "arr[1]". */
5694 {
5695 const region *arr_1_reg = model.get_lvalue (arr_1, NULL);
5696 region_offset offset = arr_1_reg->get_offset ();
5697 ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, NULL));
5698 ASSERT_EQ (offset.get_bit_offset (), INT_TYPE_SIZE);
5699 }
5700
5701 /* "arr[i] = i;" - this should remove the earlier bindings. */
5702 model.set_value (arr_i, i, NULL);
5703 ASSERT_EQ (model.get_rvalue (arr_i, NULL), model.get_rvalue (i, NULL));
5704 ASSERT_EQ (model.get_rvalue (arr_0, NULL)->get_kind (), SK_UNKNOWN);
5705
5706 /* "arr[0] = 17;" - this should remove the arr[i] binding. */
5707 model.set_value (arr_0, int_17, NULL);
5708 ASSERT_EQ (model.get_rvalue (arr_0, NULL), model.get_rvalue (int_17, NULL));
5709 ASSERT_EQ (model.get_rvalue (arr_i, NULL)->get_kind (), SK_UNKNOWN);
5710 }
5711
5712 /* Smoketest of dereferencing a pointer via MEM_REF. */
5713
5714 static void
5715 test_mem_ref ()
5716 {
5717 /*
5718 x = 17;
5719 p = &x;
5720 *p;
5721 */
5722 tree x = build_global_decl ("x", integer_type_node);
5723 tree int_star = build_pointer_type (integer_type_node);
5724 tree p = build_global_decl ("p", int_star);
5725
5726 tree int_17 = build_int_cst (integer_type_node, 17);
5727 tree addr_of_x = build1 (ADDR_EXPR, int_star, x);
5728 tree offset_0 = build_int_cst (integer_type_node, 0);
5729 tree star_p = build2 (MEM_REF, integer_type_node, p, offset_0);
5730
5731 region_model_manager mgr;
5732 region_model model (&mgr);
5733
5734 /* "x = 17;". */
5735 model.set_value (x, int_17, NULL);
5736
5737 /* "p = &x;". */
5738 model.set_value (p, addr_of_x, NULL);
5739
5740 const svalue *sval = model.get_rvalue (star_p, NULL);
5741 ASSERT_EQ (sval->maybe_get_constant (), int_17);
5742 }
5743
5744 /* Test for a POINTER_PLUS_EXPR followed by a MEM_REF.
5745 Analogous to this code:
5746 void test_6 (int a[10])
5747 {
5748 __analyzer_eval (a[3] == 42); [should be UNKNOWN]
5749 a[3] = 42;
5750 __analyzer_eval (a[3] == 42); [should be TRUE]
5751 }
5752 from data-model-1.c, which looks like this at the gimple level:
5753 # __analyzer_eval (a[3] == 42); [should be UNKNOWN]
5754 int *_1 = a_10(D) + 12; # POINTER_PLUS_EXPR
5755 int _2 = *_1; # MEM_REF
5756 _Bool _3 = _2 == 42;
5757 int _4 = (int) _3;
5758 __analyzer_eval (_4);
5759
5760 # a[3] = 42;
5761 int *_5 = a_10(D) + 12; # POINTER_PLUS_EXPR
5762 *_5 = 42; # MEM_REF
5763
5764 # __analyzer_eval (a[3] == 42); [should be TRUE]
5765 int *_6 = a_10(D) + 12; # POINTER_PLUS_EXPR
5766 int _7 = *_6; # MEM_REF
5767 _Bool _8 = _7 == 42;
5768 int _9 = (int) _8;
5769 __analyzer_eval (_9); */
5770
5771 static void
5772 test_POINTER_PLUS_EXPR_then_MEM_REF ()
5773 {
5774 tree int_star = build_pointer_type (integer_type_node);
5775 tree a = build_global_decl ("a", int_star);
5776 tree offset_12 = build_int_cst (size_type_node, 12);
5777 tree pointer_plus_expr = build2 (POINTER_PLUS_EXPR, int_star, a, offset_12);
5778 tree offset_0 = build_int_cst (integer_type_node, 0);
5779 tree mem_ref = build2 (MEM_REF, integer_type_node,
5780 pointer_plus_expr, offset_0);
5781 region_model_manager mgr;
5782 region_model m (&mgr);
5783
5784 tree int_42 = build_int_cst (integer_type_node, 42);
5785 m.set_value (mem_ref, int_42, NULL);
5786 ASSERT_EQ (m.get_rvalue (mem_ref, NULL)->maybe_get_constant (), int_42);
5787 }
5788
5789 /* Verify that malloc works. */
5790
5791 static void
5792 test_malloc ()
5793 {
5794 tree int_star = build_pointer_type (integer_type_node);
5795 tree p = build_global_decl ("p", int_star);
5796 tree n = build_global_decl ("n", integer_type_node);
5797 tree n_times_4 = build2 (MULT_EXPR, size_type_node,
5798 n, build_int_cst (size_type_node, 4));
5799
5800 region_model_manager mgr;
5801 test_region_model_context ctxt;
5802 region_model model (&mgr);
5803
5804 /* "p = malloc (n * 4);". */
5805 const svalue *size_sval = model.get_rvalue (n_times_4, &ctxt);
5806 const region *reg = model.create_region_for_heap_alloc (size_sval, &ctxt);
5807 const svalue *ptr = mgr.get_ptr_svalue (int_star, reg);
5808 model.set_value (model.get_lvalue (p, &ctxt), ptr, &ctxt);
5809 ASSERT_EQ (model.get_capacity (reg), size_sval);
5810 }
5811
5812 /* Verify that alloca works. */
5813
5814 static void
5815 test_alloca ()
5816 {
5817 auto_vec <tree> param_types;
5818 tree fndecl = make_fndecl (integer_type_node,
5819 "test_fn",
5820 param_types);
5821 allocate_struct_function (fndecl, true);
5822
5823
5824 tree int_star = build_pointer_type (integer_type_node);
5825 tree p = build_global_decl ("p", int_star);
5826 tree n = build_global_decl ("n", integer_type_node);
5827 tree n_times_4 = build2 (MULT_EXPR, size_type_node,
5828 n, build_int_cst (size_type_node, 4));
5829
5830 region_model_manager mgr;
5831 test_region_model_context ctxt;
5832 region_model model (&mgr);
5833
5834 /* Push stack frame. */
5835 const region *frame_reg
5836 = model.push_frame (DECL_STRUCT_FUNCTION (fndecl),
5837 NULL, &ctxt);
5838 /* "p = alloca (n * 4);". */
5839 const svalue *size_sval = model.get_rvalue (n_times_4, &ctxt);
5840 const region *reg = model.create_region_for_alloca (size_sval, &ctxt);
5841 ASSERT_EQ (reg->get_parent_region (), frame_reg);
5842 const svalue *ptr = mgr.get_ptr_svalue (int_star, reg);
5843 model.set_value (model.get_lvalue (p, &ctxt), ptr, &ctxt);
5844 ASSERT_EQ (model.get_capacity (reg), size_sval);
5845
5846 /* Verify that the pointers to the alloca region are replaced by
5847 poisoned values when the frame is popped. */
5848 model.pop_frame (NULL, NULL, &ctxt);
5849 ASSERT_EQ (model.get_rvalue (p, NULL)->get_kind (), SK_POISONED);
5850 }
5851
5852 /* Verify that svalue::involves_p works. */
5853
5854 static void
5855 test_involves_p ()
5856 {
5857 region_model_manager mgr;
5858 tree int_star = build_pointer_type (integer_type_node);
5859 tree p = build_global_decl ("p", int_star);
5860 tree q = build_global_decl ("q", int_star);
5861
5862 test_region_model_context ctxt;
5863 region_model model (&mgr);
5864 const svalue *p_init = model.get_rvalue (p, &ctxt);
5865 const svalue *q_init = model.get_rvalue (q, &ctxt);
5866
5867 ASSERT_TRUE (p_init->involves_p (p_init));
5868 ASSERT_FALSE (p_init->involves_p (q_init));
5869
5870 const region *star_p_reg = mgr.get_symbolic_region (p_init);
5871 const region *star_q_reg = mgr.get_symbolic_region (q_init);
5872
5873 const svalue *init_star_p = mgr.get_or_create_initial_value (star_p_reg);
5874 const svalue *init_star_q = mgr.get_or_create_initial_value (star_q_reg);
5875
5876 ASSERT_TRUE (init_star_p->involves_p (p_init));
5877 ASSERT_FALSE (p_init->involves_p (init_star_p));
5878 ASSERT_FALSE (init_star_p->involves_p (q_init));
5879 ASSERT_TRUE (init_star_q->involves_p (q_init));
5880 ASSERT_FALSE (init_star_q->involves_p (p_init));
5881 }
5882
5883 /* Run all of the selftests within this file. */
5884
5885 void
5886 analyzer_region_model_cc_tests ()
5887 {
5888 test_tree_cmp_on_constants ();
5889 test_dump ();
5890 test_struct ();
5891 test_array_1 ();
5892 test_get_representative_tree ();
5893 test_unique_constants ();
5894 test_unique_unknowns ();
5895 test_initial_svalue_folding ();
5896 test_unaryop_svalue_folding ();
5897 test_binop_svalue_folding ();
5898 test_sub_svalue_folding ();
5899 test_descendent_of_p ();
5900 test_assignment ();
5901 test_compound_assignment ();
5902 test_stack_frames ();
5903 test_get_representative_path_var ();
5904 test_equality_1 ();
5905 test_canonicalization_2 ();
5906 test_canonicalization_3 ();
5907 test_canonicalization_4 ();
5908 test_state_merging ();
5909 test_constraint_merging ();
5910 test_widening_constraints ();
5911 test_iteration_1 ();
5912 test_malloc_constraints ();
5913 test_var ();
5914 test_array_2 ();
5915 test_mem_ref ();
5916 test_POINTER_PLUS_EXPR_then_MEM_REF ();
5917 test_malloc ();
5918 test_alloca ();
5919 test_involves_p ();
5920 }
5921
5922 } // namespace selftest
5923
5924 #endif /* CHECKING_P */
5925
5926 } // namespace ana
5927
5928 #endif /* #if ENABLE_ANALYZER */
5929