/* The analysis "engine".
   Copyright (C) 2019-2022 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20
21 #include "config.h"
22 #define INCLUDE_MEMORY
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tree.h"
26 #include "fold-const.h"
27 #include "gcc-rich-location.h"
28 #include "alloc-pool.h"
29 #include "fibonacci_heap.h"
30 #include "shortest-paths.h"
31 #include "diagnostic-core.h"
32 #include "diagnostic-event-id.h"
33 #include "diagnostic-path.h"
34 #include "function.h"
35 #include "pretty-print.h"
36 #include "sbitmap.h"
37 #include "bitmap.h"
38 #include "tristate.h"
39 #include "ordered-hash-map.h"
40 #include "selftest.h"
41 #include "json.h"
42 #include "analyzer/analyzer.h"
43 #include "analyzer/analyzer-logging.h"
44 #include "analyzer/call-string.h"
45 #include "analyzer/program-point.h"
46 #include "analyzer/store.h"
47 #include "analyzer/region-model.h"
48 #include "analyzer/constraint-manager.h"
49 #include "analyzer/sm.h"
50 #include "analyzer/pending-diagnostic.h"
51 #include "analyzer/diagnostic-manager.h"
52 #include "cfg.h"
53 #include "basic-block.h"
54 #include "gimple.h"
55 #include "gimple-iterator.h"
56 #include "gimple-pretty-print.h"
57 #include "cgraph.h"
58 #include "digraph.h"
59 #include "analyzer/supergraph.h"
60 #include "analyzer/program-state.h"
61 #include "analyzer/exploded-graph.h"
62 #include "analyzer/analysis-plan.h"
63 #include "analyzer/checker-path.h"
64 #include "analyzer/state-purge.h"
65 #include "analyzer/bar-chart.h"
66 #include "analyzer/call-info.h"
67 #include <zlib.h>
68 #include "plugin.h"
69 #include "target.h"
70 #include <memory>
71 #include "stringpool.h"
72 #include "attribs.h"
73 #include "tree-dfa.h"
74
75 /* For an overview, see gcc/doc/analyzer.texi. */
76
77 #if ENABLE_ANALYZER
78
79 namespace ana {
80
81 /* class impl_region_model_context : public region_model_context. */
82
/* Ctor for use when exploring the exploded graph EG.
   ENODE_FOR_DIAG is the node with which any diagnostics are associated.
   OLD_STATE and NEW_STATE are the program states before/after the
   transition being processed; either may be NULL for some callers.
   UNCERTAINTY, PATH_CTXT, STMT and STMT_FINDER may each be NULL.
   The extrinsic state and logger are taken from EG.  */

impl_region_model_context::
impl_region_model_context (exploded_graph &eg,
			   exploded_node *enode_for_diag,
			   const program_state *old_state,
			   program_state *new_state,
			   uncertainty_t *uncertainty,
			   path_context *path_ctxt,
			   const gimple *stmt,
			   stmt_finder *stmt_finder)
: m_eg (&eg), m_logger (eg.get_logger ()),
  m_enode_for_diag (enode_for_diag),
  m_old_state (old_state),
  m_new_state (new_state),
  m_stmt (stmt),
  m_stmt_finder (stmt_finder),
  m_ext_state (eg.get_ext_state ()),
  m_uncertainty (uncertainty),
  m_path_ctxt (path_ctxt)
{
}
103
/* Ctor for use when there is no exploded_graph (m_eg is set to NULL):
   operate on STATE directly, with extrinsic state EXT_STATE.
   With no graph, diagnostics cannot be reported (see warn/add_note,
   which check m_eg for NULL).  */

impl_region_model_context::
impl_region_model_context (program_state *state,
			   const extrinsic_state &ext_state,
			   uncertainty_t *uncertainty,
			   logger *logger)
: m_eg (NULL), m_logger (logger), m_enode_for_diag (NULL),
  m_old_state (NULL),
  m_new_state (state),
  m_stmt (NULL),
  m_stmt_finder (NULL),
  m_ext_state (ext_state),
  m_uncertainty (uncertainty),
  m_path_ctxt (NULL)
{
}
119
120 bool
warn(pending_diagnostic * d)121 impl_region_model_context::warn (pending_diagnostic *d)
122 {
123 LOG_FUNC (get_logger ());
124 if (m_stmt == NULL && m_stmt_finder == NULL)
125 {
126 if (get_logger ())
127 get_logger ()->log ("rejecting diagnostic: no stmt");
128 delete d;
129 return false;
130 }
131 if (m_eg)
132 return m_eg->get_diagnostic_manager ().add_diagnostic
133 (m_enode_for_diag, m_enode_for_diag->get_supernode (),
134 m_stmt, m_stmt_finder, d);
135 else
136 {
137 delete d;
138 return false;
139 }
140 }
141
142 void
add_note(pending_note * pn)143 impl_region_model_context::add_note (pending_note *pn)
144 {
145 LOG_FUNC (get_logger ());
146 if (m_eg)
147 m_eg->get_diagnostic_manager ().add_note (pn);
148 else
149 delete pn;
150 }
151
152 void
on_svalue_leak(const svalue * sval)153 impl_region_model_context::on_svalue_leak (const svalue *sval)
154
155 {
156 for (sm_state_map *smap : m_new_state->m_checker_states)
157 smap->on_svalue_leak (sval, this);
158 }
159
160 void
161 impl_region_model_context::
on_liveness_change(const svalue_set & live_svalues,const region_model * model)162 on_liveness_change (const svalue_set &live_svalues,
163 const region_model *model)
164 {
165 for (sm_state_map *smap : m_new_state->m_checker_states)
166 smap->on_liveness_change (live_svalues, model, this);
167 }
168
169 void
on_unknown_change(const svalue * sval,bool is_mutable)170 impl_region_model_context::on_unknown_change (const svalue *sval,
171 bool is_mutable)
172 {
173 for (sm_state_map *smap : m_new_state->m_checker_states)
174 smap->on_unknown_change (sval, is_mutable, m_ext_state);
175 }
176
/* Implementation of region_model_context::on_escaped_function vfunc.
   Forward the event to the exploded graph.
   NOTE(review): unlike e.g. warn/add_note, this dereferences m_eg
   without a NULL check; presumably callers only reach this with a
   graph-backed context — confirm.  */

void
impl_region_model_context::on_escaped_function (tree fndecl)
{
  m_eg->on_escaped_function (fndecl);
}
182
/* Accessor for the uncertainty tracker supplied at construction
   (may be NULL).  */

uncertainty_t *
impl_region_model_context::get_uncertainty ()
{
  return m_uncertainty;
}
188
189 /* Purge state involving SVAL. The region_model has already been purged,
190 so we only need to purge other state in the program_state:
191 the sm-state. */
192
193 void
purge_state_involving(const svalue * sval)194 impl_region_model_context::purge_state_involving (const svalue *sval)
195 {
196 int i;
197 sm_state_map *smap;
198 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, i, smap)
199 smap->purge_state_involving (sval, m_ext_state);
200 }
201
202 void
bifurcate(custom_edge_info * info)203 impl_region_model_context::bifurcate (custom_edge_info *info)
204 {
205 if (m_path_ctxt)
206 m_path_ctxt->bifurcate (info);
207 else
208 delete info;
209 }
210
211 void
terminate_path()212 impl_region_model_context::terminate_path ()
213 {
214 if (m_path_ctxt)
215 return m_path_ctxt->terminate_path ();
216 }
217
218 bool
get_malloc_map(sm_state_map ** out_smap,const state_machine ** out_sm,unsigned * out_sm_idx)219 impl_region_model_context::get_malloc_map (sm_state_map **out_smap,
220 const state_machine **out_sm,
221 unsigned *out_sm_idx)
222 {
223 unsigned malloc_sm_idx;
224 if (!m_ext_state.get_sm_idx_by_name ("malloc", &malloc_sm_idx))
225 return false;
226
227 *out_smap = m_new_state->m_checker_states[malloc_sm_idx];
228 *out_sm = &m_ext_state.get_sm (malloc_sm_idx);
229 *out_sm_idx = malloc_sm_idx;
230 return true;
231 }
232
/* Get the sm_state_map, state machine, and index of the "taint"
   state machine, writing them through the OUT parameters.
   Return true on success; return false if there is no new state to
   inspect, or if the "taint" state machine is not in use.  */

bool
impl_region_model_context::get_taint_map (sm_state_map **out_smap,
					  const state_machine **out_sm,
					  unsigned *out_sm_idx)
{
  /* The context may have been constructed without a "new" state.  */
  if (!m_new_state)
    return false;

  unsigned taint_sm_idx;
  if (!m_ext_state.get_sm_idx_by_name ("taint", &taint_sm_idx))
    return false;

  *out_smap = m_new_state->m_checker_states[taint_sm_idx];
  *out_sm = &m_ext_state.get_sm (taint_sm_idx);
  *out_sm_idx = taint_sm_idx;
  return true;
}
250
251 /* struct setjmp_record. */
252
/* qsort-style tri-state comparator for setjmp_record instances,
   ordering by the index of the exploded node at which the setjmp
   occurred.  The assertion documents the expectation that records
   with equal enode indices are one and the same object.  */

int
setjmp_record::cmp (const setjmp_record &rec1, const setjmp_record &rec2)
{
  if (int cmp_enode = rec1.m_enode->m_index - rec2.m_enode->m_index)
    return cmp_enode;
  gcc_assert (&rec1 == &rec2);
  return 0;
}
261
262 /* class setjmp_svalue : public svalue. */
263
/* Implementation of svalue::accept vfunc for setjmp_svalue:
   double-dispatch to the visitor V.  */

void
setjmp_svalue::accept (visitor *v) const
{
  v->visit_setjmp_svalue (this);
}
271
272 /* Implementation of svalue::dump_to_pp vfunc for setjmp_svalue. */
273
274 void
dump_to_pp(pretty_printer * pp,bool simple) const275 setjmp_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
276 {
277 if (simple)
278 pp_printf (pp, "SETJMP(EN: %i)", get_enode_index ());
279 else
280 pp_printf (pp, "setjmp_svalue(EN%i)", get_enode_index ());
281 }
282
/* Get the index of the stored exploded_node (the node at which the
   setjmp/sigsetjmp call was recorded).  */

int
setjmp_svalue::get_enode_index () const
{
  return m_setjmp_record.m_enode->m_index;
}
290
291 /* Concrete implementation of sm_context, wiring it up to the rest of this
292 file. */
293
class impl_sm_context : public sm_context
{
public:
  /* Wire up a context for the SM_IDX'th state machine SM within EG.
     OLD_SMAP/NEW_SMAP are SM's mappings within OLD_STATE/NEW_STATE.
     STMT_FINDER, if non-NULL, is used to locate a stmt for
     diagnostics.  UNKNOWN_SIDE_EFFECTS is true when handling a call
     to an external function with unknown side effects.  */
  impl_sm_context (exploded_graph &eg,
		   int sm_idx,
		   const state_machine &sm,
		   exploded_node *enode_for_diag,
		   const program_state *old_state,
		   program_state *new_state,
		   const sm_state_map *old_smap,
		   sm_state_map *new_smap,
		   path_context *path_ctxt,
		   stmt_finder *stmt_finder = NULL,
		   bool unknown_side_effects = false)
  : sm_context (sm_idx, sm),
    m_logger (eg.get_logger ()),
    m_eg (eg), m_enode_for_diag (enode_for_diag),
    m_old_state (old_state), m_new_state (new_state),
    m_old_smap (old_smap), m_new_smap (new_smap),
    m_path_ctxt (path_ctxt),
    m_stmt_finder (stmt_finder),
    m_unknown_side_effects (unknown_side_effects)
  {
  }

  logger *get_logger () const { return m_logger.get_logger (); }

  /* Implementation of sm_context::get_fndecl_for_call vfunc:
     look up the fndecl for CALL via the new state's region model.  */
  tree get_fndecl_for_call (const gcall *call) FINAL OVERRIDE
  {
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
       NULL, call);
    region_model *model = m_new_state->m_region_model;
    return model->get_fndecl_for_call (call, &old_ctxt);
  }

  /* Get VAR's sm-state as of the "old" program state.  */
  state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
				    tree var)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    /* Use NULL ctxt on this get_rvalue call to avoid triggering
       uninitialized value warnings.  */
    const svalue *var_old_sval
      = m_old_state->m_region_model->get_rvalue (var, NULL);

    state_machine::state_t current
      = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
    return current;
  }
  /* As above, but starting from an svalue rather than a tree.  */
  state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
				    const svalue *sval)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    state_machine::state_t current
      = m_old_smap->get_state (sval, m_eg.get_ext_state ());
    return current;
  }


  /* Record within the "new" state that VAR has transitioned to state
     TO at STMT, with ORIGIN (possibly NULL_TREE) as the origin of the
     state.  */
  void set_next_state (const gimple *stmt,
		       tree var,
		       state_machine::state_t to,
		       tree origin)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
					m_old_state, m_new_state,
					NULL, NULL,
					stmt);
    const svalue *var_new_sval
      = m_new_state->m_region_model->get_rvalue (var, &new_ctxt);
    const svalue *origin_new_sval
      = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);

    /* We use the new sval here to avoid issues with uninitialized values.  */
    state_machine::state_t current
      = m_old_smap->get_state (var_new_sval, m_eg.get_ext_state ());
    if (logger)
      logger->log ("%s: state transition of %qE: %s -> %s",
		   m_sm.get_name (),
		   var,
		   current->get_name (),
		   to->get_name ());
    m_new_smap->set_state (m_new_state->m_region_model, var_new_sval,
			   to, origin_new_sval, m_eg.get_ext_state ());
  }

  /* As above, but for an svalue rather than a tree.  */
  void set_next_state (const gimple *stmt,
		       const svalue *sval,
		       state_machine::state_t to,
		       tree origin)
  {
    logger * const logger = get_logger ();
    LOG_FUNC (logger);
    /* NOTE(review): old_ctxt appears to be constructed for parity with
       the tree overload but is otherwise unused here.  */
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
       NULL, stmt);

    impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
					m_old_state, m_new_state,
					NULL, NULL,
					stmt);
    const svalue *origin_new_sval
      = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);

    state_machine::state_t current
      = m_old_smap->get_state (sval, m_eg.get_ext_state ());
    if (logger)
      {
	logger->start_log_line ();
	logger->log_partial ("%s: state transition of ",
			     m_sm.get_name ());
	sval->dump_to_pp (logger->get_printer (), true);
	logger->log_partial (": %s -> %s",
			     current->get_name (),
			     to->get_name ());
	logger->end_log_line ();
      }
    m_new_smap->set_state (m_new_state->m_region_model, sval,
			   to, origin_new_sval, m_eg.get_ext_state ());
  }

  /* Implementation of sm_context::warn vfunc: queue diagnostic D
     (taking ownership of it) against VAR's old sm-state (or the
     global sm-state if VAR is NULL_TREE).  */
  void warn (const supernode *snode, const gimple *stmt,
	     tree var, pending_diagnostic *d) FINAL OVERRIDE
  {
    LOG_FUNC (get_logger ());
    gcc_assert (d); // take ownership
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, NULL);

    const svalue *var_old_sval
      = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
    state_machine::state_t current
      = (var
	 ? m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ())
	 : m_old_smap->get_global_state ());
    m_eg.get_diagnostic_manager ().add_diagnostic
      (&m_sm, m_enode_for_diag, snode, stmt, m_stmt_finder,
       var, var_old_sval, current, d);
  }

  /* Hook for picking more readable trees for SSA names of temporaries,
     so that rather than e.g.
       "double-free of '<unknown>'"
     we can print:
       "double-free of 'inbuf.data'".  */

  tree get_diagnostic_tree (tree expr) FINAL OVERRIDE
  {
    /* Only for SSA_NAMEs of temporaries; otherwise, return EXPR, as it's
       likely to be the least surprising tree to report.  */
    if (TREE_CODE (expr) != SSA_NAME)
      return expr;
    if (SSA_NAME_VAR (expr) != NULL)
      return expr;

    gcc_assert (m_new_state);
    const svalue *sval = m_new_state->m_region_model->get_rvalue (expr, NULL);
    /* Find trees for all regions storing the value.  */
    if (tree t = m_new_state->m_region_model->get_representative_tree (sval))
      return t;
    else
      return expr;
  }

  /* As above, but starting from an svalue; may return NULL_TREE.  */
  tree get_diagnostic_tree (const svalue *sval) FINAL OVERRIDE
  {
    return m_new_state->m_region_model->get_representative_tree (sval);
  }

  /* Get this state machine's global state as of the "old" state.  */
  state_machine::state_t get_global_state () const FINAL OVERRIDE
  {
    return m_old_state->m_checker_states[m_sm_idx]->get_global_state ();
  }

  /* Set this state machine's global state within the "new" state.  */
  void set_global_state (state_machine::state_t state) FINAL OVERRIDE
  {
    m_new_state->m_checker_states[m_sm_idx]->set_global_state (state);
  }

  /* Implementation of sm_context::on_custom_transition vfunc:
     let TRANSITION update the exploded graph itself.  */
  void on_custom_transition (custom_transition *transition) FINAL OVERRIDE
  {
    transition->impl_transition (&m_eg,
				 const_cast<exploded_node *> (m_enode_for_diag),
				 m_sm_idx);
  }

  /* If STMT is an assignment of zero (in any form with a constant
     zero RHS result), return its LHS; otherwise return NULL_TREE.  */
  tree is_zero_assignment (const gimple *stmt) FINAL OVERRIDE
  {
    const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
    if (!assign_stmt)
      return NULL_TREE;
    impl_region_model_context old_ctxt
      (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, stmt);
    if (const svalue *sval
	  = m_new_state->m_region_model->get_gassign_result (assign_stmt,
							     &old_ctxt))
      if (tree cst = sval->maybe_get_constant ())
	if (::zerop(cst))
	  return gimple_assign_lhs (assign_stmt);
    return NULL_TREE;
  }

  /* Accessor for the path context, if any.  */
  path_context *get_path_context () const FINAL OVERRIDE
  {
    return m_path_ctxt;
  }

  /* Are we handling a call to an external function with unknown
     side effects?  */
  bool unknown_side_effects_p () const FINAL OVERRIDE
  {
    return m_unknown_side_effects;
  }

  /* Accessor for the "old" program state.  */
  const program_state *get_old_program_state () const FINAL OVERRIDE
  {
    return m_old_state;
  }

  log_user m_logger;
  exploded_graph &m_eg;
  exploded_node *m_enode_for_diag;
  const program_state *m_old_state;
  program_state *m_new_state;
  const sm_state_map *m_old_smap;
  sm_state_map *m_new_smap;
  path_context *m_path_ctxt;
  stmt_finder *m_stmt_finder;

  /* Are we handling an external function with unknown side effects?  */
  bool m_unknown_side_effects;
};
528
529 /* Subclass of stmt_finder for finding the best stmt to report the leak at,
530 given the emission path. */
531
class leak_stmt_finder : public stmt_finder
{
public:
  /* EG is the exploded graph; VAR is the tree for the leaked value,
     or NULL_TREE if no tree could be found for it.  */
  leak_stmt_finder (const exploded_graph &eg, tree var)
  : m_eg (eg), m_var (var) {}

  stmt_finder *clone () const FINAL OVERRIDE
  {
    return new leak_stmt_finder (m_eg, m_var);
  }

  /* Implementation of stmt_finder::find_stmt vfunc.
     For an SSA name, try to report at the stmt after its final write
     in EPATH (the next write to the underlying var); otherwise fall
     back to the last stmt in EPATH that has a usable location.  */
  const gimple *find_stmt (const exploded_path &epath)
    FINAL OVERRIDE
  {
    logger * const logger = m_eg.get_logger ();
    LOG_FUNC (logger);

    if (m_var && TREE_CODE (m_var) == SSA_NAME)
      {
	/* Locate the final write to this SSA name in the path.  */
	const gimple *def_stmt = SSA_NAME_DEF_STMT (m_var);

	int idx_of_def_stmt;
	bool found = epath.find_stmt_backwards (def_stmt, &idx_of_def_stmt);
	if (!found)
	  goto not_found;

	/* What was the next write to the underlying var
	   after the SSA name was set? (if any).  */

	for (unsigned idx = idx_of_def_stmt + 1;
	     idx < epath.m_edges.length ();
	     ++idx)
	  {
	    const exploded_edge *eedge = epath.m_edges[idx];
	    if (logger)
	      logger->log ("eedge[%i]: EN %i -> EN %i",
			   idx,
			   eedge->m_src->m_index,
			   eedge->m_dest->m_index);
	    const exploded_node *dst_node = eedge->m_dest;
	    const program_point &dst_point = dst_node->get_point ();
	    const gimple *stmt = dst_point.get_stmt ();
	    if (!stmt)
	      continue;
	    if (const gassign *assign = dyn_cast <const gassign *> (stmt))
	      {
		tree lhs = gimple_assign_lhs (assign);
		if (TREE_CODE (lhs) == SSA_NAME
		    && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (m_var))
		  return assign;
	      }
	  }
      }

  not_found:

    /* Look backwards for the first statement with a location.  */
    int i;
    const exploded_edge *eedge;
    FOR_EACH_VEC_ELT_REVERSE (epath.m_edges, i, eedge)
      {
	if (logger)
	  logger->log ("eedge[%i]: EN %i -> EN %i",
		       i,
		       eedge->m_src->m_index,
		       eedge->m_dest->m_index);
	const exploded_node *dst_node = eedge->m_dest;
	const program_point &dst_point = dst_node->get_point ();
	const gimple *stmt = dst_point.get_stmt ();
	if (stmt)
	  if (get_pure_location (stmt->location) != UNKNOWN_LOCATION)
	    return stmt;
      }

    /* A path is expected to contain at least one located stmt.  */
    gcc_unreachable ();
    return NULL;
  }

private:
  const exploded_graph &m_eg;
  tree m_var;
};
615
616 /* A measurement of how good EXPR is for presenting to the user, so
617 that e.g. we can say prefer printing
618 "leak of 'tmp.m_ptr'"
619 over:
620 "leak of '<unknown>'". */
621
622 static int
readability(const_tree expr)623 readability (const_tree expr)
624 {
625 /* Arbitrarily-chosen "high readability" value. */
626 const int HIGH_READABILITY = 65536;
627
628 gcc_assert (expr);
629 switch (TREE_CODE (expr))
630 {
631 case COMPONENT_REF:
632 case MEM_REF:
633 /* Impose a slight readability penalty relative to that of
634 operand 0. */
635 return readability (TREE_OPERAND (expr, 0)) - 16;
636
637 case SSA_NAME:
638 {
639 if (tree var = SSA_NAME_VAR (expr))
640 {
641 if (DECL_ARTIFICIAL (var))
642 {
643 /* If we have an SSA name for an artificial var,
644 only use it if it has a debug expr associated with
645 it that fixup_tree_for_diagnostic can use. */
646 if (VAR_P (var) && DECL_HAS_DEBUG_EXPR_P (var))
647 return readability (DECL_DEBUG_EXPR (var)) - 1;
648 }
649 else
650 {
651 /* Slightly favor the underlying var over the SSA name to
652 avoid having them compare equal. */
653 return readability (var) - 1;
654 }
655 }
656 /* Avoid printing '<unknown>' for SSA names for temporaries. */
657 return -1;
658 }
659 break;
660
661 case PARM_DECL:
662 case VAR_DECL:
663 if (DECL_NAME (expr))
664 return HIGH_READABILITY;
665 else
666 /* We don't want to print temporaries. For example, the C FE
667 prints them as e.g. "<Uxxxx>" where "xxxx" is the low 16 bits
668 of the tree pointer (see pp_c_tree_decl_identifier). */
669 return -1;
670
671 case RESULT_DECL:
672 /* Printing "<return-value>" isn't ideal, but is less awful than
673 trying to print a temporary. */
674 return HIGH_READABILITY / 2;
675
676 case NOP_EXPR:
677 {
678 /* Impose a moderate readability penalty for casts. */
679 const int CAST_PENALTY = 32;
680 return readability (TREE_OPERAND (expr, 0)) - CAST_PENALTY;
681 }
682
683 case INTEGER_CST:
684 return HIGH_READABILITY;
685
686 default:
687 return 0;
688 }
689
690 return 0;
691 }
692
/* A qsort comparator for trees to sort them into most user-readable to
   least user-readable.  P1 and P2 point at path_var instances;
   the comparison combines the readability of the tree itself with a
   bonus for deeper stack frames, then falls back to deterministic
   tie-breakers.  */

int
readability_comparator (const void *p1, const void *p2)
{
  path_var pv1 = *(path_var const *)p1;
  path_var pv2 = *(path_var const *)p2;

  const int tree_r1 = readability (pv1.m_tree);
  const int tree_r2 = readability (pv2.m_tree);

  /* Favor items that are deeper on the stack and hence more recent;
     this also favors locals over globals.  */
  const int COST_PER_FRAME = 64;
  const int depth_r1 = pv1.m_stack_depth * COST_PER_FRAME;
  const int depth_r2 = pv2.m_stack_depth * COST_PER_FRAME;

  /* Combine the scores from the tree and from the stack depth.
     This e.g. lets us have a slightly penalized cast in the most
     recent stack frame "beat" an uncast value in an older stack frame.  */
  const int sum_r1 = tree_r1 + depth_r1;
  const int sum_r2 = tree_r2 + depth_r2;
  if (int cmp = sum_r2 - sum_r1)
    return cmp;

  /* Otherwise, more readable trees win.  */
  if (int cmp = tree_r2 - tree_r1)
    return cmp;

  /* Otherwise, if they have the same readability, then impose an
     arbitrary deterministic ordering on them.  */

  if (int cmp = TREE_CODE (pv1.m_tree) - TREE_CODE (pv2.m_tree))
    return cmp;

  switch (TREE_CODE (pv1.m_tree))
    {
    default:
      break;
    case SSA_NAME:
      if (int cmp = (SSA_NAME_VERSION (pv1.m_tree)
		     - SSA_NAME_VERSION (pv2.m_tree)))
	return cmp;
      break;
    case PARM_DECL:
    case VAR_DECL:
    case RESULT_DECL:
      if (int cmp = DECL_UID (pv1.m_tree) - DECL_UID (pv2.m_tree))
	return cmp;
      break;
    }

  /* TODO: We ought to find ways of sorting such cases.  */
  return 0;
}
749
750 /* Return true is SNODE is the EXIT node of a function, or is one
751 of the final snodes within its function.
752
753 Specifically, handle the final supernodes before the EXIT node,
754 for the case of clobbers that happen immediately before exiting.
755 We need a run of snodes leading to the return_p snode, where all edges are
756 intraprocedural, and every snode has just one successor.
757
758 We use this when suppressing leak reports at the end of "main". */
759
760 static bool
returning_from_function_p(const supernode * snode)761 returning_from_function_p (const supernode *snode)
762 {
763 if (!snode)
764 return false;
765
766 unsigned count = 0;
767 const supernode *iter = snode;
768 while (true)
769 {
770 if (iter->return_p ())
771 return true;
772 if (iter->m_succs.length () != 1)
773 return false;
774 const superedge *sedge = iter->m_succs[0];
775 if (sedge->get_kind () != SUPEREDGE_CFG_EDGE)
776 return false;
777 iter = sedge->m_dest;
778
779 /* Impose a limit to ensure we terminate for pathological cases.
780
781 We only care about the final 3 nodes, due to cases like:
782 BB:
783 (clobber causing leak)
784
785 BB:
786 <label>:
787 return _val;
788
789 EXIT BB.*/
790 if (++count > 3)
791 return false;
792 }
793 }
794
/* Find the best tree for SVAL and call SM's on_leak vfunc with it.
   If on_leak returns a pending_diagnostic, queue it up to be reported,
   so that we potentially complain about a leak of SVAL in the given
   STATE.  No-op if there is no exploded graph; leaks when returning
   from "main" are deliberately not reported.  */

void
impl_region_model_context::on_state_leak (const state_machine &sm,
					  const svalue *sval,
					  state_machine::state_t state)
{
  logger * const logger = get_logger ();
  LOG_SCOPE (logger);
  if (logger)
    {
      logger->start_log_line ();
      logger->log_partial ("considering leak of ");
      sval->dump_to_pp (logger->get_printer (), true);
      logger->end_log_line ();
    }

  /* Without an exploded graph there is no diagnostic manager to
     report to.  */
  if (!m_eg)
    return;

  /* m_old_state also needs to be non-NULL so that the sm_ctxt can look
     up the old state of SVAL.  */
  gcc_assert (m_old_state);

  /* SVAL has leaked within the new state: it is not used by any reachable
     regions.
     We need to convert it back to a tree, but since it's likely no regions
     use it, we have to find the "best" tree for it in the old_state.  */
  svalue_set visited;
  path_var leaked_pv
    = m_old_state->m_region_model->get_representative_path_var (sval,
								&visited);

  /* Strip off top-level casts  */
  if (leaked_pv.m_tree && TREE_CODE (leaked_pv.m_tree) == NOP_EXPR)
    leaked_pv.m_tree = TREE_OPERAND (leaked_pv.m_tree, 0);

  /* This might be NULL; the pending_diagnostic subclasses need to cope
     with this.  */
  tree leaked_tree = leaked_pv.m_tree;
  if (logger)
    {
      if (leaked_tree)
	logger->log ("best leaked_tree: %qE", leaked_tree);
      else
	logger->log ("best leaked_tree: NULL");
    }

  leak_stmt_finder stmt_finder (*m_eg, leaked_tree);
  gcc_assert (m_enode_for_diag);

  /* Don't complain about leaks when returning from "main".  */
  if (returning_from_function_p (m_enode_for_diag->get_supernode ()))
    {
      tree fndecl = m_enode_for_diag->get_function ()->decl;
      if (id_equal (DECL_NAME (fndecl), "main"))
	{
	  if (logger)
	    logger->log ("not reporting leak from main");
	  return;
	}
    }

  tree leaked_tree_for_diag = fixup_tree_for_diagnostic (leaked_tree);
  pending_diagnostic *pd = sm.on_leak (leaked_tree_for_diag);
  if (pd)
    m_eg->get_diagnostic_manager ().add_diagnostic
      (&sm, m_enode_for_diag, m_enode_for_diag->get_supernode (),
       m_stmt, &stmt_finder,
       leaked_tree_for_diag, sval, state, pd);
}
868
/* Implementation of region_model_context::on_condition vfunc.
   Notify all state machines about the condition LHS OP RHS, which
   could lead to state transitions.  Each state machine gets its own
   impl_sm_context wired to the old/new program states.  */

void
impl_region_model_context::on_condition (const svalue *lhs,
					 enum tree_code op,
					 const svalue *rhs)
{
  int sm_idx;
  sm_state_map *smap;
  FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
    {
      const state_machine &sm = m_ext_state.get_sm (sm_idx);
      impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
			       m_old_state, m_new_state,
			       m_old_state->m_checker_states[sm_idx],
			       m_new_state->m_checker_states[sm_idx],
			       m_path_ctxt);
      /* m_enode_for_diag may be NULL here, hence the guard.  */
      sm.on_condition (&sm_ctxt,
		       (m_enode_for_diag
			? m_enode_for_diag->get_supernode ()
			: NULL),
		       m_stmt,
		       lhs, op, rhs);
    }
}
896
/* Implementation of region_model_context::on_phi vfunc.
   Notify all state machines about the phi, which could lead to
   state transitions.
   NOTE(review): unlike on_condition, this dereferences
   m_enode_for_diag without a NULL check — presumably phis are only
   processed with a diagnostic enode present; confirm.  */

void
impl_region_model_context::on_phi (const gphi *phi, tree rhs)
{
  int sm_idx;
  sm_state_map *smap;
  FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
    {
      const state_machine &sm = m_ext_state.get_sm (sm_idx);
      impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
			       m_old_state, m_new_state,
			       m_old_state->m_checker_states[sm_idx],
			       m_new_state->m_checker_states[sm_idx],
			       m_path_ctxt);
      sm.on_phi (&sm_ctxt, m_enode_for_diag->get_supernode (), phi, rhs);
    }
}
917
/* Implementation of region_model_context::on_unexpected_tree_code vfunc.
   Log the unhandled tree code for T (with the analyzer source location
   LOC at which it was encountered), and mark the new state as being
   invalid for further exploration.
   TODO(stage1): introduce a warning for when this occurs.  */

void
impl_region_model_context::on_unexpected_tree_code (tree t,
						    const dump_location_t &loc)
{
  logger * const logger = get_logger ();
  if (logger)
    logger->log ("unhandled tree code: %qs in %qs at %s:%i",
		 get_tree_code_name (TREE_CODE (t)),
		 loc.get_impl_location ().m_function,
		 loc.get_impl_location ().m_file,
		 loc.get_impl_location ().m_line);
  /* m_new_state may be NULL for some contexts; only flag it when
     present.  */
  if (m_new_state)
    m_new_state->m_valid = false;
}
936
937 /* struct point_and_state. */
938
/* Assert that this object is sane: the program point and program
   state are individually valid, and their models of the call stack
   agree with each other.  Compiled away to an early return in
   release builds.  */

void
point_and_state::validate (const extrinsic_state &ext_state) const
{
  /* Skip this in a release build.  */
#if !CHECKING_P
  return;
#endif

  m_point.validate ();

  m_state.validate (ext_state);

  /* Verify that the callstring's model of the stack corresponds to that
     of the region_model.  */
  /* They should have the same depth.  */
  gcc_assert (m_point.get_stack_depth ()
	      == m_state.m_region_model->get_stack_depth ());
  /* Check the functions in the callstring vs those in the frames
     at each depth.  */
  for (const frame_region *iter_frame
	 = m_state.m_region_model->get_current_frame ();
       iter_frame; iter_frame = iter_frame->get_calling_frame ())
    {
      int index = iter_frame->get_index ();
      gcc_assert (m_point.get_function_at_depth (index)
		  == iter_frame->get_function ());
    }
}
969
970 /* Subroutine of print_enode_indices: print a run of indices from START_IDX
971 to END_IDX to PP, using and updating *FIRST_RUN. */
972
973 static void
print_run(pretty_printer * pp,int start_idx,int end_idx,bool * first_run)974 print_run (pretty_printer *pp, int start_idx, int end_idx,
975 bool *first_run)
976 {
977 if (!(*first_run))
978 pp_string (pp, ", ");
979 *first_run = false;
980 if (start_idx == end_idx)
981 pp_printf (pp, "EN: %i", start_idx);
982 else
983 pp_printf (pp, "EN: %i-%i", start_idx, end_idx);
984 }
985
986 /* Print the indices within ENODES to PP, collecting them as
987 runs/singletons e.g. "EN: 4-7, EN: 20-23, EN: 42". */
988
989 static void
print_enode_indices(pretty_printer * pp,const auto_vec<exploded_node * > & enodes)990 print_enode_indices (pretty_printer *pp,
991 const auto_vec<exploded_node *> &enodes)
992 {
993 int cur_start_idx = -1;
994 int cur_finish_idx = -1;
995 bool first_run = true;
996 unsigned i;
997 exploded_node *enode;
998 FOR_EACH_VEC_ELT (enodes, i, enode)
999 {
1000 if (cur_start_idx == -1)
1001 {
1002 gcc_assert (cur_finish_idx == -1);
1003 cur_start_idx = cur_finish_idx = enode->m_index;
1004 }
1005 else
1006 {
1007 if (enode->m_index == cur_finish_idx + 1)
1008 /* Continuation of a run. */
1009 cur_finish_idx = enode->m_index;
1010 else
1011 {
1012 /* Finish existing run, start a new one. */
1013 gcc_assert (cur_start_idx >= 0);
1014 gcc_assert (cur_finish_idx >= 0);
1015 print_run (pp, cur_start_idx, cur_finish_idx,
1016 &first_run);
1017 cur_start_idx = cur_finish_idx = enode->m_index;
1018 }
1019 }
1020 }
1021 /* Finish any existing run. */
1022 if (cur_start_idx >= 0)
1023 {
1024 gcc_assert (cur_finish_idx >= 0);
1025 print_run (pp, cur_start_idx, cur_finish_idx,
1026 &first_run);
1027 }
1028 }
1029
1030 /* struct eg_traits::dump_args_t. */
1031
1032 /* The <FILENAME>.eg.dot output can quickly become unwieldy if we show
1033 full details for all enodes (both in terms of CPU time to render it,
1034 and in terms of being meaningful to a human viewing it).
1035
1036 If we show just the IDs then the resulting graph is usually viewable,
1037 but then we have to keep switching back and forth between the .dot
1038 view and other dumps.
1039
1040 This function implements a heuristic for showing detail at the enodes
1041 that (we hope) matter, and just the ID at other enodes, fixing the CPU
1042 usage of the .dot viewer, and drawing the attention of the viewer
1043 to these enodes.
1044
1045 Return true if ENODE should be shown in detail in .dot output.
1046 Return false if no detail should be shown for ENODE. */
1047
1048 bool
show_enode_details_p(const exploded_node & enode) const1049 eg_traits::dump_args_t::show_enode_details_p (const exploded_node &enode) const
1050 {
1051 /* If the number of exploded nodes isn't too large, we may as well show
1052 all enodes in full detail in the .dot output. */
1053 if (m_eg.m_nodes.length ()
1054 <= (unsigned) param_analyzer_max_enodes_for_full_dump)
1055 return true;
1056
1057 /* Otherwise, assume that what's most interesting are state explosions,
1058 and thus the places where this happened.
1059 Expand enodes at program points where we hit the per-enode limit, so we
1060 can investigate what exploded. */
1061 const per_program_point_data *per_point_data
1062 = m_eg.get_per_program_point_data (enode.get_point ());
1063 return per_point_data->m_excess_enodes > 0;
1064 }
1065
1066 /* class exploded_node : public dnode<eg_traits>. */
1067
1068 const char *
status_to_str(enum status s)1069 exploded_node::status_to_str (enum status s)
1070 {
1071 switch (s)
1072 {
1073 default: gcc_unreachable ();
1074 case STATUS_WORKLIST: return "WORKLIST";
1075 case STATUS_PROCESSED: return "PROCESSED";
1076 case STATUS_MERGER: return "MERGER";
1077 case STATUS_BULK_MERGED: return "BULK_MERGED";
1078 }
1079 }
1080
1081 /* exploded_node's ctor. */
1082
exploded_node::exploded_node (const point_and_state &ps,
			      int index)
/* New enodes start out on the worklist, with no stmts processed yet.  */
: m_ps (ps), m_status (STATUS_WORKLIST), m_index (index),
  m_num_processed_stmts (0)
{
  /* Enodes are only built from canonicalized region models;
     presumably so that equal states compare/hash equal — TODO confirm.  */
  gcc_checking_assert (ps.get_state ().m_region_model->canonicalized_p ());
}
1090
1091 /* Get the stmt that was processed in this enode at index IDX.
1092 IDX is an index within the stmts processed at this enode, rather
1093 than within those of the supernode. */
1094
1095 const gimple *
get_processed_stmt(unsigned idx) const1096 exploded_node::get_processed_stmt (unsigned idx) const
1097 {
1098 gcc_assert (idx < m_num_processed_stmts);
1099 const program_point &point = get_point ();
1100 gcc_assert (point.get_kind () == PK_BEFORE_STMT);
1101 const supernode *snode = get_supernode ();
1102 const unsigned int point_stmt_idx = point.get_stmt_idx ();
1103 const unsigned int idx_within_snode = point_stmt_idx + idx;
1104 const gimple *stmt = snode->m_stmts[idx_within_snode];
1105 return stmt;
1106 }
1107
1108 /* For use by dump_dot, get a value for the .dot "fillcolor" attribute.
1109 Colorize by sm-state, to make it easier to see how sm-state propagates
1110 through the exploded_graph. */
1111
1112 const char *
get_dot_fillcolor() const1113 exploded_node::get_dot_fillcolor () const
1114 {
1115 const program_state &state = get_state ();
1116
1117 /* We want to be able to easily distinguish the no-sm-state case,
1118 and to be able to distinguish cases where there's a single state
1119 from each other.
1120
1121 Sum the sm_states, and use the result to choose from a table,
1122 modulo table-size, special-casing the "no sm-state" case. */
1123 int total_sm_state = 0;
1124 int i;
1125 sm_state_map *smap;
1126 FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
1127 {
1128 for (sm_state_map::iterator_t iter = smap->begin ();
1129 iter != smap->end ();
1130 ++iter)
1131 total_sm_state += (*iter).second.m_state->get_id ();
1132 total_sm_state += smap->get_global_state ()->get_id ();
1133 }
1134
1135 if (total_sm_state > 0)
1136 {
1137 /* An arbitrarily-picked collection of light colors. */
1138 const char * const colors[]
1139 = {"azure", "coral", "cornsilk", "lightblue", "yellow",
1140 "honeydew", "lightpink", "lightsalmon", "palegreen1",
1141 "wheat", "seashell"};
1142 const int num_colors = sizeof (colors) / sizeof (colors[0]);
1143 return colors[total_sm_state % num_colors];
1144 }
1145 else
1146 /* No sm-state. */
1147 return "lightgrey";
1148 }
1149
1150 /* Implementation of dnode::dump_dot vfunc for exploded_node. */
1151
void
exploded_node::dump_dot (graphviz_out *gv, const dump_args_t &args) const
{
  pretty_printer *pp = gv->get_pp ();

  /* Open the node definition, colorized by sm-state.  */
  dump_dot_id (pp);
  pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
	     get_dot_fillcolor ());
  pp_write_text_to_stream (pp);

  /* Heading: the enode index, plus any merger status.  */
  pp_printf (pp, "EN: %i", m_index);
  if (m_status == STATUS_MERGER)
    pp_string (pp, " (merger)");
  else if (m_status == STATUS_BULK_MERGED)
    pp_string (pp, " (bulk merged)");
  pp_newline (pp);

  /* Full details (point, state, processed stmts) only for the enodes
     deemed interesting by the heuristic in show_enode_details_p.  */
  if (args.show_enode_details_p (*this))
    {
      format f (true);
      m_ps.get_point ().print (pp, f);
      pp_newline (pp);

      const extrinsic_state &ext_state = args.m_eg.get_ext_state ();
      const program_state &state = m_ps.get_state ();
      state.dump_to_pp (ext_state, false, true, pp);
      pp_newline (pp);

      dump_processed_stmts (pp);
    }

  dump_saved_diagnostics (pp);

  args.dump_extra_info (this, pp);

  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);

  /* Close the node definition.  */
  pp_string (pp, "\"];\n\n");

  /* It can be hard to locate the saved diagnostics as text within the
     enode nodes, so add extra nodes to the graph for each saved_diagnostic,
     highlighted in red.
     Compare with dump_saved_diagnostics.  */
  {
    unsigned i;
    const saved_diagnostic *sd;
    FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
      {
	sd->dump_as_dot_node (pp);

	/* Add edge connecting this enode to the saved_diagnostic.  */
	dump_dot_id (pp);
	pp_string (pp, " -> ");
	sd->dump_dot_id (pp);
	pp_string (pp, " [style=\"dotted\" arrowhead=\"none\"];");
	pp_newline (pp);
      }
  }

  pp_flush (pp);
}
1213
1214 /* Show any stmts that were processed within this enode,
1215 and their index within the supernode. */
1216 void
dump_processed_stmts(pretty_printer * pp) const1217 exploded_node::dump_processed_stmts (pretty_printer *pp) const
1218 {
1219 if (m_num_processed_stmts > 0)
1220 {
1221 const program_point &point = get_point ();
1222 gcc_assert (point.get_kind () == PK_BEFORE_STMT);
1223 const supernode *snode = get_supernode ();
1224 const unsigned int point_stmt_idx = point.get_stmt_idx ();
1225
1226 pp_printf (pp, "stmts: %i", m_num_processed_stmts);
1227 pp_newline (pp);
1228 for (unsigned i = 0; i < m_num_processed_stmts; i++)
1229 {
1230 const unsigned int idx_within_snode = point_stmt_idx + i;
1231 const gimple *stmt = snode->m_stmts[idx_within_snode];
1232 pp_printf (pp, " %i: ", idx_within_snode);
1233 pp_gimple_stmt_1 (pp, stmt, 0, (dump_flags_t)0);
1234 pp_newline (pp);
1235 }
1236 }
1237 }
1238
1239 /* Dump any saved_diagnostics at this enode to PP. */
1240
void
exploded_node::dump_saved_diagnostics (pretty_printer *pp) const
{
  /* One line per diagnostic: its kind and its index within the
     diagnostic manager.  */
  unsigned i;
  const saved_diagnostic *sd;
  FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
    {
      pp_printf (pp, "DIAGNOSTIC: %s (sd: %i)",
		 sd->m_d->get_kind (), sd->get_index ());
      pp_newline (pp);
    }
}
1253
1254 /* Dump this to PP in a form suitable for use as an id in .dot output. */
1255
void
exploded_node::dump_dot_id (pretty_printer *pp) const
{
  /* e.g. "exploded_node_42"; must be unique per node for .dot output.  */
  pp_printf (pp, "exploded_node_%i", m_index);
}
1261
1262 /* Dump a multiline representation of this node to PP. */
1263
void
exploded_node::dump_to_pp (pretty_printer *pp,
			   const extrinsic_state &ext_state) const
{
  /* Index, then point, then state.  */
  pp_printf (pp, "EN: %i", m_index);
  pp_newline (pp);

  format f (true);
  m_ps.get_point ().print (pp, f);
  pp_newline (pp);

  m_ps.get_state ().dump_to_pp (ext_state, false, true, pp);
  pp_newline (pp);
}
1278
1279 /* Dump a multiline representation of this node to FILE. */
1280
void
exploded_node::dump (FILE *fp,
		     const extrinsic_state &ext_state) const
{
  /* Set up a pretty_printer that writes directly to FP, honoring
     the global diagnostic context's colorization setting.  */
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  pp.buffer->stream = fp;
  dump_to_pp (&pp, ext_state);
  pp_flush (&pp);
}
1292
1293 /* Dump a multiline representation of this node to stderr. */
1294
DEBUG_FUNCTION void
exploded_node::dump (const extrinsic_state &ext_state) const
{
  /* Convenience wrapper for use from the debugger.  */
  dump (stderr, ext_state);
}
1300
1301 /* Return a new json::object of the form
1302 {"point" : object for program_point,
1303 "state" : object for program_state,
1304 "status" : str,
1305 "idx" : int,
1306 "processed_stmts" : int}. */
1307
json::object *
exploded_node::to_json (const extrinsic_state &ext_state) const
{
  /* Caller takes ownership of the returned object.  */
  json::object *enode_obj = new json::object ();

  enode_obj->set ("point", get_point ().to_json ());
  enode_obj->set ("state", get_state ().to_json (ext_state));
  enode_obj->set ("status", new json::string (status_to_str (m_status)));
  enode_obj->set ("idx", new json::integer_number (m_index));
  enode_obj->set ("processed_stmts",
		  new json::integer_number (m_num_processed_stmts));

  return enode_obj;
}
1322
1323 } // namespace ana
1324
1325 /* Return true if FNDECL has a gimple body. */
1326 // TODO: is there a pre-canned way to do this?
1327
1328 bool
fndecl_has_gimple_body_p(tree fndecl)1329 fndecl_has_gimple_body_p (tree fndecl)
1330 {
1331 if (fndecl == NULL_TREE)
1332 return false;
1333
1334 cgraph_node *n = cgraph_node::get (fndecl);
1335 if (!n)
1336 return false;
1337
1338 return n->has_gimple_body_p ();
1339 }
1340
1341 namespace ana {
1342
1343 /* Modify STATE in place, applying the effects of the stmt at this node's
1344 point. */
1345
exploded_node::on_stmt_flags
exploded_node::on_stmt (exploded_graph &eg,
			const supernode *snode,
			const gimple *stmt,
			program_state *state,
			uncertainty_t *uncertainty,
			path_context *path_ctxt)
{
  logger *logger = eg.get_logger ();
  LOG_SCOPE (logger);
  /* If logging, show the stmt being processed.  */
  if (logger)
    {
      logger->start_log_line ();
      pp_gimple_stmt_1 (logger->get_printer (), stmt, 0, (dump_flags_t)0);
      logger->end_log_line ();
    }

  /* Update input_location in case of ICE: make it easier to track down which
     source construct we're failing to handle.  */
  input_location = stmt->location;

  gcc_assert (state->m_region_model);

  /* Preserve the old state.  It is used here for looking
     up old checker states, for determining state transitions, and
     also within impl_region_model_context and impl_sm_context for
     going from tree to svalue_id.  */
  const program_state old_state (*state);

  impl_region_model_context ctxt (eg, this,
				  &old_state, state, uncertainty,
				  path_ctxt, stmt);

  bool unknown_side_effects = false;
  bool terminate_path = false;

  /* Pre-sm-state handling: special-case calls and region-model effects.  */
  on_stmt_pre (eg, stmt, state, &terminate_path,
	       &unknown_side_effects, &ctxt);

  if (terminate_path)
    return on_stmt_flags::terminate_path ();

  /* Give each state machine a chance to respond to the stmt, comparing
     against the pre-stmt checker states.  */
  int sm_idx;
  sm_state_map *smap;
  FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
    {
      const state_machine &sm = eg.get_ext_state ().get_sm (sm_idx);
      const sm_state_map *old_smap
	= old_state.m_checker_states[sm_idx];
      sm_state_map *new_smap = state->m_checker_states[sm_idx];
      impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
			       old_smap, new_smap, path_ctxt, NULL,
			       unknown_side_effects);

      /* Allow the state_machine to handle the stmt.  */
      if (sm.on_stmt (&sm_ctxt, snode, stmt))
	unknown_side_effects = false;
    }

  if (path_ctxt->terminate_path_p ())
    return on_stmt_flags::terminate_path ();

  /* Post-sm-state handling (e.g. effects of a call with unknown
     side effects).  */
  on_stmt_post (stmt, state, unknown_side_effects, &ctxt);

  return on_stmt_flags ();
}
1412
1413 /* Handle the pre-sm-state part of STMT, modifying STATE in-place.
1414 Write true to *OUT_TERMINATE_PATH if the path should be terminated.
1415 Write true to *OUT_UNKNOWN_SIDE_EFFECTS if the stmt has unknown
1416 side effects. */
1417
void
exploded_node::on_stmt_pre (exploded_graph &eg,
			    const gimple *stmt,
			    program_state *state,
			    bool *out_terminate_path,
			    bool *out_unknown_side_effects,
			    region_model_context *ctxt)
{
  /* Handle special-case calls that require the full program_state.  */
  if (const gcall *call = dyn_cast <const gcall *> (stmt))
    {
      if (is_special_named_call_p (call, "__analyzer_dump", 0))
	{
	  /* Handle the builtin "__analyzer_dump" by dumping state
	     to stderr.  */
	  state->dump (eg.get_ext_state (), true);
	  return;
	}
      else if (is_special_named_call_p (call, "__analyzer_dump_state", 2))
	{
	  state->impl_call_analyzer_dump_state (call, eg.get_ext_state (),
						ctxt);
	  return;
	}
      else if (is_setjmp_call_p (call))
	{
	  /* Record the setjmp in the region model, keyed to this enode.  */
	  state->m_region_model->on_setjmp (call, this, ctxt);
	  return;
	}
      else if (is_longjmp_call_p (call))
	{
	  /* longjmp terminates the "normal" path; the rewind is modelled
	     as a custom edge by on_longjmp.  */
	  on_longjmp (eg, call, state, ctxt);
	  *out_terminate_path = true;
	  return;
	}
    }

  /* Otherwise, defer to m_region_model.  */
  state->m_region_model->on_stmt_pre (stmt,
				      out_terminate_path,
				      out_unknown_side_effects,
				      ctxt);
}
1461
1462 /* Handle the post-sm-state part of STMT, modifying STATE in-place. */
1463
1464 void
on_stmt_post(const gimple * stmt,program_state * state,bool unknown_side_effects,region_model_context * ctxt)1465 exploded_node::on_stmt_post (const gimple *stmt,
1466 program_state *state,
1467 bool unknown_side_effects,
1468 region_model_context *ctxt)
1469 {
1470 if (const gcall *call = dyn_cast <const gcall *> (stmt))
1471 state->m_region_model->on_call_post (call, unknown_side_effects, ctxt);
1472 }
1473
1474 /* Consider the effect of following superedge SUCC from this node.
1475
1476 Return true if it's feasible to follow the edge, or false
1477 if it's infeasible.
1478
1479 Examples: if it's the "true" branch within
1480 a CFG and we know the conditional is false, we know it's infeasible.
1481 If it's one of multiple interprocedual "return" edges, then only
1482 the edge back to the most recent callsite is feasible.
1483
1484 Update NEXT_STATE accordingly (e.g. to record that a condition was
1485 true or false, or that the NULL-ness of a pointer has been checked,
1486 pushing/popping stack frames, etc).
1487
1488 Update NEXT_POINT accordingly (updating the call string). */
1489
1490 bool
on_edge(exploded_graph & eg,const superedge * succ,program_point * next_point,program_state * next_state,uncertainty_t * uncertainty)1491 exploded_node::on_edge (exploded_graph &eg,
1492 const superedge *succ,
1493 program_point *next_point,
1494 program_state *next_state,
1495 uncertainty_t *uncertainty)
1496 {
1497 LOG_FUNC (eg.get_logger ());
1498
1499 if (!next_point->on_edge (eg, succ))
1500 return false;
1501
1502 if (!next_state->on_edge (eg, this, succ, uncertainty))
1503 return false;
1504
1505 return true;
1506 }
1507
1508 /* Verify that the stack at LONGJMP_POINT is still valid, given a call
1509 to "setjmp" at SETJMP_POINT - the stack frame that "setjmp" was
1510 called in must still be valid.
1511
1512 Caveat: this merely checks the call_strings in the points; it doesn't
1513 detect the case where a frame returns and is then called again. */
1514
1515 static bool
valid_longjmp_stack_p(const program_point & longjmp_point,const program_point & setjmp_point)1516 valid_longjmp_stack_p (const program_point &longjmp_point,
1517 const program_point &setjmp_point)
1518 {
1519 const call_string &cs_at_longjmp = longjmp_point.get_call_string ();
1520 const call_string &cs_at_setjmp = setjmp_point.get_call_string ();
1521
1522 if (cs_at_longjmp.length () < cs_at_setjmp.length ())
1523 return false;
1524
1525 /* Check that the call strings match, up to the depth of the
1526 setjmp point. */
1527 for (unsigned depth = 0; depth < cs_at_setjmp.length (); depth++)
1528 if (cs_at_longjmp[depth] != cs_at_setjmp[depth])
1529 return false;
1530
1531 return true;
1532 }
1533
1534 /* A pending_diagnostic subclass for complaining about bad longjmps,
1535 where the enclosing function of the "setjmp" has returned (and thus
1536 the stack frame no longer exists). */
1537
1538 class stale_jmp_buf : public pending_diagnostic_subclass<stale_jmp_buf>
1539 {
1540 public:
stale_jmp_buf(const gcall * setjmp_call,const gcall * longjmp_call,const program_point & setjmp_point)1541 stale_jmp_buf (const gcall *setjmp_call, const gcall *longjmp_call,
1542 const program_point &setjmp_point)
1543 : m_setjmp_call (setjmp_call), m_longjmp_call (longjmp_call),
1544 m_setjmp_point (setjmp_point), m_stack_pop_event (NULL)
1545 {}
1546
get_controlling_option() const1547 int get_controlling_option () const FINAL OVERRIDE
1548 {
1549 return OPT_Wanalyzer_stale_setjmp_buffer;
1550 }
1551
emit(rich_location * richloc)1552 bool emit (rich_location *richloc) FINAL OVERRIDE
1553 {
1554 return warning_at
1555 (richloc, get_controlling_option (),
1556 "%qs called after enclosing function of %qs has returned",
1557 get_user_facing_name (m_longjmp_call),
1558 get_user_facing_name (m_setjmp_call));
1559 }
1560
get_kind() const1561 const char *get_kind () const FINAL OVERRIDE
1562 { return "stale_jmp_buf"; }
1563
operator ==(const stale_jmp_buf & other) const1564 bool operator== (const stale_jmp_buf &other) const
1565 {
1566 return (m_setjmp_call == other.m_setjmp_call
1567 && m_longjmp_call == other.m_longjmp_call);
1568 }
1569
1570 bool
maybe_add_custom_events_for_superedge(const exploded_edge & eedge,checker_path * emission_path)1571 maybe_add_custom_events_for_superedge (const exploded_edge &eedge,
1572 checker_path *emission_path)
1573 FINAL OVERRIDE
1574 {
1575 /* Detect exactly when the stack first becomes invalid,
1576 and issue an event then. */
1577 if (m_stack_pop_event)
1578 return false;
1579 const exploded_node *src_node = eedge.m_src;
1580 const program_point &src_point = src_node->get_point ();
1581 const exploded_node *dst_node = eedge.m_dest;
1582 const program_point &dst_point = dst_node->get_point ();
1583 if (valid_longjmp_stack_p (src_point, m_setjmp_point)
1584 && !valid_longjmp_stack_p (dst_point, m_setjmp_point))
1585 {
1586 /* Compare with diagnostic_manager::add_events_for_superedge. */
1587 const int src_stack_depth = src_point.get_stack_depth ();
1588 m_stack_pop_event = new precanned_custom_event
1589 (src_point.get_location (),
1590 src_point.get_fndecl (),
1591 src_stack_depth,
1592 "stack frame is popped here, invalidating saved environment");
1593 emission_path->add_event (m_stack_pop_event);
1594 return false;
1595 }
1596 return false;
1597 }
1598
describe_final_event(const evdesc::final_event & ev)1599 label_text describe_final_event (const evdesc::final_event &ev)
1600 {
1601 if (m_stack_pop_event)
1602 return ev.formatted_print
1603 ("%qs called after enclosing function of %qs returned at %@",
1604 get_user_facing_name (m_longjmp_call),
1605 get_user_facing_name (m_setjmp_call),
1606 m_stack_pop_event->get_id_ptr ());
1607 else
1608 return ev.formatted_print
1609 ("%qs called after enclosing function of %qs has returned",
1610 get_user_facing_name (m_longjmp_call),
1611 get_user_facing_name (m_setjmp_call));;
1612 }
1613
1614
1615 private:
1616 const gcall *m_setjmp_call;
1617 const gcall *m_longjmp_call;
1618 program_point m_setjmp_point;
1619 custom_event *m_stack_pop_event;
1620 };
1621
1622 /* Handle LONGJMP_CALL, a call to longjmp or siglongjmp.
1623
1624 Attempt to locate where setjmp/sigsetjmp was called on the jmp_buf and build
1625 an exploded_node and exploded_edge to it representing a rewind to that frame,
1626 handling the various kinds of failure that can occur. */
1627
void
exploded_node::on_longjmp (exploded_graph &eg,
			   const gcall *longjmp_call,
			   program_state *new_state,
			   region_model_context *ctxt)
{
  /* The first argument is the jmp_buf (or a pointer to it).  */
  tree buf_ptr = gimple_call_arg (longjmp_call, 0);
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (buf_ptr)));

  region_model *new_region_model = new_state->m_region_model;
  const svalue *buf_ptr_sval = new_region_model->get_rvalue (buf_ptr, ctxt);
  const region *buf = new_region_model->deref_rvalue (buf_ptr_sval, buf_ptr,
						      ctxt);

  /* If the buffer doesn't hold a value recorded by a setjmp, we can't
     model the rewind; silently give up.  */
  const svalue *buf_content_sval
    = new_region_model->get_store_value (buf, ctxt);
  const setjmp_svalue *setjmp_sval
    = buf_content_sval->dyn_cast_setjmp_svalue ();
  if (!setjmp_sval)
    return;

  const setjmp_record tmp_setjmp_record = setjmp_sval->get_setjmp_record ();

  /* Build a custom enode and eedge for rewinding from the longjmp/siglongjmp
     call back to the setjmp/sigsetjmp.  */
  rewind_info_t rewind_info (tmp_setjmp_record, longjmp_call);

  const gcall *setjmp_call = rewind_info.get_setjmp_call ();
  const program_point &setjmp_point = rewind_info.get_setjmp_point ();

  const program_point &longjmp_point = get_point ();

  /* Verify that the setjmp's call_stack hasn't been popped.  */
  if (!valid_longjmp_stack_p (longjmp_point, setjmp_point))
    {
      ctxt->warn (new stale_jmp_buf (setjmp_call, longjmp_call, setjmp_point));
      return;
    }

  gcc_assert (longjmp_point.get_stack_depth ()
	      >= setjmp_point.get_stack_depth ());

  /* Update the state for use by the destination node.  */

  /* Stash the current number of diagnostics so that we can update
     any that this adds to show where the longjmp is rewinding to.  */

  diagnostic_manager *dm = &eg.get_diagnostic_manager ();
  unsigned prev_num_diagnostics = dm->get_num_diagnostics ();

  new_region_model->on_longjmp (longjmp_call, setjmp_call,
				setjmp_point.get_stack_depth (), ctxt);

  /* Detect leaks in the new state relative to the old state.  */
  program_state::detect_leaks (get_state (), *new_state, NULL,
			       eg.get_ext_state (), ctxt);

  /* Control resumes immediately after the setjmp's supernode.  */
  program_point next_point
    = program_point::after_supernode (setjmp_point.get_supernode (),
				      setjmp_point.get_call_string ());

  exploded_node *next
    = eg.get_or_create_node (next_point, *new_state, this);

  /* Create custom exploded_edge for a longjmp.  */
  if (next)
    {
      exploded_edge *eedge
	= eg.add_edge (const_cast<exploded_node *> (this), next, NULL,
		       new rewind_info_t (tmp_setjmp_record, longjmp_call));

      /* For any diagnostics that were queued here (such as leaks) we want
	 the checker_path to show the rewinding events after the "final event"
	 so that the user sees where the longjmp is rewinding to (otherwise the
	 path is meaningless).

	 For example, we want to emit something like:
			|   NN | {
			|   NN |   longjmp (env, 1);
			|      |   ~~~~~~~~~~~~~~~~
			|      |   |
			|      |   (10) 'ptr' leaks here; was allocated at (7)
			|      |   (11) rewinding from 'longjmp' in 'inner'...
			|
	  <-------------+
	  |
	'outer': event 12
	  |
	  |   NN | i = setjmp(env);
	  |      |     ^~~~~~
	  |      |     |
	  |      |     (12) ...to 'setjmp' in 'outer' (saved at (2))

	 where the "final" event above is event (10), but we want to append
	 events (11) and (12) afterwards.

	 Do this by setting m_trailing_eedge on any diagnostics that were
	 just saved.  */
      unsigned num_diagnostics = dm->get_num_diagnostics ();
      for (unsigned i = prev_num_diagnostics; i < num_diagnostics; i++)
	{
	  saved_diagnostic *sd = dm->get_saved_diagnostic (i);
	  sd->m_trailing_eedge = eedge;
	}
    }
}
1734
1735 /* Subroutine of exploded_graph::process_node for finding the successors
1736 of the supernode for a function exit basic block.
1737
1738 Ensure that pop_frame is called, potentially queuing diagnostics about
1739 leaks. */
1740
void
exploded_node::detect_leaks (exploded_graph &eg)
{
  LOG_FUNC_1 (eg.get_logger (), "EN: %i", m_index);

  gcc_assert (get_point ().get_supernode ()->return_p ());

  /* If we're not a "top-level" function, do nothing; pop_frame
     will be called when handling the return superedge.  */
  if (get_point ().get_stack_depth () > 1)
    return;

  /* We have a "top-level" function.  */
  gcc_assert (get_point ().get_stack_depth () == 1);

  const program_state &old_state = get_state ();

  /* Work with a temporary copy of the state: pop the frame, and see
     what leaks (via purge_unused_svalues).  */
  program_state new_state (old_state);

  gcc_assert (new_state.m_region_model);

  uncertainty_t uncertainty;
  impl_region_model_context ctxt (eg, this,
				  &old_state, &new_state, &uncertainty, NULL,
				  get_stmt ());
  /* Pop the frame in the copy, capturing any return value, then compare
     the before/after states to report anything that leaked.  */
  const svalue *result = NULL;
  new_state.m_region_model->pop_frame (NULL, &result, &ctxt);
  program_state::detect_leaks (old_state, new_state, result,
			       eg.get_ext_state (), &ctxt);
}
1773
1774 /* Dump the successors and predecessors of this enode to OUTF. */
1775
1776 void
dump_succs_and_preds(FILE * outf) const1777 exploded_node::dump_succs_and_preds (FILE *outf) const
1778 {
1779 unsigned i;
1780 exploded_edge *e;
1781 {
1782 auto_vec<exploded_node *> preds (m_preds.length ());
1783 FOR_EACH_VEC_ELT (m_preds, i, e)
1784 preds.quick_push (e->m_src);
1785 pretty_printer pp;
1786 print_enode_indices (&pp, preds);
1787 fprintf (outf, "preds: %s\n",
1788 pp_formatted_text (&pp));
1789 }
1790 {
1791 auto_vec<exploded_node *> succs (m_succs.length ());
1792 FOR_EACH_VEC_ELT (m_succs, i, e)
1793 succs.quick_push (e->m_dest);
1794 pretty_printer pp;
1795 print_enode_indices (&pp, succs);
1796 fprintf (outf, "succs: %s\n",
1797 pp_formatted_text (&pp));
1798 }
1799 }
1800
1801 /* class dynamic_call_info_t : public custom_edge_info. */
1802
1803 /* Implementation of custom_edge_info::update_model vfunc
1804 for dynamic_call_info_t.
1805
1806 Update state for a dynamically discovered call (or return), by pushing
1807 or popping the a frame for the appropriate function. */
1808
1809 bool
update_model(region_model * model,const exploded_edge * eedge,region_model_context * ctxt) const1810 dynamic_call_info_t::update_model (region_model *model,
1811 const exploded_edge *eedge,
1812 region_model_context *ctxt) const
1813 {
1814 gcc_assert (eedge);
1815 if (m_is_returning_call)
1816 model->update_for_return_gcall (m_dynamic_call, ctxt);
1817 else
1818 {
1819 function *callee = eedge->m_dest->get_function ();
1820 model->update_for_gcall (m_dynamic_call, ctxt, callee);
1821 }
1822 return true;
1823 }
1824
1825 /* Implementation of custom_edge_info::add_events_to_path vfunc
1826 for dynamic_call_info_t. */
1827
1828 void
add_events_to_path(checker_path * emission_path,const exploded_edge & eedge) const1829 dynamic_call_info_t::add_events_to_path (checker_path *emission_path,
1830 const exploded_edge &eedge) const
1831 {
1832 const exploded_node *src_node = eedge.m_src;
1833 const program_point &src_point = src_node->get_point ();
1834 const int src_stack_depth = src_point.get_stack_depth ();
1835 const exploded_node *dest_node = eedge.m_dest;
1836 const program_point &dest_point = dest_node->get_point ();
1837 const int dest_stack_depth = dest_point.get_stack_depth ();
1838
1839 if (m_is_returning_call)
1840 emission_path->add_event (new return_event (eedge, (m_dynamic_call
1841 ? m_dynamic_call->location
1842 : UNKNOWN_LOCATION),
1843 dest_point.get_fndecl (),
1844 dest_stack_depth));
1845 else
1846 emission_path->add_event (new call_event (eedge, (m_dynamic_call
1847 ? m_dynamic_call->location
1848 : UNKNOWN_LOCATION),
1849 src_point.get_fndecl (),
1850 src_stack_depth));
1851
1852 }
1853
1854 /* class rewind_info_t : public custom_edge_info. */
1855
1856 /* Implementation of custom_edge_info::update_model vfunc
1857 for rewind_info_t.
1858
1859 Update state for the special-case of a rewind of a longjmp
1860 to a setjmp (which doesn't have a superedge, but does affect
1861 state). */
1862
bool
rewind_info_t::update_model (region_model *model,
			     const exploded_edge *eedge,
			     region_model_context *) const
{
  gcc_assert (eedge);
  const program_point &longjmp_point = eedge->m_src->get_point ();
  const program_point &setjmp_point = eedge->m_dest->get_point ();

  /* Rewinding can only pop frames, never add them.  */
  gcc_assert (longjmp_point.get_stack_depth ()
	      >= setjmp_point.get_stack_depth ());

  model->on_longjmp (get_longjmp_call (),
		     get_setjmp_call (),
		     setjmp_point.get_stack_depth (), NULL);
  return true;
}
1880
1881 /* Implementation of custom_edge_info::add_events_to_path vfunc
1882 for rewind_info_t. */
1883
void
rewind_info_t::add_events_to_path (checker_path *emission_path,
				   const exploded_edge &eedge) const
{
  const exploded_node *src_node = eedge.m_src;
  const program_point &src_point = src_node->get_point ();
  const int src_stack_depth = src_point.get_stack_depth ();
  const exploded_node *dst_node = eedge.m_dest;
  const program_point &dst_point = dst_node->get_point ();
  const int dst_stack_depth = dst_point.get_stack_depth ();

  /* Two paired events: "rewinding from ..." at the longjmp call,
     followed by "...to ..." at the setjmp call.  */
  emission_path->add_event
    (new rewind_from_longjmp_event
     (&eedge, get_longjmp_call ()->location,
      src_point.get_fndecl (),
      src_stack_depth, this));
  emission_path->add_event
    (new rewind_to_setjmp_event
     (&eedge, get_setjmp_call ()->location,
      dst_point.get_fndecl (),
      dst_stack_depth, this));
}
1906
1907 /* class exploded_edge : public dedge<eg_traits>. */
1908
1909 /* exploded_edge's ctor. */
1910
exploded_edge::exploded_edge (exploded_node *src, exploded_node *dest,
			      const superedge *sedge,
			      custom_edge_info *custom_info)
/* Takes ownership of CUSTOM_INFO, if any; it's deleted by the dtor.  */
: dedge<eg_traits> (src, dest), m_sedge (sedge),
  m_custom_info (custom_info)
{
}
1918
1919 /* exploded_edge's dtor. */
1920
exploded_edge::~exploded_edge ()
{
  /* The edge owns its custom info (if any); see the ctor.  */
  delete m_custom_info;
}
1925
1926 /* Implementation of dedge::dump_dot vfunc for exploded_edge.
1927 Use the label of the underlying superedge, if any. */
1928
void
exploded_edge::dump_dot (graphviz_out *gv, const dump_args_t &) const
{
  pretty_printer *pp = gv->get_pp ();

  /* Emit "SRC -> DST" followed by the edge's attributes/label.  */
  m_src->dump_dot_id (pp);
  pp_string (pp, " -> ");
  m_dest->dump_dot_id (pp);
  dump_dot_label (pp);
}
1939
1940 /* Second half of exploded_edge::dump_dot. This is split out
1941 for use by trimmed_graph::dump_dot and base_feasible_edge::dump_dot. */
1942
void
exploded_edge::dump_dot_label (pretty_printer *pp) const
{
  /* Defaults, overridden below based on the kind of edge.  */
  const char *style = "\"solid,bold\"";
  const char *color = "black";
  int weight = 10;
  const char *constraint = "true";

  if (m_sedge)
    switch (m_sedge->m_kind)
      {
      default:
	gcc_unreachable ();
      case SUPEREDGE_CFG_EDGE:
	break;
      case SUPEREDGE_CALL:
	color = "red";
	//constraint = "false";
	break;
      case SUPEREDGE_RETURN:
	color = "green";
	//constraint = "false";
	break;
      case SUPEREDGE_INTRAPROCEDURAL_CALL:
	style = "\"dotted\"";
	break;
      }
  /* Custom edges (e.g. longjmp rewinds) take precedence over the
     superedge-based styling above.  */
  if (m_custom_info)
    {
      color = "red";
      style = "\"dotted\"";
    }

  pp_printf (pp,
	     (" [style=%s, color=%s, weight=%d, constraint=%s,"
	      " headlabel=\""),
	     style, color, weight, constraint);

  /* Label comes from the superedge if there is one, otherwise from
     the custom info.  */
  if (m_sedge)
    m_sedge->dump_label_to_pp (pp, false);
  else if (m_custom_info)
    m_custom_info->print (pp);

  //pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);

  pp_printf (pp, "\"];\n");
}
1990
1991 /* Return a new json::object of the form
1992 {"src_idx": int, the index of the source exploded edge,
1993 "dst_idx": int, the index of the destination exploded edge,
1994 "sedge": (optional) object for the superedge, if any,
1995 "custom": (optional) str, a description, if this is a custom edge}. */
1996
json::object *
exploded_edge::to_json () const
{
  json::object *eedge_obj = new json::object ();
  eedge_obj->set ("src_idx", new json::integer_number (m_src->m_index));
  eedge_obj->set ("dst_idx", new json::integer_number (m_dest->m_index));
  if (m_sedge)
    eedge_obj->set ("sedge", m_sedge->to_json ());
  if (m_custom_info)
    {
      /* Custom infos have no JSON form of their own; capture their
	 printed description as a string.  */
      pretty_printer pp;
      pp_format_decoder (&pp) = default_tree_printer;
      m_custom_info->print (&pp);
      eedge_obj->set ("custom", new json::string (pp_formatted_text (&pp)));
    }
  return eedge_obj;
}
2014
2015 /* struct stats. */
2016
2017 /* stats' ctor. */
2018
/* Zero-initialize all counters; NUM_SUPERNODES is recorded for
   computing per-supernode averages in stats::dump.  */

stats::stats (int num_supernodes)
: m_node_reuse_count (0),
  m_node_reuse_after_merge_count (0),
  m_num_supernodes (num_supernodes)
{
  for (int i = 0; i < NUM_POINT_KINDS; i++)
    m_num_nodes[i] = 0;
}
2027
2028 /* Log these stats in multiline form to LOGGER. */
2029
void
stats::log (logger *logger) const
{
  gcc_assert (logger);
  /* Only log point kinds that actually have nodes, to keep logs terse.  */
  for (int i = 0; i < NUM_POINT_KINDS; i++)
    if (m_num_nodes[i] > 0)
      logger->log ("m_num_nodes[%s]: %i",
		   point_kind_to_string (static_cast <enum point_kind> (i)),
		   m_num_nodes[i]);
  logger->log ("m_node_reuse_count: %i", m_node_reuse_count);
  logger->log ("m_node_reuse_after_merge_count: %i",
	       m_node_reuse_after_merge_count);
}
2043
2044 /* Dump these stats in multiline form to OUT. */
2045
void
stats::dump (FILE *out) const
{
  /* Only print point kinds that actually have nodes.  */
  for (int i = 0; i < NUM_POINT_KINDS; i++)
    if (m_num_nodes[i] > 0)
      fprintf (out, "m_num_nodes[%s]: %i\n",
	       point_kind_to_string (static_cast <enum point_kind> (i)),
	       m_num_nodes[i]);
  fprintf (out, "m_node_reuse_count: %i\n", m_node_reuse_count);
  fprintf (out, "m_node_reuse_after_merge_count: %i\n",
	   m_node_reuse_after_merge_count);

  /* Average number of PK_AFTER_SUPERNODE enodes per supernode; a high
     value suggests state explosion.  Guard against division by zero.  */
  if (m_num_supernodes > 0)
    fprintf (out, "PK_AFTER_SUPERNODE nodes per supernode: %.2f\n",
	     (float)m_num_nodes[PK_AFTER_SUPERNODE] / (float)m_num_supernodes);
}
2062
2063 /* Return the total number of enodes recorded within this object. */
2064
2065 int
get_total_enodes() const2066 stats::get_total_enodes () const
2067 {
2068 int result = 0;
2069 for (int i = 0; i < NUM_POINT_KINDS; i++)
2070 result += m_num_nodes[i];
2071 return result;
2072 }
2073
2074 /* strongly_connected_components's ctor. Tarjan's SCC algorithm. */
2075
strongly_connected_components::
strongly_connected_components (const supergraph &sg, logger *logger)
: m_sg (sg), m_per_node (m_sg.num_nodes ())
{
  LOG_SCOPE (logger);
  auto_timevar tv (TV_ANALYZER_SCC);

  /* Initialize per-node data for every supernode (m_index == -1 marks
     "not yet visited").  */
  for (int i = 0; i < m_sg.num_nodes (); i++)
    m_per_node.quick_push (per_node_data ());

  /* Run Tarjan's algorithm from every unvisited node.  */
  for (int i = 0; i < m_sg.num_nodes (); i++)
    if (m_per_node[i].m_index == -1)
      strong_connect (i);

  /* Debug hook: flip to "if (1)" to dump the SCC data.  */
  if (0)
    dump ();
}
2093
2094 /* Dump this object to stderr. */
2095
DEBUG_FUNCTION void
strongly_connected_components::dump () const
{
  for (int i = 0; i < m_sg.num_nodes (); i++)
    {
      const per_node_data &v = m_per_node[i];
      fprintf (stderr, "SN %i: index: %i lowlink: %i on_stack: %i\n",
	       i, v.m_index, v.m_lowlink, v.m_on_stack);
    }
}
2106
2107 /* Return a new json::array of per-snode SCC ids. */
2108
json::array *
strongly_connected_components::to_json () const
{
  /* Element i is the SCC id of supernode i.  */
  json::array *scc_arr = new json::array ();
  for (int i = 0; i < m_sg.num_nodes (); i++)
    scc_arr->append (new json::integer_number (get_scc_id (i)));
  return scc_arr;
}
2117
2118 /* Subroutine of strongly_connected_components's ctor, part of Tarjan's
2119 SCC algorithm. */
2120
void
strongly_connected_components::strong_connect (unsigned index)
{
  supernode *v_snode = m_sg.get_node_by_index (index);

  /* Set the depth index for v to the smallest unused index.  */
  per_node_data *v = &m_per_node[index];
  v->m_index = index;
  v->m_lowlink = index;
  m_stack.safe_push (index);
  v->m_on_stack = true;
  index++;

  /* Consider successors of v.  Only intraprocedural edges participate:
     call/return superedges are skipped so that SCCs stay within a
     single function.  */
  unsigned i;
  superedge *sedge;
  FOR_EACH_VEC_ELT (v_snode->m_succs, i, sedge)
    {
      if (sedge->get_kind () != SUPEREDGE_CFG_EDGE
	  && sedge->get_kind () != SUPEREDGE_INTRAPROCEDURAL_CALL)
	continue;
      supernode *w_snode = sedge->m_dest;
      per_node_data *w = &m_per_node[w_snode->m_index];
      if (w->m_index == -1)
	{
	  /* Successor w has not yet been visited; recurse on it.  */
	  strong_connect (w_snode->m_index);
	  v->m_lowlink = MIN (v->m_lowlink, w->m_lowlink);
	}
      else if (w->m_on_stack)
	{
	  /* Successor w is in stack S and hence in the current SCC
	     If w is not on stack, then (v, w) is a cross-edge in the DFS
	     tree and must be ignored.  */
	  v->m_lowlink = MIN (v->m_lowlink, w->m_index);
	}
    }

  /* If v is a root node, pop the stack and generate an SCC.  */

  if (v->m_lowlink == v->m_index)
    {
      per_node_data *w;
      do {
	int idx = m_stack.pop ();
	w = &m_per_node[idx];
	w->m_on_stack = false;
      } while (w != v);
    }
}
2171
2172 /* worklist's ctor. */
2173
/* Compute SCCs over the supergraph up front; they are used by the
   worklist's priority comparison (key_t::cmp).  */

worklist::worklist (const exploded_graph &eg, const analysis_plan &plan)
: m_scc (eg.get_supergraph (), eg.get_logger ()),
  m_plan (plan),
  m_queue (key_t (*this, NULL))
{
}
2180
2181 /* Return the number of nodes in the worklist. */
2182
unsigned
worklist::length () const
{
  return m_queue.nodes ();
}
2188
2189 /* Return the next node in the worklist, removing it. */
2190
exploded_node *
worklist::take_next ()
{
  return m_queue.extract_min ();
}
2196
2197 /* Return the next node in the worklist without removing it. */
2198
exploded_node *
worklist::peek_next ()
{
  return m_queue.min ();
}
2204
2205 /* Add ENODE to the worklist. */
2206
void
worklist::add_node (exploded_node *enode)
{
  /* Only nodes still awaiting processing may be queued.  */
  gcc_assert (enode->get_status () == exploded_node::STATUS_WORKLIST);
  m_queue.insert (key_t (*this, enode), enode);
}
2213
2214 /* Comparator for implementing worklist::key_t comparison operators.
2215 Return negative if KA is before KB
2216 Return positive if KA is after KB
2217 Return 0 if they are equal.
2218
2219 The ordering of the worklist is critical for performance and for
2220 avoiding node explosions. Ideally we want all enodes at a CFG join-point
2221 with the same callstring to be sorted next to each other in the worklist
2222 so that a run of consecutive enodes can be merged and processed "in bulk"
2223 rather than individually or pairwise, minimizing the number of new enodes
2224 created. */
2225
int
worklist::key_t::cmp (const worklist::key_t &ka, const worklist::key_t &kb)
{
  const program_point &point_a = ka.m_enode->get_point ();
  const program_point &point_b = kb.m_enode->get_point ();
  const call_string &call_string_a = point_a.get_call_string ();
  const call_string &call_string_b = point_b.get_call_string ();

  /* Order empty-callstring points with different functions based on the
     analysis_plan, so that we generate summaries before they are used.  */
  if (flag_analyzer_call_summaries
      && call_string_a.empty_p ()
      && call_string_b.empty_p ()
      && point_a.get_function () != NULL
      && point_b.get_function () != NULL
      && point_a.get_function () != point_b.get_function ())
    {
      if (int cmp = ka.m_worklist.m_plan.cmp_function (point_a.get_function (),
						       point_b.get_function ()))
	return cmp;
    }

  /* Sort by callstring, so that nodes with deeper call strings are processed
     before those with shallower call strings.
     If we have
	     splitting BB
		 /  \
		/    \
	   fn call   no fn call
		\    /
		 \  /
	       join BB
     then we want the path inside the function call to be fully explored up
     to the return to the join BB before we explore on the "no fn call" path,
     so that both enodes at the join BB reach the front of the worklist at
     the same time and thus have a chance of being merged.  */
  int cs_cmp = call_string::cmp (call_string_a, call_string_b);
  if (cs_cmp)
    return cs_cmp;

  /* Order by SCC.  */
  int scc_id_a = ka.get_scc_id (ka.m_enode);
  int scc_id_b = kb.get_scc_id (kb.m_enode);
  if (scc_id_a != scc_id_b)
    return scc_id_a - scc_id_b;

  /* If in same SCC, order by supernode index (an arbitrary but stable
     ordering).  */
  const supernode *snode_a = ka.m_enode->get_supernode ();
  const supernode *snode_b = kb.m_enode->get_supernode ();
  if (snode_a == NULL)
    {
      if (snode_b != NULL)
	/* One is NULL.  */
	return -1;
      else
	/* Both are NULL.  */
	return 0;
    }
  if (snode_b == NULL)
    /* One is NULL.  */
    return 1;
  /* Neither are NULL.  */
  gcc_assert (snode_a && snode_b);
  if (snode_a->m_index != snode_b->m_index)
    return snode_a->m_index - snode_b->m_index;

  gcc_assert (snode_a == snode_b);

  /* Order within supernode via program point.  */
  int within_snode_cmp
    = function_point::cmp_within_supernode (point_a.get_function_point (),
					    point_b.get_function_point ());
  if (within_snode_cmp)
    return within_snode_cmp;

  /* Otherwise, we ought to have the same program_point.  */
  gcc_assert (point_a == point_b);

  const program_state &state_a = ka.m_enode->get_state ();
  const program_state &state_b = kb.m_enode->get_state ();

  /* Sort by sm-state, so that identical sm-states are grouped
     together in the worklist.  */
  for (unsigned sm_idx = 0; sm_idx < state_a.m_checker_states.length ();
       ++sm_idx)
    {
      sm_state_map *smap_a = state_a.m_checker_states[sm_idx];
      sm_state_map *smap_b = state_b.m_checker_states[sm_idx];

      if (int smap_cmp = sm_state_map::cmp (*smap_a, *smap_b))
	return smap_cmp;
    }

  /* Otherwise, we have two enodes at the same program point but with
     different states.  We don't have a good total ordering on states,
     so order them by enode index, so that we have at least have a
     stable sort.  */
  return ka.m_enode->m_index - kb.m_enode->m_index;
}
2326
/* Return a new json::object of the form
   {"scc" : [per-snode-IDs]}.  */
2329
json::object *
worklist::to_json () const
{
  json::object *worklist_obj = new json::object ();

  worklist_obj->set ("scc", m_scc.to_json ());

  /* The following field isn't yet being JSONified:
     queue_t m_queue;  */

  return worklist_obj;
}
2342
2343 /* exploded_graph's ctor. */
2344
exploded_graph::exploded_graph (const supergraph &sg, logger *logger,
				const extrinsic_state &ext_state,
				const state_purge_map *purge_map,
				const analysis_plan &plan,
				int verbosity)
: m_sg (sg), m_logger (logger),
  m_worklist (*this, plan),
  m_ext_state (ext_state),
  m_purge_map (purge_map),
  m_plan (plan),
  m_diagnostic_manager (logger, ext_state.get_engine (), verbosity),
  m_global_stats (m_sg.num_nodes ()),
  m_functionless_stats (m_sg.num_nodes ()),
  m_PK_AFTER_SUPERNODE_per_snode (m_sg.num_nodes ())
{
  /* Create the single origin enode from which all function entrypoints
     will be reached.  */
  m_origin = get_or_create_node (program_point::origin (),
				 program_state (ext_state), NULL);
  for (int i = 0; i < m_sg.num_nodes (); i++)
    m_PK_AFTER_SUPERNODE_per_snode.quick_push (i);
}
2365
2366 /* exploded_graph's dtor. */
2367
exploded_graph::~exploded_graph ()
{
  /* The graph owns the values of these maps (the keys live inside the
     values, or are owned elsewhere).  */
  for (auto iter : m_per_point_data)
    delete iter.second;
  for (auto iter : m_per_function_data)
    delete iter.second;
  for (auto iter : m_per_function_stats)
    delete iter.second;
  for (auto iter : m_per_call_string_data)
    delete iter.second;
}
2379
2380 /* Subroutine for use when implementing __attribute__((tainted_args))
2381 on functions and on function pointer fields in structs.
2382
2383 Called on STATE representing a call to FNDECL.
2384 Mark all params of FNDECL in STATE as "tainted". Mark the value of all
2385 regions pointed to by params of FNDECL as "tainted".
2386
2387 Return true if successful; return false if the "taint" state machine
2388 was not found. */
2389
static bool
mark_params_as_tainted (program_state *state, tree fndecl,
			const extrinsic_state &ext_state)
{
  unsigned taint_sm_idx;
  if (!ext_state.get_sm_idx_by_name ("taint", &taint_sm_idx))
    return false;
  sm_state_map *smap = state->m_checker_states[taint_sm_idx];

  const state_machine &sm = ext_state.get_sm (taint_sm_idx);
  state_machine::state_t tainted = sm.get_state_by_name ("tainted");

  region_model_manager *mgr = ext_state.get_model_manager ();

  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  gcc_assert (fun);

  for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
       iter_parm = DECL_CHAIN (iter_parm))
    {
      tree param = iter_parm;
      /* Prefer the default-def SSA name for the param, if one exists.  */
      if (tree parm_default_ssa = ssa_default_def (fun, iter_parm))
	param = parm_default_ssa;
      const region *param_reg = state->m_region_model->get_lvalue (param, NULL);
      const svalue *init_sval = mgr->get_or_create_initial_value (param_reg);
      smap->set_state (state->m_region_model, init_sval,
		       tainted, NULL /*origin_new_sval*/, ext_state);
      if (POINTER_TYPE_P (TREE_TYPE (param)))
	{
	  const region *pointee_reg = mgr->get_symbolic_region (init_sval);
	  /* Mark "*param" as tainted.  */
	  const svalue *init_pointee_sval
	    = mgr->get_or_create_initial_value (pointee_reg);
	  smap->set_state (state->m_region_model, init_pointee_sval,
			   tainted, NULL /*origin_new_sval*/, ext_state);
	}
    }

  return true;
}
2430
2431 /* Custom event for use by tainted_args_function_info when a function
2432 has been marked with __attribute__((tainted_args)). */
2433
class tainted_args_function_custom_event : public custom_event
{
public:
  tainted_args_function_custom_event (location_t loc, tree fndecl, int depth)
  : custom_event (loc, fndecl, depth),
    m_fndecl (fndecl)
  {
  }

  /* Describe the event: report that the function carries the
     tainted_args attribute.  */
  label_text get_desc (bool can_colorize) const FINAL OVERRIDE
  {
    return make_label_text
      (can_colorize,
       "function %qE marked with %<__attribute__((tainted_args))%>",
       m_fndecl);
  }

private:
  tree m_fndecl;
};
2454
2455 /* Custom exploded_edge info for top-level calls to a function
2456 marked with __attribute__((tainted_args)). */
2457
class tainted_args_function_info : public custom_edge_info
{
public:
  tainted_args_function_info (tree fndecl)
  : m_fndecl (fndecl)
  {}

  void print (pretty_printer *pp) const FINAL OVERRIDE
  {
    pp_string (pp, "call to tainted_args function");
  };

  /* The state was already marked as tainted when the entrypoint enode
     was created (see mark_params_as_tainted), so there is nothing to
     do when traversing the edge.  */
  bool update_model (region_model *,
		     const exploded_edge *,
		     region_model_context *) const FINAL OVERRIDE
  {
    /* No-op.  */
    return true;
  }

  void add_events_to_path (checker_path *emission_path,
			   const exploded_edge &) const FINAL OVERRIDE
  {
    emission_path->add_event
      (new tainted_args_function_custom_event
       (DECL_SOURCE_LOCATION (m_fndecl), m_fndecl, 0));
  }

private:
  tree m_fndecl;
};
2489
2490 /* Ensure that there is an exploded_node representing an external call to
2491 FUN, adding it to the worklist if creating it.
2492
2493 Add an edge from the origin exploded_node to the function entrypoint
2494 exploded_node.
2495
2496 Return the exploded_node for the entrypoint to the function. */
2497
exploded_node *
exploded_graph::add_function_entry (function *fun)
{
  gcc_assert (gimple_has_body_p (fun->decl));

  /* Be idempotent.  */
  if (m_functions_with_enodes.contains (fun))
    {
      logger * const logger = get_logger ();
       if (logger)
	logger->log ("entrypoint for %qE already exists", fun->decl);
      return NULL;
    }

  program_point point = program_point::from_function_entry (m_sg, fun);
  program_state state (m_ext_state);
  state.push_frame (m_ext_state, fun);

  custom_edge_info *edge_info = NULL;

  /* Functions marked __attribute__((tainted_args)) get their params
     (and pointees) marked as tainted, plus a custom edge info so that
     diagnostic paths explain why.  */
  if (lookup_attribute ("tainted_args", DECL_ATTRIBUTES (fun->decl)))
    {
      if (mark_params_as_tainted (&state, fun->decl, m_ext_state))
	edge_info = new tainted_args_function_info (fun->decl);
    }

  if (!state.m_valid)
    return NULL;

  exploded_node *enode = get_or_create_node (point, state, NULL);
  if (!enode)
    {
      /* Node creation failed; we still own EDGE_INFO.  */
      delete edge_info;
      return NULL;
    }

  add_edge (m_origin, enode, NULL, edge_info);

  m_functions_with_enodes.add (fun);

  return enode;
}
2540
2541 /* Get or create an exploded_node for (POINT, STATE).
2542 If a new node is created, it is added to the worklist.
2543
2544 Use ENODE_FOR_DIAG, a pre-existing enode, for any diagnostics
2545 that need to be emitted (e.g. when purging state *before* we have
2546 a new enode). */
2547
2548 exploded_node *
get_or_create_node(const program_point & point,const program_state & state,exploded_node * enode_for_diag)2549 exploded_graph::get_or_create_node (const program_point &point,
2550 const program_state &state,
2551 exploded_node *enode_for_diag)
2552 {
2553 logger * const logger = get_logger ();
2554 LOG_FUNC (logger);
2555 if (logger)
2556 {
2557 format f (false);
2558 pretty_printer *pp = logger->get_printer ();
2559 logger->start_log_line ();
2560 pp_string (pp, "point: ");
2561 point.print (pp, f);
2562 logger->end_log_line ();
2563 logger->start_log_line ();
2564 pp_string (pp, "state: ");
2565 state.dump_to_pp (m_ext_state, true, false, pp);
2566 logger->end_log_line ();
2567 }
2568
2569 /* Stop exploring paths for which we don't know how to effectively
2570 model the state. */
2571 if (!state.m_valid)
2572 {
2573 if (logger)
2574 logger->log ("invalid state; not creating node");
2575 return NULL;
2576 }
2577
2578 auto_cfun sentinel (point.get_function ());
2579
2580 state.validate (get_ext_state ());
2581
2582 //state.dump (get_ext_state ());
2583
2584 /* Prune state to try to improve the chances of a cache hit,
2585 avoiding generating redundant nodes. */
2586 uncertainty_t uncertainty;
2587 program_state pruned_state
2588 = state.prune_for_point (*this, point, enode_for_diag, &uncertainty);
2589
2590 pruned_state.validate (get_ext_state ());
2591
2592 //pruned_state.dump (get_ext_state ());
2593
2594 if (logger)
2595 {
2596 pretty_printer *pp = logger->get_printer ();
2597 logger->start_log_line ();
2598 pp_string (pp, "pruned_state: ");
2599 pruned_state.dump_to_pp (m_ext_state, true, false, pp);
2600 logger->end_log_line ();
2601 pruned_state.m_region_model->dump_to_pp (logger->get_printer (), true,
2602 false);
2603 }
2604
2605 stats *per_fn_stats = get_or_create_function_stats (point.get_function ());
2606
2607 stats *per_cs_stats
2608 = &get_or_create_per_call_string_data (point.get_call_string ())->m_stats;
2609
2610 point_and_state ps (point, pruned_state);
2611 ps.validate (m_ext_state);
2612 if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
2613 {
2614 /* An exploded_node for PS already exists. */
2615 if (logger)
2616 logger->log ("reused EN: %i", (*slot)->m_index);
2617 m_global_stats.m_node_reuse_count++;
2618 per_fn_stats->m_node_reuse_count++;
2619 per_cs_stats->m_node_reuse_count++;
2620 return *slot;
2621 }
2622
2623 per_program_point_data *per_point_data
2624 = get_or_create_per_program_point_data (point);
2625
2626 /* Consider merging state with another enode at this program_point. */
2627 if (flag_analyzer_state_merge)
2628 {
2629 exploded_node *existing_enode;
2630 unsigned i;
2631 FOR_EACH_VEC_ELT (per_point_data->m_enodes, i, existing_enode)
2632 {
2633 if (logger)
2634 logger->log ("considering merging with existing EN: %i for point",
2635 existing_enode->m_index);
2636 gcc_assert (existing_enode->get_point () == point);
2637 const program_state &existing_state = existing_enode->get_state ();
2638
2639 /* This merges successfully within the loop. */
2640
2641 program_state merged_state (m_ext_state);
2642 if (pruned_state.can_merge_with_p (existing_state, m_ext_state, point,
2643 &merged_state))
2644 {
2645 merged_state.validate (m_ext_state);
2646 if (logger)
2647 logger->log ("merging new state with that of EN: %i",
2648 existing_enode->m_index);
2649
2650 /* Try again for a cache hit.
2651 Whether we get one or not, merged_state's value_ids have no
2652 relationship to those of the input state, and thus to those
2653 of CHANGE, so we must purge any svalue_ids from *CHANGE. */
2654 ps.set_state (merged_state);
2655
2656 if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
2657 {
2658 /* An exploded_node for PS already exists. */
2659 if (logger)
2660 logger->log ("reused EN: %i", (*slot)->m_index);
2661 m_global_stats.m_node_reuse_after_merge_count++;
2662 per_fn_stats->m_node_reuse_after_merge_count++;
2663 per_cs_stats->m_node_reuse_after_merge_count++;
2664 return *slot;
2665 }
2666 }
2667 else
2668 if (logger)
2669 logger->log ("not merging new state with that of EN: %i",
2670 existing_enode->m_index);
2671 }
2672 }
2673
2674 /* Impose a limit on the number of enodes per program point, and
2675 simply stop if we exceed it. */
2676 if ((int)per_point_data->m_enodes.length ()
2677 >= param_analyzer_max_enodes_per_program_point)
2678 {
2679 pretty_printer pp;
2680 point.print (&pp, format (false));
2681 print_enode_indices (&pp, per_point_data->m_enodes);
2682 if (logger)
2683 logger->log ("not creating enode; too many at program point: %s",
2684 pp_formatted_text (&pp));
2685 warning_at (point.get_location (), OPT_Wanalyzer_too_complex,
2686 "terminating analysis for this program point: %s",
2687 pp_formatted_text (&pp));
2688 per_point_data->m_excess_enodes++;
2689 return NULL;
2690 }
2691
2692 ps.validate (m_ext_state);
2693
2694 /* An exploded_node for "ps" doesn't already exist; create one. */
2695 exploded_node *node = new exploded_node (ps, m_nodes.length ());
2696 add_node (node);
2697 m_point_and_state_to_node.put (node->get_ps_key (), node);
2698
2699 /* Update per-program_point data. */
2700 per_point_data->m_enodes.safe_push (node);
2701
2702 const enum point_kind node_pk = node->get_point ().get_kind ();
2703 m_global_stats.m_num_nodes[node_pk]++;
2704 per_fn_stats->m_num_nodes[node_pk]++;
2705 per_cs_stats->m_num_nodes[node_pk]++;
2706
2707 if (node_pk == PK_AFTER_SUPERNODE)
2708 m_PK_AFTER_SUPERNODE_per_snode[point.get_supernode ()->m_index]++;
2709
2710 if (logger)
2711 {
2712 format f (false);
2713 pretty_printer *pp = logger->get_printer ();
2714 logger->log ("created EN: %i", node->m_index);
2715 logger->start_log_line ();
2716 pp_string (pp, "point: ");
2717 point.print (pp, f);
2718 logger->end_log_line ();
2719 logger->start_log_line ();
2720 pp_string (pp, "pruned_state: ");
2721 pruned_state.dump_to_pp (m_ext_state, true, false, pp);
2722 logger->end_log_line ();
2723 }
2724
2725 /* Add the new node to the worlist. */
2726 m_worklist.add_node (node);
2727 return node;
2728 }
2729
/* Add an exploded_edge from SRC to DEST, recording its association
   with SEDGE (which may be NULL), and, if non-NULL, taking ownership
   of CUSTOM_INFO.
   Return the newly-created eedge.  */
2734
exploded_edge *
exploded_graph::add_edge (exploded_node *src, exploded_node *dest,
			  const superedge *sedge,
			  custom_edge_info *custom_info)
{
  if (get_logger ())
    get_logger ()->log ("creating edge EN: %i -> EN: %i",
			src->m_index, dest->m_index);
  /* The new eedge takes ownership of CUSTOM_INFO (if any); the graph
     owns the eedge itself.  */
  exploded_edge *e = new exploded_edge (src, dest, sedge, custom_info);
  digraph<eg_traits>::add_edge (e);
  return e;
}
2747
2748 /* Ensure that this graph has per-program_point-data for POINT;
2749 borrow a pointer to it. */
2750
per_program_point_data *
exploded_graph::
get_or_create_per_program_point_data (const program_point &point)
{
  if (per_program_point_data **slot = m_per_point_data.get (&point))
    return *slot;

  /* The map key points at the copy of POINT stored inside the new
     per_program_point_data, so the key stays valid for its lifetime.  */
  per_program_point_data *per_point_data = new per_program_point_data (point);
  m_per_point_data.put (&per_point_data->m_key, per_point_data);
  return per_point_data;
}
2762
2763 /* Get this graph's per-program-point-data for POINT if there is any,
2764 otherwise NULL. */
2765
per_program_point_data *
exploded_graph::get_per_program_point_data (const program_point &point) const
{
  /* const_cast since hash_map::get is non-const, but lookup doesn't
     mutate the map.  */
  if (per_program_point_data **slot
      = const_cast <point_map_t &> (m_per_point_data).get (&point))
    return *slot;

  return NULL;
}
2775
2776 /* Ensure that this graph has per-call_string-data for CS;
2777 borrow a pointer to it. */
2778
per_call_string_data *
exploded_graph::get_or_create_per_call_string_data (const call_string &cs)
{
  if (per_call_string_data **slot = m_per_call_string_data.get (&cs))
    return *slot;

  /* As with per-point data, the key points into the newly-created
     value, keeping it valid for the value's lifetime.  */
  per_call_string_data *data = new per_call_string_data (cs, m_sg.num_nodes ());
  m_per_call_string_data.put (&data->m_key,
			      data);
  return data;
}
2790
2791 /* Ensure that this graph has per-function-data for FUN;
2792 borrow a pointer to it. */
2793
2794 per_function_data *
get_or_create_per_function_data(function * fun)2795 exploded_graph::get_or_create_per_function_data (function *fun)
2796 {
2797 if (per_function_data **slot = m_per_function_data.get (fun))
2798 return *slot;
2799
2800 per_function_data *data = new per_function_data ();
2801 m_per_function_data.put (fun, data);
2802 return data;
2803 }
2804
2805 /* Get this graph's per-function-data for FUN if there is any,
2806 otherwise NULL. */
2807
per_function_data *
exploded_graph::get_per_function_data (function *fun) const
{
  /* const_cast since hash_map::get is non-const, but lookup doesn't
     mutate the map.  */
  if (per_function_data **slot
      = const_cast <per_function_data_t &> (m_per_function_data).get (fun))
    return *slot;

  return NULL;
}
2817
2818 /* Return true if FUN should be traversed directly, rather than only as
2819 called via other functions. */
2820
static bool
toplevel_function_p (function *fun, logger *logger)
{
  /* Don't directly traverse into functions that have an "__analyzer_"
     prefix.  Doing so is useful for the analyzer test suite, allowing
     us to have functions that are called in traversals, but not directly
     explored, thus testing how the analyzer handles calls and returns.
     With this, we can have DejaGnu directives that cover just the case
     of where a function is called by another function, without generating
     excess messages from the case of the first function being traversed
     directly.  */
#define ANALYZER_PREFIX "__analyzer_"
  if (!strncmp (IDENTIFIER_POINTER (DECL_NAME (fun->decl)), ANALYZER_PREFIX,
		strlen (ANALYZER_PREFIX)))
    {
      if (logger)
	logger->log ("not traversing %qE (starts with %qs)",
		     fun->decl, ANALYZER_PREFIX);
      return false;
    }

  if (logger)
    logger->log ("traversing %qE (all checks passed)", fun->decl);

  return true;
}
2847
2848 /* Custom event for use by tainted_call_info when a callback field has been
2849 marked with __attribute__((tainted_args)), for labelling the field. */
2850
class tainted_args_field_custom_event : public custom_event
{
public:
  tainted_args_field_custom_event (tree field)
  : custom_event (DECL_SOURCE_LOCATION (field), NULL_TREE, 0),
    m_field (field)
  {
  }

  /* Describe the event: report that the struct field carries the
     tainted_args attribute.  */
  label_text get_desc (bool can_colorize) const FINAL OVERRIDE
  {
    return make_label_text (can_colorize,
			    "field %qE of %qT"
			    " is marked with %<__attribute__((tainted_args))%>",
			    m_field, DECL_CONTEXT (m_field));
  }

private:
  tree m_field;
};
2871
2872 /* Custom event for use by tainted_call_info when a callback field has been
2873 marked with __attribute__((tainted_args)), for labelling the function used
2874 in that callback. */
2875
2876 class tainted_args_callback_custom_event : public custom_event
2877 {
2878 public:
tainted_args_callback_custom_event(location_t loc,tree fndecl,int depth,tree field)2879 tainted_args_callback_custom_event (location_t loc, tree fndecl, int depth,
2880 tree field)
2881 : custom_event (loc, fndecl, depth),
2882 m_field (field)
2883 {
2884 }
2885
get_desc(bool can_colorize) const2886 label_text get_desc (bool can_colorize) const FINAL OVERRIDE
2887 {
2888 return make_label_text (can_colorize,
2889 "function %qE used as initializer for field %qE"
2890 " marked with %<__attribute__((tainted_args))%>",
2891 m_fndecl, m_field);
2892 }
2893
2894 private:
2895 tree m_field;
2896 };
2897
2898 /* Custom edge info for use when adding a function used by a callback field
2899 marked with '__attribute__((tainted_args))'. */
2900
2901 class tainted_args_call_info : public custom_edge_info
2902 {
2903 public:
tainted_args_call_info(tree field,tree fndecl,location_t loc)2904 tainted_args_call_info (tree field, tree fndecl, location_t loc)
2905 : m_field (field), m_fndecl (fndecl), m_loc (loc)
2906 {}
2907
print(pretty_printer * pp) const2908 void print (pretty_printer *pp) const FINAL OVERRIDE
2909 {
2910 pp_string (pp, "call to tainted field");
2911 };
2912
update_model(region_model *,const exploded_edge *,region_model_context *) const2913 bool update_model (region_model *,
2914 const exploded_edge *,
2915 region_model_context *) const FINAL OVERRIDE
2916 {
2917 /* No-op. */
2918 return true;
2919 }
2920
add_events_to_path(checker_path * emission_path,const exploded_edge &) const2921 void add_events_to_path (checker_path *emission_path,
2922 const exploded_edge &) const FINAL OVERRIDE
2923 {
2924 /* Show the field in the struct declaration, e.g.
2925 "(1) field 'store' is marked with '__attribute__((tainted_args))'" */
2926 emission_path->add_event
2927 (new tainted_args_field_custom_event (m_field));
2928
2929 /* Show the callback in the initializer
2930 e.g.
2931 "(2) function 'gadget_dev_desc_UDC_store' used as initializer
2932 for field 'store' marked with '__attribute__((tainted_args))'". */
2933 emission_path->add_event
2934 (new tainted_args_callback_custom_event (m_loc, m_fndecl, 0, m_field));
2935 }
2936
2937 private:
2938 tree m_field;
2939 tree m_fndecl;
2940 location_t m_loc;
2941 };
2942
2943 /* Given an initializer at LOC for FIELD marked with
2944 '__attribute__((tainted_args))' initialized with FNDECL, add an
2945 entrypoint to FNDECL to EG (and to its worklist) where the params to
2946 FNDECL are marked as tainted. */
2947
2948 static void
add_tainted_args_callback(exploded_graph * eg,tree field,tree fndecl,location_t loc)2949 add_tainted_args_callback (exploded_graph *eg, tree field, tree fndecl,
2950 location_t loc)
2951 {
2952 logger *logger = eg->get_logger ();
2953
2954 LOG_SCOPE (logger);
2955
2956 if (!gimple_has_body_p (fndecl))
2957 return;
2958
2959 const extrinsic_state &ext_state = eg->get_ext_state ();
2960
2961 function *fun = DECL_STRUCT_FUNCTION (fndecl);
2962 gcc_assert (fun);
2963
2964 program_point point
2965 = program_point::from_function_entry (eg->get_supergraph (), fun);
2966 program_state state (ext_state);
2967 state.push_frame (ext_state, fun);
2968
2969 if (!mark_params_as_tainted (&state, fndecl, ext_state))
2970 return;
2971
2972 if (!state.m_valid)
2973 return;
2974
2975 exploded_node *enode = eg->get_or_create_node (point, state, NULL);
2976 if (logger)
2977 {
2978 if (enode)
2979 logger->log ("created EN %i for tainted_args %qE entrypoint",
2980 enode->m_index, fndecl);
2981 else
2982 {
2983 logger->log ("did not create enode for tainted_args %qE entrypoint",
2984 fndecl);
2985 return;
2986 }
2987 }
2988
2989 tainted_args_call_info *info
2990 = new tainted_args_call_info (field, fndecl, loc);
2991 eg->add_edge (eg->get_origin (), enode, NULL, info);
2992 }
2993
2994 /* Callback for walk_tree for finding callbacks within initializers;
2995 ensure that any callback initializer where the corresponding field is
2996 marked with '__attribute__((tainted_args))' is treated as an entrypoint
2997 to the analysis, special-casing that the inputs to the callback are
2998 untrustworthy. */
2999
3000 static tree
add_any_callbacks(tree * tp,int *,void * data)3001 add_any_callbacks (tree *tp, int *, void *data)
3002 {
3003 exploded_graph *eg = (exploded_graph *)data;
3004 if (TREE_CODE (*tp) == CONSTRUCTOR)
3005 {
3006 /* Find fields with the "tainted_args" attribute.
3007 walk_tree only walks the values, not the index values;
3008 look at the index values. */
3009 unsigned HOST_WIDE_INT idx;
3010 constructor_elt *ce;
3011
3012 for (idx = 0; vec_safe_iterate (CONSTRUCTOR_ELTS (*tp), idx, &ce);
3013 idx++)
3014 if (ce->index && TREE_CODE (ce->index) == FIELD_DECL)
3015 if (lookup_attribute ("tainted_args", DECL_ATTRIBUTES (ce->index)))
3016 {
3017 tree value = ce->value;
3018 if (TREE_CODE (value) == ADDR_EXPR
3019 && TREE_CODE (TREE_OPERAND (value, 0)) == FUNCTION_DECL)
3020 add_tainted_args_callback (eg, ce->index,
3021 TREE_OPERAND (value, 0),
3022 EXPR_LOCATION (value));
3023 }
3024 }
3025
3026 return NULL_TREE;
3027 }
3028
3029 /* Add initial nodes to EG, with entrypoints for externally-callable
3030 functions. */
3031
3032 void
build_initial_worklist()3033 exploded_graph::build_initial_worklist ()
3034 {
3035 logger * const logger = get_logger ();
3036 LOG_SCOPE (logger);
3037
3038 cgraph_node *node;
3039 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
3040 {
3041 function *fun = node->get_fun ();
3042 if (!toplevel_function_p (fun, logger))
3043 continue;
3044 exploded_node *enode = add_function_entry (fun);
3045 if (logger)
3046 {
3047 if (enode)
3048 logger->log ("created EN %i for %qE entrypoint",
3049 enode->m_index, fun->decl);
3050 else
3051 logger->log ("did not create enode for %qE entrypoint", fun->decl);
3052 }
3053 }
3054
3055 /* Find callbacks that are reachable from global initializers. */
3056 varpool_node *vpnode;
3057 FOR_EACH_VARIABLE (vpnode)
3058 {
3059 tree decl = vpnode->decl;
3060 tree init = DECL_INITIAL (decl);
3061 if (!init)
3062 continue;
3063 walk_tree (&init, add_any_callbacks, this, NULL);
3064 }
3065 }
3066
3067 /* The main loop of the analysis.
3068 Take freshly-created exploded_nodes from the worklist, calling
3069 process_node on them to explore the <point, state> graph.
3070 Add edges to their successors, potentially creating new successors
3071 (which are also added to the worklist). */
3072
void
exploded_graph::process_worklist ()
{
  logger * const logger = get_logger ();
  LOG_SCOPE (logger);
  auto_timevar tv (TV_ANALYZER_WORKLIST);

  while (m_worklist.length () > 0)
    {
      exploded_node *node = m_worklist.take_next ();
      /* A freshly-popped node must still be flagged as in the worklist,
	 and (apart from the origin) must not yet have successors.  */
      gcc_assert (node->get_status () == exploded_node::STATUS_WORKLIST);
      gcc_assert (node->m_succs.length () == 0
		  || node == m_origin);

      if (logger)
	logger->log ("next to process: EN: %i", node->m_index);

      /* If we have a run of nodes that are before-supernode, try merging and
	 processing them together, rather than pairwise or individually.  */
      if (flag_analyzer_state_merge && node != m_origin)
	if (maybe_process_run_of_before_supernode_enodes (node))
	  goto handle_limit;

      /* Avoid exponential explosions of nodes by attempting to merge
	 nodes that are at the same program point and which have
	 sufficiently similar state.  */
      if (flag_analyzer_state_merge && node != m_origin)
	if (exploded_node *node_2 = m_worklist.peek_next ())
	  {
	    gcc_assert (node_2->get_status ()
			== exploded_node::STATUS_WORKLIST);
	    gcc_assert (node->m_succs.length () == 0);
	    gcc_assert (node_2->m_succs.length () == 0);

	    gcc_assert (node != node_2);

	    if (logger)
	      logger->log ("peek worklist: EN: %i", node_2->m_index);

	    /* Only consider merging when both enodes are at the same
	       program point.  */
	    if (node->get_point () == node_2->get_point ())
	      {
		const program_point &point = node->get_point ();
		if (logger)
		  {
		    format f (false);
		    pretty_printer *pp = logger->get_printer ();
		    logger->start_log_line ();
		    logger->log_partial
		      ("got potential merge EN: %i and EN: %i at ",
		       node->m_index, node_2->m_index);
		    point.print (pp, f);
		    logger->end_log_line ();
		  }
		const program_state &state = node->get_state ();
		const program_state &state_2 = node_2->get_state ();

		/* They shouldn't be equal, or we wouldn't have two
		   separate nodes.  */
		gcc_assert (state != state_2);

		program_state merged_state (m_ext_state);
		if (state.can_merge_with_p (state_2, m_ext_state,
					    point, &merged_state))
		  {
		    if (logger)
		      logger->log ("merging EN: %i and EN: %i",
				   node->m_index, node_2->m_index);

		    /* Three cases: the merged state can equal one of the
		       inputs (in which case the other node is subsumed),
		       or be genuinely new.  */
		    if (merged_state == state)
		      {
			/* Then merge node_2 into node by adding an edge.  */
			add_edge (node_2, node, NULL);

			/* Remove node_2 from the worklist.  */
			m_worklist.take_next ();
			node_2->set_status (exploded_node::STATUS_MERGER);

			/* Continue processing "node" below.  */
		      }
		    else if (merged_state == state_2)
		      {
			/* Then merge node into node_2, and leave node_2
			   in the worklist, to be processed on the next
			   iteration.  */
			add_edge (node, node_2, NULL);
			node->set_status (exploded_node::STATUS_MERGER);
			continue;
		      }
		    else
		      {
			/* We have a merged state that differs from
			   both state and state_2.  */

			/* Remove node_2 from the worklist.  */
			m_worklist.take_next ();

			/* Create (or get) an exploded node for the merged
			   states, adding to the worklist.  */
			exploded_node *merged_enode
			  = get_or_create_node (node->get_point (),
						merged_state, node);
			if (merged_enode == NULL)
			  continue;

			if (logger)
			  logger->log ("merged EN: %i and EN: %i into EN: %i",
				       node->m_index, node_2->m_index,
				       merged_enode->m_index);

			/* "node" and "node_2" have both now been removed
			   from the worklist; we should not process them.

			   "merged_enode" may be a new node; if so it will be
			   processed in a subsequent iteration.
			   Alternatively, "merged_enode" could be an existing
			   node; one way the latter can
			   happen is if we end up merging a succession of
			   similar nodes into one.  */

			/* If merged_node is one of the two we were merging,
			   add it back to the worklist to ensure it gets
			   processed.

			   Add edges from the merged nodes to it (but not a
			   self-edge).  */
			if (merged_enode == node)
			  m_worklist.add_node (merged_enode);
			else
			  {
			    add_edge (node, merged_enode, NULL);
			    node->set_status (exploded_node::STATUS_MERGER);
			  }

			if (merged_enode == node_2)
			  m_worklist.add_node (merged_enode);
			else
			  {
			    add_edge (node_2, merged_enode, NULL);
			    node_2->set_status (exploded_node::STATUS_MERGER);
			  }

			continue;
		      }
		  }

		/* TODO: should we attempt more than two nodes,
		   or just do pairs of nodes?  (and hope that we get
		   a cascade of mergers).  */
	      }
	  }

      /* Explore NODE, adding successor nodes/edges (and new worklist
	 entries for freshly-created nodes).  */
      process_node (node);

    handle_limit:
      /* Impose a hard limit on the number of exploded nodes, to ensure
	 that the analysis terminates in the face of pathological state
	 explosion (or bugs).

	 Specifically, the limit is on the number of PK_AFTER_SUPERNODE
	 exploded nodes, looking at supernode exit events.

	 We use exit rather than entry since there can be multiple
	 entry ENs, one per phi; the number of PK_AFTER_SUPERNODE ought
	 to be equivalent to the number of supernodes multiplied by the
	 number of states.  */
      const int limit = m_sg.num_nodes () * param_analyzer_bb_explosion_factor;
      if (m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE] > limit)
	{
	  if (logger)
	    logger->log ("bailing out; too many nodes");
	  warning_at (node->get_point ().get_location (),
		      OPT_Wanalyzer_too_complex,
		      "analysis bailed out early"
		      " (%i 'after-snode' enodes; %i enodes)",
		      m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE],
		      m_nodes.length ());
	  return;
	}
    }
}
3253
3254 /* Attempt to process a consecutive run of sufficiently-similar nodes in
3255 the worklist at a CFG join-point (having already popped ENODE from the
3256 head of the worklist).
3257
3258 If ENODE's point is of the form (before-supernode, SNODE) and the next
3259 nodes in the worklist are a consecutive run of enodes of the same form,
3260 for the same supernode as ENODE (but potentially from different in-edges),
3261 process them all together, setting their status to STATUS_BULK_MERGED,
3262 and return true.
3263 Otherwise, return false, in which case ENODE must be processed in the
3264 normal way.
3265
3266 When processing them all together, generate successor states based
3267 on phi nodes for the appropriate CFG edges, and then attempt to merge
3268 these states into a minimal set of merged successor states, partitioning
3269 the inputs by merged successor state.
3270
3271 Create new exploded nodes for all of the merged states, and add edges
3272 connecting the input enodes to the corresponding merger exploded nodes.
3273
3274 We hope we have a much smaller number of merged successor states
3275 compared to the number of input enodes - ideally just one,
3276 if all successor states can be merged.
3277
3278 Processing and merging many together as one operation rather than as
3279 pairs avoids scaling issues where per-pair mergers could bloat the
3280 graph with merger nodes (especially so after switch statements). */
3281
bool
exploded_graph::
maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
{
  /* A struct for tracking per-input state.  */
  struct item
  {
    item (exploded_node *input_enode)
    : m_input_enode (input_enode),
      m_processed_state (input_enode->get_state ()),
      m_merger_idx (-1)
    {}

    exploded_node *m_input_enode;    /* The input enode being merged.  */
    program_state m_processed_state; /* Its state after phi processing.  */
    int m_merger_idx;		     /* Index into merged_states, or -1.  */
  };

  gcc_assert (enode->get_status () == exploded_node::STATUS_WORKLIST);
  gcc_assert (enode->m_succs.length () == 0);

  const program_point &point = enode->get_point ();

  /* This optimization only applies to before-supernode points.  */
  if (point.get_kind () != PK_BEFORE_SUPERNODE)
    return false;

  const supernode *snode = point.get_supernode ();

  logger * const logger = get_logger ();
  LOG_SCOPE (logger);

  /* Find a run of enodes in the worklist that are before the same supernode,
     but potentially from different in-edges.  */
  auto_vec <exploded_node *> enodes;
  enodes.safe_push (enode);
  while (exploded_node *enode_2 = m_worklist.peek_next ())
    {
      gcc_assert (enode_2->get_status ()
		  == exploded_node::STATUS_WORKLIST);
      gcc_assert (enode_2->m_succs.length () == 0);

      const program_point &point_2 = enode_2->get_point ();

      /* Pop candidates that are at the same supernode with the same
	 call string; stop at the first one that isn't.  */
      if (point_2.get_kind () == PK_BEFORE_SUPERNODE
	  && point_2.get_supernode () == snode
	  && point_2.get_call_string () == point.get_call_string ())
	{
	  enodes.safe_push (enode_2);
	  m_worklist.take_next ();
	}
      else
	break;
    }

  /* If the only node is ENODE, then give up.  */
  if (enodes.length () == 1)
    return false;

  if (logger)
    logger->log ("got run of %i enodes for SN: %i",
		 enodes.length (), snode->m_index);

  /* All of these enodes have a shared successor point (even if they
     were for different in-edges).  */
  program_point next_point (point.get_next ());

  /* Calculate the successor state for each enode in enodes.  */
  auto_delete_vec<item> items (enodes.length ());
  unsigned i;
  exploded_node *iter_enode;
  FOR_EACH_VEC_ELT (enodes, i, iter_enode)
    {
      item *it = new item (iter_enode);
      items.quick_push (it);
      const program_state &state = iter_enode->get_state ();
      program_state *next_state = &it->m_processed_state;
      next_state->validate (m_ext_state);
      const program_point &iter_point = iter_enode->get_point ();
      /* If this enode arrived via a CFG edge, apply the phi nodes for
	 that in-edge to its successor state.  */
      if (const superedge *iter_sedge = iter_point.get_from_edge ())
	{
	  uncertainty_t uncertainty;
	  impl_region_model_context ctxt (*this, iter_enode,
					  &state, next_state,
					  &uncertainty, NULL, NULL);
	  const cfg_superedge *last_cfg_superedge
	    = iter_sedge->dyn_cast_cfg_superedge ();
	  if (last_cfg_superedge)
	    next_state->m_region_model->update_for_phis
	      (snode, last_cfg_superedge, &ctxt);
	}
      next_state->validate (m_ext_state);
    }

  /* Attempt to partition the items into a set of merged states.
     We hope we have a much smaller number of merged states
     compared to the number of input enodes - ideally just one,
     if all can be merged.  */
  auto_delete_vec <program_state> merged_states;
  auto_vec<item *> first_item_for_each_merged_state;
  item *it;
  FOR_EACH_VEC_ELT (items, i, it)
    {
      const program_state &it_state = it->m_processed_state;
      program_state *merged_state;
      unsigned iter_merger_idx;
      /* Greedily try each existing merged state in turn.  */
      FOR_EACH_VEC_ELT (merged_states, iter_merger_idx, merged_state)
	{
	  merged_state->validate (m_ext_state);
	  program_state merge (m_ext_state);
	  if (it_state.can_merge_with_p (*merged_state, m_ext_state,
					 next_point, &merge))
	    {
	      *merged_state = merge;
	      merged_state->validate (m_ext_state);
	      it->m_merger_idx = iter_merger_idx;
	      if (logger)
		logger->log ("reusing merger state %i for item %i (EN: %i)",
			     it->m_merger_idx, i, it->m_input_enode->m_index);
	      goto got_merger;
	    }
	}
      /* If it couldn't be merged with any existing merged_states,
	 create a new one.  */
      if (it->m_merger_idx == -1)
	{
	  it->m_merger_idx = merged_states.length ();
	  merged_states.safe_push (new program_state (it_state));
	  first_item_for_each_merged_state.safe_push (it);
	  if (logger)
	    logger->log ("using new merger state %i for item %i (EN: %i)",
			 it->m_merger_idx, i, it->m_input_enode->m_index);
	}
    got_merger:
      gcc_assert (it->m_merger_idx >= 0);
      gcc_assert ((unsigned)it->m_merger_idx < merged_states.length ());
    }

  /* Create merger nodes.  */
  auto_vec<exploded_node *> next_enodes (merged_states.length ());
  program_state *merged_state;
  FOR_EACH_VEC_ELT (merged_states, i, merged_state)
    {
      exploded_node *src_enode
	= first_item_for_each_merged_state[i]->m_input_enode;
      exploded_node *next
	= get_or_create_node (next_point, *merged_state, src_enode);
      /* "next" could be NULL; we handle that when adding the edges below.  */
      next_enodes.quick_push (next);
      if (logger)
	{
	  if (next)
	    logger->log ("using EN: %i for merger state %i", next->m_index, i);
	  else
	    logger->log ("using NULL enode for merger state %i", i);
	}
    }

  /* Create edges from each input enode to the appropriate successor enode.
     Update the status of the now-processed input enodes.  */
  FOR_EACH_VEC_ELT (items, i, it)
    {
      exploded_node *next = next_enodes[it->m_merger_idx];
      if (next)
	add_edge (it->m_input_enode, next, NULL);
      it->m_input_enode->set_status (exploded_node::STATUS_BULK_MERGED);
    }

  if (logger)
    logger->log ("merged %i in-enodes into %i out-enode(s) at SN: %i",
		 items.length (), merged_states.length (), snode->m_index);

  return true;
}
3455
3456 /* Return true if STMT must appear at the start of its exploded node, and
3457 thus we can't consolidate its effects within a run of other statements,
3458 where PREV_STMT was the previous statement. */
3459
3460 static bool
stmt_requires_new_enode_p(const gimple * stmt,const gimple * prev_stmt)3461 stmt_requires_new_enode_p (const gimple *stmt,
3462 const gimple *prev_stmt)
3463 {
3464 if (const gcall *call = dyn_cast <const gcall *> (stmt))
3465 {
3466 /* Stop consolidating at calls to
3467 "__analyzer_dump_exploded_nodes", so they always appear at the
3468 start of an exploded_node. */
3469 if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
3470 1))
3471 return true;
3472
3473 /* sm-signal.cc injects an additional custom eedge at "signal" calls
3474 from the registration enode to the handler enode, separate from the
3475 regular next state, which defeats the "detect state change" logic
3476 in process_node. Work around this via special-casing, to ensure
3477 we split the enode immediately before any "signal" call. */
3478 if (is_special_named_call_p (call, "signal", 2))
3479 return true;
3480 }
3481
3482 /* If we had a PREV_STMT with an unknown location, and this stmt
3483 has a known location, then if a state change happens here, it
3484 could be consolidated into PREV_STMT, giving us an event with
3485 no location. Ensure that STMT gets its own exploded_node to
3486 avoid this. */
3487 if (get_pure_location (prev_stmt->location) == UNKNOWN_LOCATION
3488 && get_pure_location (stmt->location) != UNKNOWN_LOCATION)
3489 return true;
3490
3491 return false;
3492 }
3493
/* Return true if OLD_STATE and NEW_STATE are sufficiently different that
   we should split enodes and create an exploded_edge separating them
   (which makes it easier to identify state changes of interest when
   constructing checker_paths).  */
3498
3499 static bool
state_change_requires_new_enode_p(const program_state & old_state,const program_state & new_state)3500 state_change_requires_new_enode_p (const program_state &old_state,
3501 const program_state &new_state)
3502 {
3503 /* Changes in dynamic extents signify creations of heap/alloca regions
3504 and resizings of heap regions; likely to be of interest in
3505 diagnostic paths. */
3506 if (old_state.m_region_model->get_dynamic_extents ()
3507 != new_state.m_region_model->get_dynamic_extents ())
3508 return true;
3509
3510 /* Changes in sm-state are of interest. */
3511 int sm_idx;
3512 sm_state_map *smap;
3513 FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
3514 {
3515 const sm_state_map *old_smap = old_state.m_checker_states[sm_idx];
3516 const sm_state_map *new_smap = new_state.m_checker_states[sm_idx];
3517 if (*old_smap != *new_smap)
3518 return true;
3519 }
3520
3521 return false;
3522 }
3523
/* Create enodes and eedges for function calls that don't have an
   underlying call superedge.

   Such a case occurs when GCC's middle end didn't know which function
   to call, but the analyzer does (with the help of the current state).

   Examples of such calls are dynamically dispatched calls to virtual
   functions, and calls that happen via a function pointer.  */
3532
3533 bool
maybe_create_dynamic_call(const gcall * call,tree fn_decl,exploded_node * node,program_state next_state,program_point & next_point,uncertainty_t * uncertainty,logger * logger)3534 exploded_graph::maybe_create_dynamic_call (const gcall *call,
3535 tree fn_decl,
3536 exploded_node *node,
3537 program_state next_state,
3538 program_point &next_point,
3539 uncertainty_t *uncertainty,
3540 logger *logger)
3541 {
3542 LOG_FUNC (logger);
3543
3544 const program_point *this_point = &node->get_point ();
3545 function *fun = DECL_STRUCT_FUNCTION (fn_decl);
3546 if (fun)
3547 {
3548 const supergraph &sg = this->get_supergraph ();
3549 supernode *sn_entry = sg.get_node_for_function_entry (fun);
3550 supernode *sn_exit = sg.get_node_for_function_exit (fun);
3551
3552 program_point new_point
3553 = program_point::before_supernode (sn_entry,
3554 NULL,
3555 this_point->get_call_string ());
3556
3557 new_point.push_to_call_stack (sn_exit,
3558 next_point.get_supernode());
3559
3560 /* Impose a maximum recursion depth and don't analyze paths
3561 that exceed it further.
3562 This is something of a blunt workaround, but it only
3563 applies to recursion (and mutual recursion), not to
3564 general call stacks. */
3565 if (new_point.get_call_string ().calc_recursion_depth ()
3566 > param_analyzer_max_recursion_depth)
3567 {
3568 if (logger)
3569 logger->log ("rejecting call edge: recursion limit exceeded");
3570 return false;
3571 }
3572
3573 next_state.push_call (*this, node, call, uncertainty);
3574
3575 if (next_state.m_valid)
3576 {
3577 if (logger)
3578 logger->log ("Discovered call to %s [SN: %i -> SN: %i]",
3579 function_name(fun),
3580 this_point->get_supernode ()->m_index,
3581 sn_entry->m_index);
3582
3583 exploded_node *enode = get_or_create_node (new_point,
3584 next_state,
3585 node);
3586 if (enode)
3587 add_edge (node,enode, NULL,
3588 new dynamic_call_info_t (call));
3589 return true;
3590 }
3591 }
3592 return false;
3593 }
3594
3595 /* Subclass of path_context for use within exploded_graph::process_node,
3596 so that we can split states e.g. at "realloc" calls. */
3597
3598 class impl_path_context : public path_context
3599 {
3600 public:
impl_path_context(const program_state * cur_state)3601 impl_path_context (const program_state *cur_state)
3602 : m_cur_state (cur_state),
3603 m_terminate_path (false)
3604 {
3605 }
3606
bifurcation_p() const3607 bool bifurcation_p () const
3608 {
3609 return m_custom_eedge_infos.length () > 0;
3610 }
3611
get_state_at_bifurcation() const3612 const program_state &get_state_at_bifurcation () const
3613 {
3614 gcc_assert (m_state_at_bifurcation);
3615 return *m_state_at_bifurcation;
3616 }
3617
3618 void
bifurcate(custom_edge_info * info)3619 bifurcate (custom_edge_info *info) FINAL OVERRIDE
3620 {
3621 if (m_state_at_bifurcation)
3622 /* Verify that the state at bifurcation is consistent when we
3623 split into multiple out-edges. */
3624 gcc_assert (*m_state_at_bifurcation == *m_cur_state);
3625 else
3626 /* Take a copy of the cur_state at the moment when bifurcation
3627 happens. */
3628 m_state_at_bifurcation
3629 = std::unique_ptr<program_state> (new program_state (*m_cur_state));
3630
3631 /* Take ownership of INFO. */
3632 m_custom_eedge_infos.safe_push (info);
3633 }
3634
terminate_path()3635 void terminate_path () FINAL OVERRIDE
3636 {
3637 m_terminate_path = true;
3638 }
3639
terminate_path_p() const3640 bool terminate_path_p () const FINAL OVERRIDE
3641 {
3642 return m_terminate_path;
3643 }
3644
get_custom_eedge_infos()3645 const vec<custom_edge_info *> & get_custom_eedge_infos ()
3646 {
3647 return m_custom_eedge_infos;
3648 }
3649
3650 private:
3651 const program_state *m_cur_state;
3652
3653 /* Lazily-created copy of the state before the split. */
3654 std::unique_ptr<program_state> m_state_at_bifurcation;
3655
3656 auto_vec <custom_edge_info *> m_custom_eedge_infos;
3657
3658 bool m_terminate_path;
3659 };
3660
3661 /* The core of exploded_graph::process_worklist (the main analysis loop),
3662 handling one node in the worklist.
3663
3664 Get successor <point, state> pairs for NODE, calling get_or_create on
3665 them, and adding an exploded_edge to each successors.
3666
3667 Freshly-created nodes will be added to the worklist. */
3668
3669 void
process_node(exploded_node * node)3670 exploded_graph::process_node (exploded_node *node)
3671 {
3672 logger * const logger = get_logger ();
3673 LOG_FUNC_1 (logger, "EN: %i", node->m_index);
3674
3675 node->set_status (exploded_node::STATUS_PROCESSED);
3676
3677 const program_point &point = node->get_point ();
3678
3679 /* Update cfun and input_location in case of an ICE: make it easier to
3680 track down which source construct we're failing to handle. */
3681 auto_cfun sentinel (node->get_function ());
3682 const gimple *stmt = point.get_stmt ();
3683 if (stmt)
3684 input_location = stmt->location;
3685
3686 const program_state &state = node->get_state ();
3687 if (logger)
3688 {
3689 pretty_printer *pp = logger->get_printer ();
3690 logger->start_log_line ();
3691 pp_string (pp, "point: ");
3692 point.print (pp, format (false));
3693 pp_string (pp, ", state: ");
3694 state.dump_to_pp (m_ext_state, true, false, pp);
3695 logger->end_log_line ();
3696 }
3697
3698 switch (point.get_kind ())
3699 {
3700 default:
3701 gcc_unreachable ();
3702 case PK_ORIGIN:
3703 /* This node exists to simplify finding the shortest path
3704 to an exploded_node. */
3705 break;
3706
3707 case PK_BEFORE_SUPERNODE:
3708 {
3709 program_state next_state (state);
3710 uncertainty_t uncertainty;
3711
3712 if (point.get_from_edge ())
3713 {
3714 impl_region_model_context ctxt (*this, node,
3715 &state, &next_state,
3716 &uncertainty, NULL, NULL);
3717 const cfg_superedge *last_cfg_superedge
3718 = point.get_from_edge ()->dyn_cast_cfg_superedge ();
3719 if (last_cfg_superedge)
3720 next_state.m_region_model->update_for_phis
3721 (node->get_supernode (),
3722 last_cfg_superedge,
3723 &ctxt);
3724 program_state::detect_leaks (state, next_state, NULL,
3725 get_ext_state (), &ctxt);
3726 }
3727
3728 program_point next_point (point.get_next ());
3729 exploded_node *next = get_or_create_node (next_point, next_state, node);
3730 if (next)
3731 add_edge (node, next, NULL);
3732 }
3733 break;
3734 case PK_BEFORE_STMT:
3735 {
3736 /* Determine the effect of a run of one or more statements
3737 within one supernode, generating an edge to the program_point
3738 after the last statement that's processed.
3739
3740 Stop iterating statements and thus consolidating into one enode
3741 when:
3742 - reaching the end of the statements in the supernode
3743 - if an sm-state-change occurs (so that it gets its own
3744 exploded_node)
3745 - if "-fanalyzer-fine-grained" is active
3746 - encountering certain statements must appear at the start of
3747 their enode (for which stmt_requires_new_enode_p returns true)
3748
3749 Update next_state in-place, to get the result of the one
3750 or more stmts that are processed.
3751
3752 Split the node in-place if an sm-state-change occurs, so that
3753 the sm-state-change occurs on an edge where the src enode has
3754 exactly one stmt, the one that caused the change. */
3755 program_state next_state (state);
3756
3757 impl_path_context path_ctxt (&next_state);
3758
3759 uncertainty_t uncertainty;
3760 const supernode *snode = point.get_supernode ();
3761 unsigned stmt_idx;
3762 const gimple *prev_stmt = NULL;
3763 for (stmt_idx = point.get_stmt_idx ();
3764 stmt_idx < snode->m_stmts.length ();
3765 stmt_idx++)
3766 {
3767 const gimple *stmt = snode->m_stmts[stmt_idx];
3768
3769 if (stmt_idx > point.get_stmt_idx ())
3770 if (stmt_requires_new_enode_p (stmt, prev_stmt))
3771 {
3772 stmt_idx--;
3773 break;
3774 }
3775 prev_stmt = stmt;
3776
3777 program_state old_state (next_state);
3778
3779 /* Process the stmt. */
3780 exploded_node::on_stmt_flags flags
3781 = node->on_stmt (*this, snode, stmt, &next_state, &uncertainty,
3782 &path_ctxt);
3783 node->m_num_processed_stmts++;
3784
3785 /* If flags.m_terminate_path, stop analyzing; any nodes/edges
3786 will have been added by on_stmt (e.g. for handling longjmp). */
3787 if (flags.m_terminate_path)
3788 return;
3789
3790 if (next_state.m_region_model)
3791 {
3792 impl_region_model_context ctxt (*this, node,
3793 &old_state, &next_state,
3794 &uncertainty, NULL, stmt);
3795 program_state::detect_leaks (old_state, next_state, NULL,
3796 get_ext_state (), &ctxt);
3797 }
3798
3799 unsigned next_idx = stmt_idx + 1;
3800 program_point next_point
3801 = (next_idx < point.get_supernode ()->m_stmts.length ()
3802 ? program_point::before_stmt (point.get_supernode (), next_idx,
3803 point.get_call_string ())
3804 : program_point::after_supernode (point.get_supernode (),
3805 point.get_call_string ()));
3806 next_state = next_state.prune_for_point (*this, next_point, node,
3807 &uncertainty);
3808
3809 if (flag_analyzer_fine_grained
3810 || state_change_requires_new_enode_p (old_state, next_state)
3811 || path_ctxt.bifurcation_p ()
3812 || path_ctxt.terminate_path_p ())
3813 {
3814 program_point split_point
3815 = program_point::before_stmt (point.get_supernode (),
3816 stmt_idx,
3817 point.get_call_string ());
3818 if (split_point != node->get_point ())
3819 {
3820 /* If we're not at the start of NODE, split the enode at
3821 this stmt, so we have:
3822 node -> split_enode
3823 so that when split_enode is processed the next edge
3824 we add will be:
3825 split_enode -> next
3826 and any state change will effectively occur on that
3827 latter edge, and split_enode will contain just stmt. */
3828 if (logger)
3829 logger->log ("getting split_enode");
3830 exploded_node *split_enode
3831 = get_or_create_node (split_point, old_state, node);
3832 if (!split_enode)
3833 return;
3834 /* "stmt" will be reprocessed when split_enode is
3835 processed. */
3836 node->m_num_processed_stmts--;
3837 if (logger)
3838 logger->log ("creating edge to split_enode");
3839 add_edge (node, split_enode, NULL);
3840 return;
3841 }
3842 else
3843 /* If we're at the start of NODE, stop iterating,
3844 so that an edge will be created from NODE to
3845 (next_point, next_state) below. */
3846 break;
3847 }
3848 }
3849 unsigned next_idx = stmt_idx + 1;
3850 program_point next_point
3851 = (next_idx < point.get_supernode ()->m_stmts.length ()
3852 ? program_point::before_stmt (point.get_supernode (), next_idx,
3853 point.get_call_string ())
3854 : program_point::after_supernode (point.get_supernode (),
3855 point.get_call_string ()));
3856 if (path_ctxt.terminate_path_p ())
3857 {
3858 if (logger)
3859 logger->log ("not adding node: terminating path");
3860 }
3861 else
3862 {
3863 exploded_node *next
3864 = get_or_create_node (next_point, next_state, node);
3865 if (next)
3866 add_edge (node, next, NULL);
3867 }
3868
3869 /* If we have custom edge infos, "bifurcate" the state
3870 accordingly, potentially creating a new state/enode/eedge
3871 instances. For example, to handle a "realloc" call, we
3872 might split into 3 states, for the "failure",
3873 "resizing in place", and "moving to a new buffer" cases. */
3874 for (auto edge_info : path_ctxt.get_custom_eedge_infos ())
3875 {
3876 if (logger)
3877 {
3878 logger->start_log_line ();
3879 logger->log_partial ("bifurcating for edge: ");
3880 edge_info->print (logger->get_printer ());
3881 logger->end_log_line ();
3882 }
3883 program_state bifurcated_new_state
3884 (path_ctxt.get_state_at_bifurcation ());
3885
3886 /* Apply edge_info to state. */
3887 impl_region_model_context
3888 bifurcation_ctxt (*this,
3889 node, // enode_for_diag
3890 &path_ctxt.get_state_at_bifurcation (),
3891 &bifurcated_new_state,
3892 NULL, // uncertainty_t *uncertainty
3893 NULL, // path_context *path_ctxt
3894 stmt);
3895 if (edge_info->update_model (bifurcated_new_state.m_region_model,
3896 NULL, /* no exploded_edge yet. */
3897 &bifurcation_ctxt))
3898 {
3899 exploded_node *next2
3900 = get_or_create_node (next_point, bifurcated_new_state, node);
3901 if (next2)
3902 {
3903 /* Take ownership of edge_info. */
3904 add_edge (node, next2, NULL, edge_info);
3905 }
3906 else
3907 delete edge_info;
3908 }
3909 else
3910 {
3911 if (logger)
3912 logger->log ("infeasible state, not adding node");
3913 delete edge_info;
3914 }
3915 }
3916 }
3917 break;
3918 case PK_AFTER_SUPERNODE:
3919 {
3920 bool found_a_superedge = false;
3921 bool is_an_exit_block = false;
3922 /* If this is an EXIT BB, detect leaks, and potentially
3923 create a function summary. */
3924 if (point.get_supernode ()->return_p ())
3925 {
3926 is_an_exit_block = true;
3927 node->detect_leaks (*this);
3928 if (flag_analyzer_call_summaries
3929 && point.get_call_string ().empty_p ())
3930 {
3931 /* TODO: create function summary
3932 There can be more than one; each corresponds to a different
3933 final enode in the function. */
3934 if (logger)
3935 {
3936 pretty_printer *pp = logger->get_printer ();
3937 logger->start_log_line ();
3938 logger->log_partial
3939 ("would create function summary for %qE; state: ",
3940 point.get_fndecl ());
3941 state.dump_to_pp (m_ext_state, true, false, pp);
3942 logger->end_log_line ();
3943 }
3944 per_function_data *per_fn_data
3945 = get_or_create_per_function_data (point.get_function ());
3946 per_fn_data->add_call_summary (node);
3947 }
3948 }
3949 /* Traverse into successors of the supernode. */
3950 int i;
3951 superedge *succ;
3952 FOR_EACH_VEC_ELT (point.get_supernode ()->m_succs, i, succ)
3953 {
3954 found_a_superedge = true;
3955 if (logger)
3956 logger->log ("considering SN: %i -> SN: %i",
3957 succ->m_src->m_index, succ->m_dest->m_index);
3958
3959 program_point next_point
3960 = program_point::before_supernode (succ->m_dest, succ,
3961 point.get_call_string ());
3962 program_state next_state (state);
3963 uncertainty_t uncertainty;
3964
3965 /* Make use of the current state and try to discover and analyse
3966 indirect function calls (a call that doesn't have an underlying
3967 cgraph edge representing call).
3968
3969 Some examples of such calls are virtual function calls
3970 and calls that happen via a function pointer. */
3971 if (succ->m_kind == SUPEREDGE_INTRAPROCEDURAL_CALL
3972 && !(succ->get_any_callgraph_edge ()))
3973 {
3974 const gcall *call
3975 = point.get_supernode ()->get_final_call ();
3976
3977 impl_region_model_context ctxt (*this,
3978 node,
3979 &state,
3980 &next_state,
3981 &uncertainty,
3982 NULL,
3983 point.get_stmt());
3984
3985 region_model *model = state.m_region_model;
3986 bool call_discovered = false;
3987
3988 if (tree fn_decl = model->get_fndecl_for_call(call,&ctxt))
3989 call_discovered = maybe_create_dynamic_call (call,
3990 fn_decl,
3991 node,
3992 next_state,
3993 next_point,
3994 &uncertainty,
3995 logger);
3996 if (!call_discovered)
3997 {
3998 /* An unknown function or a special function was called
3999 at this point; in such a case, don't terminate the
4000 analysis of the current function.
4001
4002 The analyzer handles calls to such functions while
4003 analysing the stmt itself, so the function call
4004 must have been handled by the analyzer by now. */
4005 exploded_node *next
4006 = get_or_create_node (next_point,
4007 next_state,
4008 node);
4009 if (next)
4010 add_edge (node, next, succ);
4011 }
4012 }
4013
4014 if (!node->on_edge (*this, succ, &next_point, &next_state,
4015 &uncertainty))
4016 {
4017 if (logger)
4018 logger->log ("skipping impossible edge to SN: %i",
4019 succ->m_dest->m_index);
4020 continue;
4021 }
4022 exploded_node *next = get_or_create_node (next_point, next_state,
4023 node);
4024 if (next)
4025 add_edge (node, next, succ);
4026 }
4027
4028 /* Return from calls which don't have a return superedge.
4029 Such a case occurs when GCC's middle end didn't know which
4030 function to call, but the analyzer did. */
4031 if((is_an_exit_block && !found_a_superedge)
4032 && (!point.get_call_string ().empty_p ()))
4033 {
4034 const call_string cs = point.get_call_string ();
4035 program_point next_point
4036 = program_point::before_supernode (cs.get_caller_node (),
4037 NULL,
4038 cs);
4039 program_state next_state (state);
4040 uncertainty_t uncertainty;
4041
4042 const gcall *call
4043 = next_point.get_supernode ()->get_returning_call ();
4044
4045 if(call)
4046 next_state.returning_call (*this, node, call, &uncertainty);
4047
4048 if (next_state.m_valid)
4049 {
4050 next_point.pop_from_call_stack ();
4051 exploded_node *enode = get_or_create_node (next_point,
4052 next_state,
4053 node);
4054 if (enode)
4055 add_edge (node, enode, NULL,
4056 new dynamic_call_info_t (call, true));
4057 }
4058 }
4059 }
4060 break;
4061 }
4062 }
4063
4064 /* Ensure that this graph has a stats instance for FN, return it.
4065 FN can be NULL, in which case a stats instances is returned covering
4066 "functionless" parts of the graph (the origin node). */
4067
4068 stats *
get_or_create_function_stats(function * fn)4069 exploded_graph::get_or_create_function_stats (function *fn)
4070 {
4071 if (!fn)
4072 return &m_functionless_stats;
4073
4074 if (stats **slot = m_per_function_stats.get (fn))
4075 return *slot;
4076 else
4077 {
4078 int num_supernodes = fn ? n_basic_blocks_for_fn (fn) : 0;
4079 /* not quite the num supernodes, but nearly. */
4080 stats *new_stats = new stats (num_supernodes);
4081 m_per_function_stats.put (fn, new_stats);
4082 return new_stats;
4083 }
4084 }
4085
/* Print bar charts to PP showing:
   - the number of enodes per function, and
   - for each function:
     - the number of enodes per supernode/BB
     - the number of excess enodes per supernode/BB beyond the
       per-program-point limit, if there were any.  */

void
exploded_graph::print_bar_charts (pretty_printer *pp) const
{
  cgraph_node *cgnode;

  /* First chart: one bar per function with a gimple body, giving the
     total number of enodes recorded for that function.  */
  pp_string (pp, "enodes per function:");
  pp_newline (pp);
  bar_chart enodes_per_function;
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (cgnode)
    {
      function *fn = cgnode->get_fun ();
      /* hash_map::get is non-const, so cast away the map's constness;
	 this is a read-only lookup.  */
      const stats * const *s_ptr
	= const_cast <function_stat_map_t &> (m_per_function_stats).get (fn);
      /* Functions with no recorded stats get a zero-length bar.  */
      enodes_per_function.add_item (function_name (fn),
				    s_ptr ? (*s_ptr)->get_total_enodes () : 0);
    }
  enodes_per_function.print (pp);

  /* Accumulate number of enodes per supernode.  */
  auto_vec<unsigned> enodes_per_supernode (m_sg.num_nodes ());
  for (int i = 0; i < m_sg.num_nodes (); i++)
    enodes_per_supernode.quick_push (0);
  int i;
  exploded_node *enode;
  FOR_EACH_VEC_ELT (m_nodes, i, enode)
    {
      const supernode *iter_snode = enode->get_supernode ();
      /* Skip enodes without an associated supernode.  */
      if (!iter_snode)
	continue;
      enodes_per_supernode[iter_snode->m_index]++;
    }

  /* Accumulate excess enodes per supernode: enodes that were not
     created due to the per-program-point limit.  */
  auto_vec<unsigned> excess_enodes_per_supernode (m_sg.num_nodes ());
  for (int i = 0; i < m_sg.num_nodes (); i++)
    excess_enodes_per_supernode.quick_push (0);
  for (point_map_t::iterator iter = m_per_point_data.begin ();
       iter != m_per_point_data.end (); ++iter)
    {
      const program_point *point = (*iter).first;
      const supernode *iter_snode = point->get_supernode ();
      if (!iter_snode)
	continue;
      const per_program_point_data *point_data = (*iter).second;
      excess_enodes_per_supernode[iter_snode->m_index]
	+= point_data->m_excess_enodes;
    }

  /* Show per-function bar_charts of enodes per supernode/BB.  */
  pp_string (pp, "per-function enodes per supernode/BB:");
  pp_newline (pp);
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (cgnode)
    {
      function *fn = cgnode->get_fun ();
      pp_printf (pp, "function: %qs", function_name (fn));
      pp_newline (pp);

      bar_chart enodes_per_snode;
      bar_chart excess_enodes_per_snode;
      bool have_excess_enodes = false;
      for (int i = 0; i < m_sg.num_nodes (); i++)
	{
	  const supernode *iter_snode = m_sg.get_node_by_index (i);
	  /* Only chart the supernodes belonging to this function.  */
	  if (iter_snode->get_function () != fn)
	    continue;
	  pretty_printer tmp_pp;
	  pp_printf (&tmp_pp, "sn %i (bb %i)",
		     iter_snode->m_index, iter_snode->m_bb->index);
	  enodes_per_snode.add_item (pp_formatted_text (&tmp_pp),
				     enodes_per_supernode[iter_snode->m_index]);
	  const int num_excess
	    = excess_enodes_per_supernode[iter_snode->m_index];
	  excess_enodes_per_snode.add_item (pp_formatted_text (&tmp_pp),
					    num_excess);
	  if (num_excess)
	    have_excess_enodes = true;
	}
      enodes_per_snode.print (pp);
      /* Only print the "excess" chart if it would be non-empty.  */
      if (have_excess_enodes)
	{
	  pp_printf (pp, "EXCESS ENODES:");
	  pp_newline (pp);
	  excess_enodes_per_snode.print (pp);
	}
    }
}
4179
/* Write all stats information to this graph's logger, if any.  */

void
exploded_graph::log_stats () const
{
  logger * const logger = get_logger ();
  /* Everything below goes through the logger; nothing to do without one.  */
  if (!logger)
    return;

  LOG_SCOPE (logger);

  /* Stats from the underlying analysis engine.  */
  m_ext_state.get_engine ()->log_stats (logger);

  /* Overall graph sizes.  */
  logger->log ("m_sg.num_nodes (): %i", m_sg.num_nodes ());
  logger->log ("m_nodes.length (): %i", m_nodes.length ());
  logger->log ("m_edges.length (): %i", m_edges.length ());
  logger->log ("remaining enodes in worklist: %i", m_worklist.length ());

  logger->log ("global stats:");
  m_global_stats.log (logger);

  /* Per-function stats, each within a log scope named after the
     function.  */
  for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
       iter != m_per_function_stats.end ();
       ++iter)
    {
      function *fn = (*iter).first;
      log_scope s (logger, function_name (fn));
      (*iter).second->log (logger);
    }

  print_bar_charts (logger->get_printer ());
}
4212
4213 /* Dump all stats information to OUT. */
4214
4215 void
dump_stats(FILE * out) const4216 exploded_graph::dump_stats (FILE *out) const
4217 {
4218 fprintf (out, "m_sg.num_nodes (): %i\n", m_sg.num_nodes ());
4219 fprintf (out, "m_nodes.length (): %i\n", m_nodes.length ());
4220 fprintf (out, "m_edges.length (): %i\n", m_edges.length ());
4221 fprintf (out, "remaining enodes in worklist: %i", m_worklist.length ());
4222
4223 fprintf (out, "global stats:\n");
4224 m_global_stats.dump (out);
4225
4226 for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
4227 iter != m_per_function_stats.end ();
4228 ++iter)
4229 {
4230 function *fn = (*iter).first;
4231 fprintf (out, "function: %s\n", function_name (fn));
4232 (*iter).second->dump (out);
4233 }
4234
4235 fprintf (out, "PK_AFTER_SUPERNODE per supernode:\n");
4236 for (unsigned i = 0; i < m_PK_AFTER_SUPERNODE_per_snode.length (); i++)
4237 fprintf (out, " SN %i: %3i\n", i, m_PK_AFTER_SUPERNODE_per_snode[i]);
4238 }
4239
/* Dump, to OUT, the state of each PK_AFTER_SUPERNODE exploded node
   belonging to SNODE, followed by a count of such nodes.  */

void
exploded_graph::dump_states_for_supernode (FILE *out,
					   const supernode *snode) const
{
  fprintf (out, "PK_AFTER_SUPERNODE nodes for SN: %i\n", snode->m_index);
  int i;
  exploded_node *enode;
  int state_idx = 0;
  FOR_EACH_VEC_ELT (m_nodes, i, enode)
    {
      const supernode *iter_snode = enode->get_supernode ();
      /* Only consider enodes at the "after supernode" point for SNODE.  */
      if (enode->get_point ().get_kind () == PK_AFTER_SUPERNODE
	  && iter_snode == snode)
	{
	  pretty_printer pp;
	  pp_format_decoder (&pp) = default_tree_printer;
	  enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
	  fprintf (out, "state %i: EN: %i\n %s\n",
		   state_idx++, enode->m_index,
		   pp_formatted_text (&pp));
	}
    }
  fprintf (out, "#exploded_node for PK_AFTER_SUPERNODE for SN: %i = %i\n",
	   snode->m_index, state_idx);
}
4265
4266 /* Return a new json::object of the form
4267 {"nodes" : [objs for enodes],
4268 "edges" : [objs for eedges],
4269 "ext_state": object for extrinsic_state,
4270 "diagnostic_manager": object for diagnostic_manager}. */
4271
4272 json::object *
to_json() const4273 exploded_graph::to_json () const
4274 {
4275 json::object *egraph_obj = new json::object ();
4276
4277 /* Nodes. */
4278 {
4279 json::array *nodes_arr = new json::array ();
4280 unsigned i;
4281 exploded_node *n;
4282 FOR_EACH_VEC_ELT (m_nodes, i, n)
4283 nodes_arr->append (n->to_json (m_ext_state));
4284 egraph_obj->set ("nodes", nodes_arr);
4285 }
4286
4287 /* Edges. */
4288 {
4289 json::array *edges_arr = new json::array ();
4290 unsigned i;
4291 exploded_edge *n;
4292 FOR_EACH_VEC_ELT (m_edges, i, n)
4293 edges_arr->append (n->to_json ());
4294 egraph_obj->set ("edges", edges_arr);
4295 }
4296
4297 /* m_sg is JSONified at the top-level. */
4298
4299 egraph_obj->set ("ext_state", m_ext_state.to_json ());
4300 egraph_obj->set ("worklist", m_worklist.to_json ());
4301 egraph_obj->set ("diagnostic_manager", m_diagnostic_manager.to_json ());
4302
4303 /* The following fields aren't yet being JSONified:
4304 const state_purge_map *const m_purge_map;
4305 const analysis_plan &m_plan;
4306 stats m_global_stats;
4307 function_stat_map_t m_per_function_stats;
4308 stats m_functionless_stats;
4309 call_string_data_map_t m_per_call_string_data;
4310 auto_vec<int> m_PK_AFTER_SUPERNODE_per_snode; */
4311
4312 return egraph_obj;
4313 }
4314
4315 /* class exploded_path. */
4316
4317 /* Copy ctor. */
4318
exploded_path(const exploded_path & other)4319 exploded_path::exploded_path (const exploded_path &other)
4320 : m_edges (other.m_edges.length ())
4321 {
4322 int i;
4323 const exploded_edge *eedge;
4324 FOR_EACH_VEC_ELT (other.m_edges, i, eedge)
4325 m_edges.quick_push (eedge);
4326 }
4327
4328 /* Look for the last use of SEARCH_STMT within this path.
4329 If found write the edge's index to *OUT_IDX and return true, otherwise
4330 return false. */
4331
4332 bool
find_stmt_backwards(const gimple * search_stmt,int * out_idx) const4333 exploded_path::find_stmt_backwards (const gimple *search_stmt,
4334 int *out_idx) const
4335 {
4336 int i;
4337 const exploded_edge *eedge;
4338 FOR_EACH_VEC_ELT_REVERSE (m_edges, i, eedge)
4339 {
4340 const exploded_node *dst_node = eedge->m_dest;
4341 const program_point &dst_point = dst_node->get_point ();
4342 const gimple *stmt = dst_point.get_stmt ();
4343 if (stmt == search_stmt)
4344 {
4345 *out_idx = i;
4346 return true;
4347 }
4348 }
4349 return false;
4350 }
4351
4352 /* Get the final exploded_node in this path, which must be non-empty. */
4353
4354 exploded_node *
get_final_enode() const4355 exploded_path::get_final_enode () const
4356 {
4357 gcc_assert (m_edges.length () > 0);
4358 return m_edges[m_edges.length () - 1]->m_dest;
4359 }
4360
/* Check state along this path, returning true if it is feasible.
   If OUT is non-NULL, and the path is infeasible, write a new
   feasibility_problem to *OUT.  */

bool
exploded_path::feasible_p (logger *logger, feasibility_problem **out,
			   engine *eng, const exploded_graph *eg) const
{
  LOG_SCOPE (logger);

  /* Start from a fresh state at the beginning of the path.  */
  feasibility_state state (eng->get_model_manager (),
			   eg->get_supergraph ());

  /* Traverse the path, updating this state.  */
  for (unsigned edge_idx = 0; edge_idx < m_edges.length (); edge_idx++)
    {
      const exploded_edge *eedge = m_edges[edge_idx];
      if (logger)
	logger->log ("considering edge %i: EN:%i -> EN:%i",
		     edge_idx,
		     eedge->m_src->m_index,
		     eedge->m_dest->m_index);

      rejected_constraint *rc = NULL;
      if (!state.maybe_update_for_edge (logger, eedge, &rc))
	{
	  /* The edge was infeasible; maybe_update_for_edge is expected
	     to have written the rejected constraint to RC.  */
	  gcc_assert (rc);
	  if (out)
	    {
	      const exploded_node &src_enode = *eedge->m_src;
	      const program_point &src_point = src_enode.get_point ();
	      const gimple *last_stmt
		= src_point.get_supernode ()->get_last_stmt ();
	      /* RC is passed to the feasibility_problem; it is not
		 deleted here.  */
	      *out = new feasibility_problem (edge_idx, *eedge,
					      last_stmt, rc);
	    }
	  else
	    /* The caller doesn't want the details; discard RC.  */
	    delete rc;
	  return false;
	}

      if (logger)
	{
	  logger->log ("state after edge %i: EN:%i -> EN:%i",
		       edge_idx,
		       eedge->m_src->m_index,
		       eedge->m_dest->m_index);
	  logger->start_log_line ();
	  state.get_model ().dump_to_pp (logger->get_printer (), true, false);
	  logger->end_log_line ();
	}
    }

  /* Every edge along the path was feasible.  */
  return true;
}
4416
4417 /* Dump this path in multiline form to PP.
4418 If EXT_STATE is non-NULL, then show the nodes. */
4419
4420 void
dump_to_pp(pretty_printer * pp,const extrinsic_state * ext_state) const4421 exploded_path::dump_to_pp (pretty_printer *pp,
4422 const extrinsic_state *ext_state) const
4423 {
4424 for (unsigned i = 0; i < m_edges.length (); i++)
4425 {
4426 const exploded_edge *eedge = m_edges[i];
4427 pp_printf (pp, "m_edges[%i]: EN %i -> EN %i",
4428 i,
4429 eedge->m_src->m_index,
4430 eedge->m_dest->m_index);
4431 pp_newline (pp);
4432
4433 if (ext_state)
4434 eedge->m_dest->dump_to_pp (pp, *ext_state);
4435 }
4436 }
4437
/* Dump this path in multiline form to FP.  */

void
exploded_path::dump (FILE *fp, const extrinsic_state *ext_state) const
{
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  /* Follow the colorization setting of the global diagnostic context.  */
  pp_show_color (&pp) = pp_show_color (global_dc->printer);
  /* Direct the pretty_printer's output to FP.  */
  pp.buffer->stream = fp;
  dump_to_pp (&pp, ext_state);
  pp_flush (&pp);
}
4450
/* Dump this path in multiline form to stderr.
   Marked DEBUG_FUNCTION for convenient use from the debugger.  */

DEBUG_FUNCTION void
exploded_path::dump (const extrinsic_state *ext_state) const
{
  dump (stderr, ext_state);
}
4458
/* Dump this path verbosely to FILENAME.  */

void
exploded_path::dump_to_file (const char *filename,
			     const extrinsic_state &ext_state) const
{
  FILE *fp = fopen (filename, "w");
  /* Silently do nothing if the file can't be opened.  */
  if (!fp)
    return;
  pretty_printer pp;
  pp_format_decoder (&pp) = default_tree_printer;
  /* Direct the pretty_printer's output to the file.  */
  pp.buffer->stream = fp;
  dump_to_pp (&pp, &ext_state);
  pp_flush (&pp);
  fclose (fp);
}
4475
/* class feasibility_problem.  */

/* Dump a description of this problem to PP: which eedge was infeasible
   and, if available, the rejected constraint and the model in which it
   was rejected.  */

void
feasibility_problem::dump_to_pp (pretty_printer *pp) const
{
  pp_printf (pp, "edge from EN: %i to EN: %i",
	     m_eedge.m_src->m_index, m_eedge.m_dest->m_index);
  if (m_rc)
    {
      pp_string (pp, "; rejected constraint: ");
      m_rc->dump_to_pp (pp);
      pp_string (pp, "; rmodel: ");
      m_rc->get_model ().dump_to_pp (pp, true, false);
    }
}
4491
/* class feasibility_state.  */

/* Ctor for feasibility_state, at the beginning of a path.  */

feasibility_state::feasibility_state (region_model_manager *manager,
				      const supergraph &sg)
: m_model (manager),
  m_snodes_visited (sg.m_nodes.length ())
{
  /* The set of visited supernodes starts out empty.  */
  bitmap_clear (m_snodes_visited);
}
4503
/* Copy ctor for feasibility_state, for extending a path.  */

feasibility_state::feasibility_state (const feasibility_state &other)
: m_model (other.m_model),
  m_snodes_visited (const_sbitmap (other.m_snodes_visited)->n_bits)
{
  /* Allocate a bitmap of the same size as OTHER's, then copy the bits
     across.  */
  bitmap_copy (m_snodes_visited, other.m_snodes_visited);
}
4512
/* The heart of feasibility-checking.

   Attempt to update this state in-place based on traversing EEDGE
   in a path.
   Update the model for the stmts in the src enode.
   Attempt to add constraints for EEDGE.

   If feasible, return true.
   Otherwise, return false and write to *OUT_RC.  */

bool
feasibility_state::maybe_update_for_edge (logger *logger,
					  const exploded_edge *eedge,
					  rejected_constraint **out_rc)
{
  const exploded_node &src_enode = *eedge->m_src;
  const program_point &src_point = src_enode.get_point ();
  if (logger)
    {
      logger->start_log_line ();
      src_point.print (logger->get_printer (), format (false));
      logger->end_log_line ();
    }

  /* Update state for the stmts that were processed in each enode.  */
  for (unsigned stmt_idx = 0; stmt_idx < src_enode.m_num_processed_stmts;
       stmt_idx++)
    {
      const gimple *stmt = src_enode.get_processed_stmt (stmt_idx);

      /* Update cfun and input_location in case of ICE: make it easier to
	 track down which source construct we're failing to handle.  */
      auto_cfun sentinel (src_point.get_function ());
      input_location = stmt->location;

      /* Replay the effect of each kind of stmt on m_model.  */
      if (const gassign *assign = dyn_cast <const gassign *> (stmt))
	m_model.on_assignment (assign, NULL);
      else if (const gasm *asm_stmt = dyn_cast <const gasm *> (stmt))
	m_model.on_asm_stmt (asm_stmt, NULL);
      else if (const gcall *call = dyn_cast <const gcall *> (stmt))
	{
	  bool terminate_path;
	  bool unknown_side_effects
	    = m_model.on_call_pre (call, NULL, &terminate_path);
	  m_model.on_call_post (call, unknown_side_effects, NULL);
	}
      else if (const greturn *return_ = dyn_cast <const greturn *> (stmt))
	m_model.on_return (return_, NULL);
    }

  const superedge *sedge = eedge->m_sedge;
  if (sedge)
    {
      if (logger)
	{
	  char *desc = sedge->get_description (false);
	  logger->log (" sedge: SN:%i -> SN:%i %s",
		       sedge->m_src->m_index,
		       sedge->m_dest->m_index,
		       desc);
	  free (desc);
	}

      const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();
      /* Attempt to add the constraints implied by following SEDGE; if
	 they can't be satisfied, the edge is infeasible and *OUT_RC is
	 written to by the region model.  */
      if (!m_model.maybe_update_for_edge (*sedge, last_stmt, NULL, out_rc))
	{
	  if (logger)
	    {
	      logger->log ("rejecting due to region model");
	      m_model.dump_to_pp (logger->get_printer (), true, false);
	    }
	  return false;
	}
    }
  else
    {
      /* Special-case the initial eedge from the origin node to the
	 initial function by pushing a frame for it.  */
      if (src_point.get_kind () == PK_ORIGIN)
	{
	  gcc_assert (eedge->m_src->m_index == 0);
	  gcc_assert (eedge->m_dest->get_point ().get_kind ()
		      == PK_BEFORE_SUPERNODE);
	  function *fun = eedge->m_dest->get_function ();
	  gcc_assert (fun);
	  m_model.push_frame (fun, NULL, NULL);
	  if (logger)
	    logger->log (" pushing frame for %qD", fun->decl);
	}
      else if (eedge->m_custom_info)
	{
	  /* Apply any custom edge effects to the model.  */
	  eedge->m_custom_info->update_model (&m_model, eedge, NULL);
	}
    }

  /* Handle phi nodes on an edge leaving a PK_BEFORE_SUPERNODE (to
     a PK_BEFORE_STMT, or a PK_AFTER_SUPERNODE if no stmts).
     This will typically not be associated with a superedge.  */
  if (src_point.get_from_edge ())
    {
      const cfg_superedge *last_cfg_superedge
	= src_point.get_from_edge ()->dyn_cast_cfg_superedge ();
      const exploded_node &dst_enode = *eedge->m_dest;
      const unsigned dst_snode_idx = dst_enode.get_supernode ()->m_index;
      if (last_cfg_superedge)
	{
	  if (logger)
	    logger->log (" update for phis");
	  m_model.update_for_phis (src_enode.get_supernode (),
				   last_cfg_superedge,
				   NULL);
	  /* If we're entering an snode that we've already visited on this
	     epath, then we need to fix things up for loops; see the
	     comment for store::loop_replay_fixup.
	     Perhaps we should also verify the callstring,
	     and track program_points, but hopefully doing it by supernode
	     is good enough.  */
	  if (bitmap_bit_p (m_snodes_visited, dst_snode_idx))
	    m_model.loop_replay_fixup (dst_enode.get_state ().m_region_model);
	}
      /* Record the destination snode as visited.  */
      bitmap_set_bit (m_snodes_visited, dst_snode_idx);
    }
  return true;
}
4637
/* Dump this object to PP.  */

void
feasibility_state::dump_to_pp (pretty_printer *pp,
			       bool simple, bool multiline) const
{
  /* Delegate to the region model's dump.  */
  m_model.dump_to_pp (pp, simple, multiline);
}
4646
/* A family of cluster subclasses for use when generating .dot output for
   exploded graphs (-fdump-analyzer-exploded-graph), for grouping the
   enodes into hierarchical boxes.

   All functionless enodes appear in the top-level graph.
   Every (function, call_string) pair gets its own cluster.  Within that
   cluster, each supernode gets its own cluster.

   Hence all enodes relating to a particular function with a particular
   callstring will be in a cluster together; all enodes for the same
   function but with a different callstring will be in a different
   cluster.  */

/* Base class of cluster for clustering exploded_node instances in .dot
   output, based on various subclass-specific criteria.  */

class exploded_cluster : public cluster<eg_traits>
{
};
4666
/* Cluster containing all exploded_node instances for one supernode.  */

class supernode_cluster : public exploded_cluster
{
public:
  supernode_cluster (const supernode *supernode) : m_supernode (supernode) {}

  // TODO: dtor?

  /* Emit a dashed .dot subgraph for this supernode, labelled with its
     supernode index, BB index and SCC id, containing all of its
     enodes.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
  {
    gv->println ("subgraph \"cluster_supernode_%i\" {", m_supernode->m_index);
    gv->indent ();
    gv->println ("style=\"dashed\";");
    gv->println ("label=\"SN: %i (bb: %i; scc: %i)\";",
		 m_supernode->m_index, m_supernode->m_bb->index,
		 args.m_eg.get_scc_id (*m_supernode));

    int i;
    exploded_node *enode;
    FOR_EACH_VEC_ELT (m_enodes, i, enode)
      enode->dump_dot (gv, args);

    /* Terminate subgraph.  */
    gv->outdent ();
    gv->println ("}");
  }

  /* Add EN to this cluster.  */
  void add_node (exploded_node *en) FINAL OVERRIDE
  {
    m_enodes.safe_push (en);
  }

  /* Comparator for use by auto_vec<supernode_cluster *>::qsort,
     ordering clusters by supernode index.  */

  static int cmp_ptr_ptr (const void *p1, const void *p2)
  {
    const supernode_cluster *c1
      = *(const supernode_cluster * const *)p1;
    const supernode_cluster *c2
      = *(const supernode_cluster * const *)p2;
    return c1->m_supernode->m_index - c2->m_supernode->m_index;
  }

private:
  const supernode *m_supernode;
  /* The enodes within this supernode; this class does not delete them.  */
  auto_vec <exploded_node *> m_enodes;
};
4715
4716 /* Cluster containing all supernode_cluster instances for one
4717 (function, call_string) pair. */
4718
4719 class function_call_string_cluster : public exploded_cluster
4720 {
4721 public:
function_call_string_cluster(function * fun,call_string cs)4722 function_call_string_cluster (function *fun, call_string cs)
4723 : m_fun (fun), m_cs (cs) {}
4724
~function_call_string_cluster()4725 ~function_call_string_cluster ()
4726 {
4727 for (map_t::iterator iter = m_map.begin ();
4728 iter != m_map.end ();
4729 ++iter)
4730 delete (*iter).second;
4731 }
4732
dump_dot(graphviz_out * gv,const dump_args_t & args) const4733 void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
4734 {
4735 const char *funcname = function_name (m_fun);
4736
4737 gv->println ("subgraph \"cluster_function_%s\" {",
4738 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (m_fun->decl)));
4739 gv->indent ();
4740 gv->write_indent ();
4741 gv->print ("label=\"call string: ");
4742 m_cs.print (gv->get_pp ());
4743 gv->print (" function: %s \";", funcname);
4744 gv->print ("\n");
4745
4746 /* Dump m_map, sorting it to avoid churn when comparing dumps. */
4747 auto_vec<supernode_cluster *> child_clusters (m_map.elements ());
4748 for (map_t::iterator iter = m_map.begin ();
4749 iter != m_map.end ();
4750 ++iter)
4751 child_clusters.quick_push ((*iter).second);
4752
4753 child_clusters.qsort (supernode_cluster::cmp_ptr_ptr);
4754
4755 unsigned i;
4756 supernode_cluster *child_cluster;
4757 FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
4758 child_cluster->dump_dot (gv, args);
4759
4760 /* Terminate subgraph. */
4761 gv->outdent ();
4762 gv->println ("}");
4763 }
4764
add_node(exploded_node * en)4765 void add_node (exploded_node *en) FINAL OVERRIDE
4766 {
4767 const supernode *supernode = en->get_supernode ();
4768 gcc_assert (supernode);
4769 supernode_cluster **slot = m_map.get (supernode);
4770 if (slot)
4771 (*slot)->add_node (en);
4772 else
4773 {
4774 supernode_cluster *child = new supernode_cluster (supernode);
4775 m_map.put (supernode, child);
4776 child->add_node (en);
4777 }
4778 }
4779
4780 /* Comparator for use by auto_vec<function_call_string_cluster *>. */
4781
cmp_ptr_ptr(const void * p1,const void * p2)4782 static int cmp_ptr_ptr (const void *p1, const void *p2)
4783 {
4784 const function_call_string_cluster *c1
4785 = *(const function_call_string_cluster * const *)p1;
4786 const function_call_string_cluster *c2
4787 = *(const function_call_string_cluster * const *)p2;
4788 if (int cmp_names
4789 = strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c1->m_fun->decl)),
4790 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c2->m_fun->decl))))
4791 return cmp_names;
4792 return call_string::cmp (c1->m_cs, c2->m_cs);
4793 }
4794
private:
  /* The function this cluster represents.  */
  function *m_fun;
  /* The calling context (call string) this cluster represents.  */
  call_string m_cs;
  /* Map from supernode to the owned child cluster grouping the enodes
     within that supernode; ordered to avoid churn in dumps.  */
  typedef ordered_hash_map<const supernode *, supernode_cluster *> map_t;
  map_t m_map;
};
4801
/* Keys for root_cluster.  */

/* A (function, call string) pair, for use as a hash_map key.  */

struct function_call_string
{
  /* FUN must be non-NULL: NULL and (function *)1 are reserved by the
     hash traits specializations below for empty/deleted slots.  */
  function_call_string (function *fun, call_string cs)
  : m_fun (fun), m_cs (cs)
  {
    gcc_assert (fun);
  }

  function *m_fun;
  call_string m_cs;
};
4815
4816 } // namespace ana
4817
/* Hash function_call_string keys via the pod_hash_traits
   specializations below; the "empty" marker is not the all-zero bit
   pattern (see mark_empty/is_empty, which use a NULL m_fun).  */

template <> struct default_hash_traits<function_call_string>
: public pod_hash_traits<function_call_string>
{
  static const bool empty_zero_p = false;
};
4823
/* Hash a function_call_string by combining the hash of the function
   pointer with that of the call string.  */

template <>
inline hashval_t
pod_hash_traits<function_call_string>::hash (value_type v)
{
  return pointer_hash <function>::hash (v.m_fun) ^ v.m_cs.hash ();
}
4830
/* Two keys are equal iff both the function and the call string match.  */

template <>
inline bool
pod_hash_traits<function_call_string>::equal (const value_type &existing,
                                              const value_type &candidate)
{
  return existing.m_fun == candidate.m_fun && existing.m_cs == candidate.m_cs;
}
/* Mark a slot as deleted using the otherwise-invalid pointer value 1
   for m_fun (a real key's m_fun is a genuine function, per the
   function_call_string ctor).  */
template <>
inline void
pod_hash_traits<function_call_string>::mark_deleted (value_type &v)
{
  v.m_fun = reinterpret_cast<function *> (1);
}
/* Mark a slot as empty using a NULL m_fun (the function_call_string
   ctor asserts fun is non-NULL, so this can't collide with a key).  */
template <>
inline void
pod_hash_traits<function_call_string>::mark_empty (value_type &v)
{
  v.m_fun = NULL;
}
/* A slot is deleted iff m_fun holds the sentinel value 1
   (see mark_deleted above).  */
template <>
inline bool
pod_hash_traits<function_call_string>::is_deleted (value_type v)
{
  return v.m_fun == reinterpret_cast<function *> (1);
}
/* A slot is empty iff m_fun is NULL (see mark_empty above).  */
template <>
inline bool
pod_hash_traits<function_call_string>::is_empty (value_type v)
{
  return v.m_fun == NULL;
}
4862
4863 namespace ana {
4864
/* Top-level cluster for generating .dot output for exploded graphs,
   handling the functionless nodes, and grouping the remaining nodes by
   callstring.  */

class root_cluster : public exploded_cluster
{
public:
  /* Delete the owned child clusters (the map's values; the keys are
     PODs and need no cleanup).  */
  ~root_cluster ()
  {
    for (map_t::iterator iter = m_map.begin ();
         iter != m_map.end ();
         ++iter)
      delete (*iter).second;
  }

  /* Dump the functionless enodes directly, then each per-(function,
     call string) child cluster, sorted for stable output.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
  {
    int i;
    exploded_node *enode;
    FOR_EACH_VEC_ELT (m_functionless_enodes, i, enode)
      enode->dump_dot (gv, args);

    /* Dump m_map, sorting it to avoid churn when comparing dumps.  */
    auto_vec<function_call_string_cluster *> child_clusters (m_map.elements ());
    for (map_t::iterator iter = m_map.begin ();
         iter != m_map.end ();
         ++iter)
      child_clusters.quick_push ((*iter).second);

    child_clusters.qsort (function_call_string_cluster::cmp_ptr_ptr);

    function_call_string_cluster *child_cluster;
    FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
      child_cluster->dump_dot (gv, args);
  }

  /* File EN under the child cluster for its (function, call string)
     pair, creating that cluster on first use.  Enodes with no function
     go in m_functionless_enodes.  */
  void add_node (exploded_node *en) FINAL OVERRIDE
  {
    function *fun = en->get_function ();
    if (!fun)
      {
        m_functionless_enodes.safe_push (en);
        return;
      }

    const call_string &cs = en->get_point ().get_call_string ();
    function_call_string key (fun, cs);
    function_call_string_cluster **slot = m_map.get (key);
    if (slot)
      (*slot)->add_node (en);
    else
      {
        function_call_string_cluster *child
          = new function_call_string_cluster (fun, cs);
        m_map.put (key, child);
        child->add_node (en);
      }
  }

private:
  /* This can't be an ordered_hash_map, as we can't store vec<call_string>,
     since it's not a POD; vec<>::quick_push has:
     *slot = obj;
     and the slot isn't initialized, so the assignment op dies when cleaning up
     un-inited *slot (within the truncate call).  */
  typedef hash_map<function_call_string, function_call_string_cluster *> map_t;
  map_t m_map;

  /* This should just be the origin exploded_node.  */
  auto_vec <exploded_node *> m_functionless_enodes;
};
4936
/* Subclass of range_label for use within
   exploded_graph::dump_exploded_nodes for implementing
   -fdump-analyzer-exploded-nodes: a label for a specific
   exploded_node.  */

class enode_label : public range_label
{
public:
  /* Both EXT_STATE and ENODE are borrowed and must outlive this
     label.  */
  enode_label (const extrinsic_state &ext_state,
               exploded_node *enode)
  : m_ext_state (ext_state), m_enode (enode) {}

  /* Build the label text "EN: <index>: <state-dump>", using the
     default tree printer for any tree format codes.  */
  label_text get_text (unsigned) const FINAL OVERRIDE
  {
    pretty_printer pp;
    pp_format_decoder (&pp) = default_tree_printer;
    m_enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
    return make_label_text (false, "EN: %i: %s",
                            m_enode->m_index, pp_formatted_text (&pp));
  }

private:
  const extrinsic_state &m_ext_state;
  exploded_node *m_enode;
};
4962
4963 /* Postprocessing support for dumping the exploded nodes.
4964 Handle -fdump-analyzer-exploded-nodes,
4965 -fdump-analyzer-exploded-nodes-2, and the
4966 "__analyzer_dump_exploded_nodes" builtin. */
4967
4968 void
dump_exploded_nodes() const4969 exploded_graph::dump_exploded_nodes () const
4970 {
4971 // TODO
4972 /* Locate calls to __analyzer_dump_exploded_nodes. */
4973 // Print how many egs there are for them?
4974 /* Better: log them as we go, and record the exploded nodes
4975 in question. */
4976
4977 /* Show every enode. */
4978
4979 /* Gather them by stmt, so that we can more clearly see the
4980 "hotspots" requiring numerous exploded nodes. */
4981
4982 /* Alternatively, simply throw them all into one big rich_location
4983 and see if the label-printing will sort it out...
4984 This requires them all to be in the same source file. */
4985
4986 if (flag_dump_analyzer_exploded_nodes)
4987 {
4988 auto_timevar tv (TV_ANALYZER_DUMP);
4989 gcc_rich_location richloc (UNKNOWN_LOCATION);
4990 unsigned i;
4991 exploded_node *enode;
4992 FOR_EACH_VEC_ELT (m_nodes, i, enode)
4993 {
4994 if (const gimple *stmt = enode->get_stmt ())
4995 {
4996 if (get_pure_location (richloc.get_loc ()) == UNKNOWN_LOCATION)
4997 richloc.set_range (0, stmt->location, SHOW_RANGE_WITH_CARET);
4998 else
4999 richloc.add_range (stmt->location,
5000 SHOW_RANGE_WITHOUT_CARET,
5001 new enode_label (m_ext_state, enode));
5002 }
5003 }
5004 warning_at (&richloc, 0, "%i exploded nodes", m_nodes.length ());
5005
5006 /* Repeat the warning without all the labels, so that message is visible
5007 (the other one may well have scrolled past the terminal limit). */
5008 warning_at (richloc.get_loc (), 0,
5009 "%i exploded nodes", m_nodes.length ());
5010
5011 if (m_worklist.length () > 0)
5012 warning_at (richloc.get_loc (), 0,
5013 "worklist still contains %i nodes", m_worklist.length ());
5014 }
5015
5016 /* Dump the egraph in textual form to a dump file. */
5017 if (flag_dump_analyzer_exploded_nodes_2)
5018 {
5019 auto_timevar tv (TV_ANALYZER_DUMP);
5020 char *filename
5021 = concat (dump_base_name, ".eg.txt", NULL);
5022 FILE *outf = fopen (filename, "w");
5023 if (!outf)
5024 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
5025 free (filename);
5026
5027 fprintf (outf, "exploded graph for %s\n", dump_base_name);
5028 fprintf (outf, " nodes: %i\n", m_nodes.length ());
5029 fprintf (outf, " edges: %i\n", m_edges.length ());
5030
5031 unsigned i;
5032 exploded_node *enode;
5033 FOR_EACH_VEC_ELT (m_nodes, i, enode)
5034 {
5035 fprintf (outf, "\nEN %i:\n", enode->m_index);
5036 enode->dump_succs_and_preds (outf);
5037 pretty_printer pp;
5038 enode->get_point ().print (&pp, format (true));
5039 fprintf (outf, "%s\n", pp_formatted_text (&pp));
5040 enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
5041 }
5042
5043 fclose (outf);
5044 }
5045
5046 /* Dump the egraph in textual form to multiple dump files, one per enode. */
5047 if (flag_dump_analyzer_exploded_nodes_3)
5048 {
5049 auto_timevar tv (TV_ANALYZER_DUMP);
5050
5051 unsigned i;
5052 exploded_node *enode;
5053 FOR_EACH_VEC_ELT (m_nodes, i, enode)
5054 {
5055 char *filename
5056 = xasprintf ("%s.en-%i.txt", dump_base_name, i);
5057 FILE *outf = fopen (filename, "w");
5058 if (!outf)
5059 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
5060 free (filename);
5061
5062 fprintf (outf, "EN %i:\n", enode->m_index);
5063 enode->dump_succs_and_preds (outf);
5064 pretty_printer pp;
5065 enode->get_point ().print (&pp, format (true));
5066 fprintf (outf, "%s\n", pp_formatted_text (&pp));
5067 enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
5068
5069 fclose (outf);
5070 }
5071 }
5072
5073 /* Emit a warning at any call to "__analyzer_dump_exploded_nodes",
5074 giving the number of processed exploded nodes for "before-stmt",
5075 and the IDs of processed, merger, and worklist enodes.
5076
5077 We highlight the count of *processed* enodes since this is of most
5078 interest in DejaGnu tests for ensuring that state merger has
5079 happened.
5080
5081 We don't show the count of merger and worklist enodes, as this is
5082 more of an implementation detail of the merging/worklist that we
5083 don't want to bake into our expected DejaGnu messages. */
5084
5085 unsigned i;
5086 exploded_node *enode;
5087 hash_set<const gimple *> seen;
5088 FOR_EACH_VEC_ELT (m_nodes, i, enode)
5089 {
5090 if (enode->get_point ().get_kind () != PK_BEFORE_STMT)
5091 continue;
5092
5093 if (const gimple *stmt = enode->get_stmt ())
5094 if (const gcall *call = dyn_cast <const gcall *> (stmt))
5095 if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
5096 1))
5097 {
5098 if (seen.contains (stmt))
5099 continue;
5100
5101 auto_vec<exploded_node *> processed_enodes;
5102 auto_vec<exploded_node *> merger_enodes;
5103 auto_vec<exploded_node *> worklist_enodes;
5104 /* This is O(N^2). */
5105 unsigned j;
5106 exploded_node *other_enode;
5107 FOR_EACH_VEC_ELT (m_nodes, j, other_enode)
5108 {
5109 if (other_enode->get_point ().get_kind () != PK_BEFORE_STMT)
5110 continue;
5111 if (other_enode->get_stmt () == stmt)
5112 switch (other_enode->get_status ())
5113 {
5114 default:
5115 gcc_unreachable ();
5116 case exploded_node::STATUS_WORKLIST:
5117 worklist_enodes.safe_push (other_enode);
5118 break;
5119 case exploded_node::STATUS_PROCESSED:
5120 processed_enodes.safe_push (other_enode);
5121 break;
5122 case exploded_node::STATUS_MERGER:
5123 merger_enodes.safe_push (other_enode);
5124 break;
5125 }
5126 }
5127
5128 pretty_printer pp;
5129 pp_character (&pp, '[');
5130 print_enode_indices (&pp, processed_enodes);
5131 if (merger_enodes.length () > 0)
5132 {
5133 pp_string (&pp, "] merger(s): [");
5134 print_enode_indices (&pp, merger_enodes);
5135 }
5136 if (worklist_enodes.length () > 0)
5137 {
5138 pp_string (&pp, "] worklist: [");
5139 print_enode_indices (&pp, worklist_enodes);
5140 }
5141 pp_character (&pp, ']');
5142
5143 warning_n (stmt->location, 0, processed_enodes.length (),
5144 "%i processed enode: %s",
5145 "%i processed enodes: %s",
5146 processed_enodes.length (), pp_formatted_text (&pp));
5147 seen.add (stmt);
5148
5149 /* If the argument is non-zero, then print all of the states
5150 of the various enodes. */
5151 tree t_arg = fold (gimple_call_arg (call, 0));
5152 if (TREE_CODE (t_arg) != INTEGER_CST)
5153 {
5154 error_at (call->location,
5155 "integer constant required for arg 1");
5156 return;
5157 }
5158 int i_arg = TREE_INT_CST_LOW (t_arg);
5159 if (i_arg)
5160 {
5161 exploded_node *other_enode;
5162 FOR_EACH_VEC_ELT (processed_enodes, j, other_enode)
5163 {
5164 fprintf (stderr, "%i of %i: EN %i:\n",
5165 j + 1, processed_enodes.length (),
5166 other_enode->m_index);
5167 other_enode->dump_succs_and_preds (stderr);
5168 /* Dump state. */
5169 other_enode->get_state ().dump (m_ext_state, false);
5170 }
5171 }
5172 }
5173 }
5174 }
5175
5176 DEBUG_FUNCTION exploded_node *
get_node_by_index(int idx) const5177 exploded_graph::get_node_by_index (int idx) const
5178 {
5179 exploded_node *enode = m_nodes[idx];
5180 gcc_assert (enode->m_index == idx);
5181 return enode;
5182 }
5183
5184 /* Ensure that there is an exploded_node for a top-level call to FNDECL. */
5185
5186 void
on_escaped_function(tree fndecl)5187 exploded_graph::on_escaped_function (tree fndecl)
5188 {
5189 logger * const logger = get_logger ();
5190 LOG_FUNC_1 (logger, "%qE", fndecl);
5191
5192 cgraph_node *cgnode = cgraph_node::get (fndecl);
5193 if (!cgnode)
5194 return;
5195
5196 function *fun = cgnode->get_fun ();
5197 if (!fun)
5198 return;
5199
5200 if (!gimple_has_body_p (fndecl))
5201 return;
5202
5203 exploded_node *enode = add_function_entry (fun);
5204 if (logger)
5205 {
5206 if (enode)
5207 logger->log ("created EN %i for %qE entrypoint",
5208 enode->m_index, fun->decl);
5209 else
5210 logger->log ("did not create enode for %qE entrypoint", fun->decl);
5211 }
5212 }
5213
/* A collection of classes for visualizing the callgraph in .dot form
   (as represented in the supergraph).  */

/* Forward decls.  */
class viz_callgraph_node;
class viz_callgraph_edge;
class viz_callgraph;
class viz_callgraph_cluster;

/* Traits for using "digraph.h" to visualize the callgraph.  */

struct viz_callgraph_traits
{
  typedef viz_callgraph_node node_t;
  typedef viz_callgraph_edge edge_t;
  typedef viz_callgraph graph_t;
  /* Extra data passed to every dump_dot call: the exploded graph,
     if any (may be NULL), for showing per-function enode counts.  */
  struct dump_args_t
  {
    dump_args_t (const exploded_graph *eg) : m_eg (eg) {}
    const exploded_graph *m_eg;
  };
  typedef viz_callgraph_cluster cluster_t;
};
5237
/* Subclass of dnode representing a function within the callgraph.  */

class viz_callgraph_node : public dnode<viz_callgraph_traits>
{
  friend class viz_callgraph;

public:
  /* FUN must be non-NULL; INDEX identifies this node within the graph
     and is used to build its dot id.  The supernode/superedge tallies
     start at zero and are filled in by viz_callgraph's ctor.  */
  viz_callgraph_node (function *fun, int index)
  : m_fun (fun), m_index (index), m_num_supernodes (0), m_num_superedges (0)
  {
    gcc_assert (fun);
  }

  /* Emit this node as an HTML-like dot label: a table with the
     function name, supernode/superedge counts, and, when an exploded
     graph is present in ARGS, enode counts (total and per call
     string) plus any per-function summaries.  */
  void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
  {
    pretty_printer *pp = gv->get_pp ();

    dump_dot_id (pp);
    pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=<",
               "lightgrey");
    pp_string (pp, "<TABLE BORDER=\"0\">");
    pp_write_text_to_stream (pp);

    gv->begin_trtd ();
    pp_printf (pp, "VCG: %i: %s", m_index, function_name (m_fun));
    gv->end_tdtr ();
    pp_newline (pp);

    gv->begin_trtd ();
    pp_printf (pp, "supernodes: %i\n", m_num_supernodes);
    gv->end_tdtr ();
    pp_newline (pp);

    gv->begin_trtd ();
    pp_printf (pp, "superedges: %i\n", m_num_superedges);
    gv->end_tdtr ();
    pp_newline (pp);

    if (args.m_eg)
      {
        unsigned i;
        exploded_node *enode;
        /* Total enodes within this function, any call string.  */
        unsigned num_enodes = 0;
        FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
          {
            if (enode->get_point ().get_function () == m_fun)
              num_enodes++;
          }
        gv->begin_trtd ();
        pp_printf (pp, "enodes: %i\n", num_enodes);
        gv->end_tdtr ();
        pp_newline (pp);

        // TODO: also show the per-callstring breakdown
        const exploded_graph::call_string_data_map_t *per_cs_data
          = args.m_eg->get_per_call_string_data ();
        for (exploded_graph::call_string_data_map_t::iterator iter
               = per_cs_data->begin ();
             iter != per_cs_data->end ();
             ++iter)
          {
            const call_string *cs = (*iter).first;
            //per_call_string_data *data = (*iter).second;
            /* Re-count the enodes restricted to this call string;
               rows are only emitted for non-empty counts.  */
            num_enodes = 0;
            FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
              {
                if (enode->get_point ().get_function () == m_fun
                    && enode->get_point ().get_call_string () == *cs)
                  num_enodes++;
              }
            if (num_enodes > 0)
              {
                gv->begin_trtd ();
                cs->print (pp);
                pp_printf (pp, ": %i\n", num_enodes);
                pp_write_text_as_html_like_dot_to_stream (pp);
                gv->end_tdtr ();
              }
          }

        /* Show any summaries.  */
        per_function_data *data = args.m_eg->get_per_function_data (m_fun);
        if (data)
          {
            pp_newline (pp);
            gv->begin_trtd ();
            pp_printf (pp, "summaries: %i\n", data->m_summaries.length ());
            pp_write_text_as_html_like_dot_to_stream (pp);
            gv->end_tdtr ();
          }
      }

    pp_string (pp, "</TABLE>>];\n\n");
    pp_flush (pp);
  }

  /* Print this node's dot id, "vcg_<index>".  */
  void dump_dot_id (pretty_printer *pp) const
  {
    pp_printf (pp, "vcg_%i", m_index);
  }

private:
  function *m_fun;
  int m_index;
  int m_num_supernodes;
  int m_num_superedges;
};
5345
5346 /* Subclass of dedge representing a callgraph edge. */
5347
5348 class viz_callgraph_edge : public dedge<viz_callgraph_traits>
5349 {
5350 public:
viz_callgraph_edge(viz_callgraph_node * src,viz_callgraph_node * dest)5351 viz_callgraph_edge (viz_callgraph_node *src, viz_callgraph_node *dest)
5352 : dedge<viz_callgraph_traits> (src, dest)
5353 {}
5354
dump_dot(graphviz_out * gv,const dump_args_t &) const5355 void dump_dot (graphviz_out *gv, const dump_args_t &) const
5356 FINAL OVERRIDE
5357 {
5358 pretty_printer *pp = gv->get_pp ();
5359
5360 const char *style = "\"solid,bold\"";
5361 const char *color = "black";
5362 int weight = 10;
5363 const char *constraint = "true";
5364
5365 m_src->dump_dot_id (pp);
5366 pp_string (pp, " -> ");
5367 m_dest->dump_dot_id (pp);
5368 pp_printf (pp,
5369 (" [style=%s, color=%s, weight=%d, constraint=%s,"
5370 " headlabel=\""),
5371 style, color, weight, constraint);
5372 pp_printf (pp, "\"];\n");
5373 }
5374 };
5375
/* Subclass of digraph representing the callgraph.  */

class viz_callgraph : public digraph<viz_callgraph_traits>
{
public:
  viz_callgraph (const supergraph &sg);

  /* Get the viz node for FUN.
     NOTE(review): assumes FUN was registered by the ctor (i.e. has a
     gimple body); the map slot is dereferenced without a NULL check.  */
  viz_callgraph_node *get_vcg_node_for_function (function *fun)
  {
    return *m_map.get (fun);
  }

  /* Get the viz node for the function containing SNODE.  */
  viz_callgraph_node *get_vcg_node_for_snode (supernode *snode)
  {
    return get_vcg_node_for_function (snode->m_fun);
  }

private:
  /* Map from function to its viz node; populated by the ctor.  */
  hash_map<function *, viz_callgraph_node *> m_map;
};
5396
/* Placeholder subclass of cluster.  */

/* No clustering is done on the callgraph; this exists only to satisfy
   viz_callgraph_traits::cluster_t.  */
class viz_callgraph_cluster : public cluster<viz_callgraph_traits>
{
};
5402
5403 /* viz_callgraph's ctor. */
5404
viz_callgraph(const supergraph & sg)5405 viz_callgraph::viz_callgraph (const supergraph &sg)
5406 {
5407 cgraph_node *node;
5408 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
5409 {
5410 function *fun = node->get_fun ();
5411 viz_callgraph_node *vcg_node
5412 = new viz_callgraph_node (fun, m_nodes.length ());
5413 m_map.put (fun, vcg_node);
5414 add_node (vcg_node);
5415 }
5416
5417 unsigned i;
5418 superedge *sedge;
5419 FOR_EACH_VEC_ELT (sg.m_edges, i, sedge)
5420 {
5421 viz_callgraph_node *vcg_src = get_vcg_node_for_snode (sedge->m_src);
5422 if (vcg_src->m_fun)
5423 get_vcg_node_for_function (vcg_src->m_fun)->m_num_superedges++;
5424 if (sedge->dyn_cast_call_superedge ())
5425 {
5426 viz_callgraph_node *vcg_dest = get_vcg_node_for_snode (sedge->m_dest);
5427 viz_callgraph_edge *vcg_edge
5428 = new viz_callgraph_edge (vcg_src, vcg_dest);
5429 add_edge (vcg_edge);
5430 }
5431 }
5432
5433 supernode *snode;
5434 FOR_EACH_VEC_ELT (sg.m_nodes, i, snode)
5435 {
5436 if (snode->m_fun)
5437 get_vcg_node_for_function (snode->m_fun)->m_num_supernodes++;
5438 }
5439 }
5440
/* Dump the callgraph to FILENAME.  */

static void
dump_callgraph (const supergraph &sg, const char *filename,
                const exploded_graph *eg)
{
  /* Open FILENAME here, bailing out silently on failure.
     NOTE(review): vcg.dump_dot below reopens FILENAME by name and does
     the actual writing; this FILE is never written to, so the fopen
     presumably just acts as a writability check (see TODO).  */
  FILE *outf = fopen (filename, "w");
  if (!outf)
    return;

  // TODO
  viz_callgraph vcg (sg);
  vcg.dump_dot (filename, NULL, viz_callgraph_traits::dump_args_t (eg));

  fclose (outf);
}
5457
5458 /* Dump the callgraph to "<srcfile>.callgraph.dot". */
5459
5460 static void
dump_callgraph(const supergraph & sg,const exploded_graph * eg)5461 dump_callgraph (const supergraph &sg, const exploded_graph *eg)
5462 {
5463 auto_timevar tv (TV_ANALYZER_DUMP);
5464 char *filename = concat (dump_base_name, ".callgraph.dot", NULL);
5465 dump_callgraph (sg, filename, eg);
5466 free (filename);
5467 }
5468
/* Subclass of dot_annotator for implementing
   DUMP_BASE_NAME.supergraph-eg.dot, a post-analysis dump of the supergraph.

   Annotate the supergraph nodes by printing the exploded nodes in concise
   form within them, next to their pertinent statements where appropriate,
   colorizing the exploded nodes based on sm-state.
   Also show saved diagnostics within the exploded nodes, giving information
   on whether they were feasible, and, if infeasible, where the problem
   was.  */

class exploded_graph_annotator : public dot_annotator
{
public:
  /* EG is borrowed and must outlive this annotator.  */
  exploded_graph_annotator (const exploded_graph &eg)
  : m_eg (eg)
  {
    /* Avoid O(N^2) by prepopulating m_enodes_per_snodes.  */
    unsigned i;
    supernode *snode;
    FOR_EACH_VEC_ELT (eg.get_supergraph ().m_nodes, i, snode)
      m_enodes_per_snodes.safe_push (new auto_vec <exploded_node *> ());
    exploded_node *enode;
    FOR_EACH_VEC_ELT (m_eg.m_nodes, i, enode)
      if (enode->get_supernode ())
        m_enodes_per_snodes[enode->get_supernode ()->m_index]->safe_push (enode);
  }

  /* Show exploded nodes for BEFORE_SUPERNODE points before N.  */
  bool add_node_annotations (graphviz_out *gv, const supernode &n,
                             bool within_table)
    const FINAL OVERRIDE
  {
    if (!within_table)
      return false;
    gv->begin_tr ();
    pretty_printer *pp = gv->get_pp ();

    gv->begin_td ();
    pp_string (pp, "BEFORE");
    pp_printf (pp, " (scc: %i)", m_eg.get_scc_id (n));
    gv->end_td ();

    unsigned i;
    exploded_node *enode;
    bool had_enode = false;
    FOR_EACH_VEC_ELT (*m_enodes_per_snodes[n.m_index], i, enode)
      {
        gcc_assert (enode->get_supernode () == &n);
        const program_point &point = enode->get_point ();
        if (point.get_kind () != PK_BEFORE_SUPERNODE)
          continue;
        print_enode (gv, enode);
        had_enode = true;
      }
    /* Flag supernodes that the analysis never reached.  */
    if (!had_enode)
      pp_string (pp, "<TD BGCOLOR=\"red\">UNREACHED</TD>");
    pp_flush (pp);
    gv->end_tr ();
    return true;
  }

  /* Show exploded nodes for STMT.  */
  void add_stmt_annotations (graphviz_out *gv, const gimple *stmt,
                             bool within_row)
    const FINAL OVERRIDE
  {
    if (!within_row)
      return;
    pretty_printer *pp = gv->get_pp ();

    const supernode *snode
      = m_eg.get_supergraph ().get_supernode_for_stmt (stmt);
    unsigned i;
    exploded_node *enode;
    bool had_td = false;
    FOR_EACH_VEC_ELT (*m_enodes_per_snodes[snode->m_index], i, enode)
      {
        const program_point &point = enode->get_point ();
        if (point.get_kind () != PK_BEFORE_STMT)
          continue;
        if (point.get_stmt () != stmt)
          continue;
        print_enode (gv, enode);
        had_td = true;
      }
    pp_flush (pp);
    /* Emit an empty cell to keep the table well-formed when no enode
       matched.  */
    if (!had_td)
      {
        gv->begin_td ();
        gv->end_td ();
      }
  }

  /* Show exploded nodes for AFTER_SUPERNODE points after N.  */
  bool add_after_node_annotations (graphviz_out *gv, const supernode &n)
    const FINAL OVERRIDE
  {
    gv->begin_tr ();
    pretty_printer *pp = gv->get_pp ();

    gv->begin_td ();
    pp_string (pp, "AFTER");
    gv->end_td ();

    unsigned i;
    exploded_node *enode;
    FOR_EACH_VEC_ELT (*m_enodes_per_snodes[n.m_index], i, enode)
      {
        gcc_assert (enode->get_supernode () == &n);
        const program_point &point = enode->get_point ();
        if (point.get_kind () != PK_AFTER_SUPERNODE)
          continue;
        print_enode (gv, enode);
      }
    pp_flush (pp);
    gv->end_tr ();
    return true;
  }

private:
  /* Concisely print a TD element for ENODE, showing the index, status,
     and any saved_diagnostics at the enode.  Colorize it to show sm-state.

     Ideally we'd dump ENODE's state here, hidden behind some kind of
     interactive disclosure method like a tooltip, so that the states
     can be explored without overwhelming the graph.
     However, I wasn't able to get graphviz/xdot to show tooltips on
     individual elements within a HTML-like label.  */
  void print_enode (graphviz_out *gv, const exploded_node *enode) const
  {
    pretty_printer *pp = gv->get_pp ();
    pp_printf (pp, "<TD BGCOLOR=\"%s\">",
               enode->get_dot_fillcolor ());
    pp_printf (pp, "<TABLE BORDER=\"0\">");
    gv->begin_trtd ();
    pp_printf (pp, "EN: %i", enode->m_index);
    /* Append a short status suffix; processed enodes get none.  */
    switch (enode->get_status ())
      {
      default:
        gcc_unreachable ();
      case exploded_node::STATUS_WORKLIST:
        pp_string (pp, "(W)");
        break;
      case exploded_node::STATUS_PROCESSED:
        break;
      case exploded_node::STATUS_MERGER:
        pp_string (pp, "(M)");
        break;
      case exploded_node::STATUS_BULK_MERGED:
        pp_string (pp, "(BM)");
        break;
      }
    gv->end_tdtr ();

    /* Dump any saved_diagnostics at this enode.  */
    for (unsigned i = 0; i < enode->get_num_diagnostics (); i++)
      {
        const saved_diagnostic *sd = enode->get_saved_diagnostic (i);
        print_saved_diagnostic (gv, sd);
      }
    pp_printf (pp, "</TABLE>");
    pp_printf (pp, "</TD>");
  }

  /* Print a TABLE element for SD, showing the kind, the length of the
     exploded_path, whether the path was feasible, and if infeasible,
     what the problem was.  */
  void print_saved_diagnostic (graphviz_out *gv,
                               const saved_diagnostic *sd) const
  {
    pretty_printer *pp = gv->get_pp ();
    gv->begin_trtd ();
    pp_printf (pp, "<TABLE BORDER=\"0\">");
    gv->begin_tr ();
    pp_string (pp, "<TD BGCOLOR=\"green\">");
    pp_printf (pp, "DIAGNOSTIC: %s", sd->m_d->get_kind ());
    gv->end_tdtr ();
    gv->begin_trtd ();
    if (sd->get_best_epath ())
      pp_printf (pp, "epath length: %i", sd->get_epath_length ());
    else
      pp_printf (pp, "no best epath");
    gv->end_tdtr ();
    if (const feasibility_problem *p = sd->get_feasibility_problem ())
      {
        gv->begin_trtd ();
        pp_printf (pp, "INFEASIBLE at eedge %i: EN:%i -> EN:%i",
                   p->m_eedge_idx,
                   p->m_eedge.m_src->m_index,
                   p->m_eedge.m_dest->m_index);
        pp_write_text_as_html_like_dot_to_stream (pp);
        gv->end_tdtr ();
        gv->begin_trtd ();
        p->m_eedge.m_sedge->dump (pp);
        pp_write_text_as_html_like_dot_to_stream (pp);
        gv->end_tdtr ();
        gv->begin_trtd ();
        pp_gimple_stmt_1 (pp, p->m_last_stmt, 0, (dump_flags_t)0);
        pp_write_text_as_html_like_dot_to_stream (pp);
        gv->end_tdtr ();
        /* Ideally we'd print p->m_model here; see the notes above about
           tooltips.  */
      }
    pp_printf (pp, "</TABLE>");
    gv->end_tdtr ();
  }

  const exploded_graph &m_eg;
  /* Indexed by supernode index: the enodes within that supernode
     (owned vectors, deleted by auto_delete_vec).  */
  auto_delete_vec<auto_vec <exploded_node *> > m_enodes_per_snodes;
};
5679
5680 /* Implement -fdump-analyzer-json. */
5681
5682 static void
dump_analyzer_json(const supergraph & sg,const exploded_graph & eg)5683 dump_analyzer_json (const supergraph &sg,
5684 const exploded_graph &eg)
5685 {
5686 auto_timevar tv (TV_ANALYZER_DUMP);
5687 char *filename = concat (dump_base_name, ".analyzer.json.gz", NULL);
5688 gzFile output = gzopen (filename, "w");
5689 if (!output)
5690 {
5691 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
5692 free (filename);
5693 return;
5694 }
5695
5696 json::object *toplev_obj = new json::object ();
5697 toplev_obj->set ("sgraph", sg.to_json ());
5698 toplev_obj->set ("egraph", eg.to_json ());
5699
5700 pretty_printer pp;
5701 toplev_obj->print (&pp);
5702 pp_formatted_text (&pp);
5703
5704 delete toplev_obj;
5705
5706 if (gzputs (output, pp_formatted_text (&pp)) == EOF
5707 || gzclose (output))
5708 error_at (UNKNOWN_LOCATION, "error writing %qs", filename);
5709
5710 free (filename);
5711 }
5712
/* Concrete subclass of plugin_analyzer_init_iface, allowing plugins
   to register new state machines.  */

class plugin_analyzer_init_impl : public plugin_analyzer_init_iface
{
public:
  /* CHECKERS and LOGGER are borrowed; both must outlive this object.  */
  plugin_analyzer_init_impl (auto_delete_vec <state_machine> *checkers,
                             logger *logger)
  : m_checkers (checkers),
    m_logger (logger)
  {}

  /* Append SM to the checkers; ownership of SM transfers to the
     auto_delete_vec.  */
  void register_state_machine (state_machine *sm) FINAL OVERRIDE
  {
    m_checkers->safe_push (sm);
  }

  logger *get_logger () const FINAL OVERRIDE
  {
    return m_logger;
  }

private:
  auto_delete_vec <state_machine> *m_checkers;
  logger *m_logger;
};
5739
5740 /* Run the analysis "engine". */
5741
5742 void
impl_run_checkers(logger * logger)5743 impl_run_checkers (logger *logger)
5744 {
5745 LOG_SCOPE (logger);
5746
5747 if (logger)
5748 {
5749 logger->log ("BITS_BIG_ENDIAN: %i", BITS_BIG_ENDIAN ? 1 : 0);
5750 logger->log ("BYTES_BIG_ENDIAN: %i", BYTES_BIG_ENDIAN ? 1 : 0);
5751 logger->log ("WORDS_BIG_ENDIAN: %i", WORDS_BIG_ENDIAN ? 1 : 0);
5752 }
5753
5754 /* If using LTO, ensure that the cgraph nodes have function bodies. */
5755 cgraph_node *node;
5756 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
5757 node->get_untransformed_body ();
5758
5759 /* Create the supergraph. */
5760 supergraph sg (logger);
5761
5762 engine eng (&sg, logger);
5763
5764 state_purge_map *purge_map = NULL;
5765
5766 if (flag_analyzer_state_purge)
5767 purge_map = new state_purge_map (sg, eng.get_model_manager (), logger);
5768
5769 if (flag_dump_analyzer_supergraph)
5770 {
5771 /* Dump supergraph pre-analysis. */
5772 auto_timevar tv (TV_ANALYZER_DUMP);
5773 char *filename = concat (dump_base_name, ".supergraph.dot", NULL);
5774 supergraph::dump_args_t args ((enum supergraph_dot_flags)0, NULL);
5775 sg.dump_dot (filename, args);
5776 free (filename);
5777 }
5778
5779 if (flag_dump_analyzer_state_purge)
5780 {
5781 auto_timevar tv (TV_ANALYZER_DUMP);
5782 state_purge_annotator a (purge_map);
5783 char *filename = concat (dump_base_name, ".state-purge.dot", NULL);
5784 supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
5785 sg.dump_dot (filename, args);
5786 free (filename);
5787 }
5788
5789 auto_delete_vec <state_machine> checkers;
5790 make_checkers (checkers, logger);
5791
5792 plugin_analyzer_init_impl data (&checkers, logger);
5793 invoke_plugin_callbacks (PLUGIN_ANALYZER_INIT, &data);
5794
5795 if (logger)
5796 {
5797 int i;
5798 state_machine *sm;
5799 FOR_EACH_VEC_ELT (checkers, i, sm)
5800 logger->log ("checkers[%i]: %s", i, sm->get_name ());
5801 }
5802
5803 /* Extrinsic state shared by nodes in the graph. */
5804 const extrinsic_state ext_state (checkers, &eng, logger);
5805
5806 const analysis_plan plan (sg, logger);
5807
5808 /* The exploded graph. */
5809 exploded_graph eg (sg, logger, ext_state, purge_map, plan,
5810 analyzer_verbosity);
5811
5812 /* Add entrypoints to the graph for externally-callable functions. */
5813 eg.build_initial_worklist ();
5814
5815 /* Now process the worklist, exploring the <point, state> graph. */
5816 eg.process_worklist ();
5817
5818 if (flag_dump_analyzer_exploded_graph)
5819 {
5820 auto_timevar tv (TV_ANALYZER_DUMP);
5821 char *filename
5822 = concat (dump_base_name, ".eg.dot", NULL);
5823 exploded_graph::dump_args_t args (eg);
5824 root_cluster c;
5825 eg.dump_dot (filename, &c, args);
5826 free (filename);
5827 }
5828
5829 /* Now emit any saved diagnostics. */
5830 eg.get_diagnostic_manager ().emit_saved_diagnostics (eg);
5831
5832 eg.dump_exploded_nodes ();
5833
5834 eg.log_stats ();
5835
5836 if (flag_dump_analyzer_callgraph)
5837 dump_callgraph (sg, &eg);
5838
5839 if (flag_dump_analyzer_supergraph)
5840 {
5841 /* Dump post-analysis form of supergraph. */
5842 auto_timevar tv (TV_ANALYZER_DUMP);
5843 char *filename = concat (dump_base_name, ".supergraph-eg.dot", NULL);
5844 exploded_graph_annotator a (eg);
5845 supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
5846 sg.dump_dot (filename, args);
5847 free (filename);
5848 }
5849
5850 if (flag_dump_analyzer_json)
5851 dump_analyzer_json (sg, eg);
5852
5853 if (flag_dump_analyzer_untracked)
5854 eng.get_model_manager ()->dump_untracked_regions ();
5855
5856 delete purge_map;
5857 }
5858
5859 /* External entrypoint to the analysis "engine".
5860 Set up any dumps, then call impl_run_checkers. */
5861
5862 void
run_checkers()5863 run_checkers ()
5864 {
5865 /* Save input_location. */
5866 location_t saved_input_location = input_location;
5867
5868 /* Handle -fdump-analyzer and -fdump-analyzer-stderr. */
5869 FILE *dump_fout = NULL;
5870 /* Track if we're responsible for closing dump_fout. */
5871 bool owns_dump_fout = false;
5872 if (flag_dump_analyzer_stderr)
5873 dump_fout = stderr;
5874 else if (flag_dump_analyzer)
5875 {
5876 char *dump_filename = concat (dump_base_name, ".analyzer.txt", NULL);
5877 dump_fout = fopen (dump_filename, "w");
5878 free (dump_filename);
5879 if (dump_fout)
5880 owns_dump_fout = true;
5881 }
5882
5883 {
5884 log_user the_logger (NULL);
5885 if (dump_fout)
5886 the_logger.set_logger (new logger (dump_fout, 0, 0,
5887 *global_dc->printer));
5888 LOG_SCOPE (the_logger.get_logger ());
5889
5890 impl_run_checkers (the_logger.get_logger ());
5891
5892 /* end of lifetime of the_logger (so that dump file is closed after the
5893 various dtors run). */
5894 }
5895
5896 if (owns_dump_fout)
5897 fclose (dump_fout);
5898
5899 /* Restore input_location. Subsequent passes may assume that input_location
5900 is some arbitrary value *not* in the block tree, which might be violated
5901 if we didn't restore it. */
5902 input_location = saved_input_location;
5903 }
5904
5905 } // namespace ana
5906
5907 #endif /* #if ENABLE_ANALYZER */
5908