1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
36
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
42
43
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
50 #endif
51
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
54 #endif
55
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
58 #endif
59
60 /* A collected description of an entire row of the abstract CFI table. */
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column number; a null entry means "not saved".  */
  cfi_vec reg_save;

  /* True if the return address is in a mangled state.  */
  bool ra_mangled;
};
75
76 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  Both are rtx REGs
   (ORIG_REG may also be PC_RTX for the return address).  */
struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
};
81
82
83 /* Since we no longer have a proper CFG, we're going to create a facsimile
84 of one on the fly while processing the frame-related insns.
85
86 We create dw_trace_info structures for each extended basic block beginning
87 and ending at a "save point". Save points are labels, barriers, certain
88 notes, and of course the beginning and end of the function.
89
90 As we encounter control transfer insns, we propagate the "current"
91 row state across the edges to the starts of traces. When checking is
92 enabled, we validate that we propagate the same data from all sources.
93
94 All traces are members of the TRACE_INFO array, in the order in which
95 they appear in the instruction stream.
96
97 All save points are present in the TRACE_INDEX hash, mapping the insn
98 starting a trace to the dw_trace_info describing the trace. */
99
/* One extended basic block of the pseudo-CFG described above, delimited
   by "save points".  */
struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  poly_int64_pod beg_true_args_size, end_true_args_size;
  poly_int64_pod beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
154
155
156 /* Hashtable helpers. */
157
/* Hashtable helpers.  Traces hash and compare on their starting insn.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};
163
164 inline hashval_t
hash(const dw_trace_info * ti)165 trace_info_hasher::hash (const dw_trace_info *ti)
166 {
167 return INSN_UID (ti->head);
168 }
169
170 inline bool
equal(const dw_trace_info * a,const dw_trace_info * b)171 trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
172 {
173 return a->head == b->head;
174 }
175
176
/* The variables making up the pseudo-cfg, as described above.  */

/* All traces, in the order they appear in the instruction stream.  */
static vec<dw_trace_info> trace_info;
/* Work list of traces still to be processed.  */
static vec<dw_trace_info *> trace_work_list;
/* Maps the insn starting a trace to its dw_trace_info.  */
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* If the CIE described a return-address save, that entry; else null.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter feeding the "LCFI" labels produced by dwarf2out_cfi_label.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  /* The register being saved (or PC_RTX for the return address).  */
  rtx reg;
  /* Where it is saved: another register, or NULL if saved at
     CFA_OFFSET from the CFA instead.  */
  rtx saved_reg;
  poly_int64_pod cfa_offset;
};


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
228
229 /* Hook used by __throw. */
230
231 rtx
expand_builtin_dwarf_sp_column(void)232 expand_builtin_dwarf_sp_column (void)
233 {
234 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
235 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
236 }
237
238 /* MEM is a memory reference for the register size table, each element of
239 which has mode MODE. Initialize column C as a return address column. */
240
241 static void
init_return_column_size(scalar_int_mode mode,rtx mem,unsigned int c)242 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
243 {
244 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
245 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
246 emit_move_insn (adjust_address (mem, mode, offset),
247 gen_int_mode (size, mode));
248 }
249
250 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
251 init_one_dwarf_reg_size to communicate on what has been done by the
252 latter. */
253
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
264
265 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
266 initialize the dwarf register size table entry corresponding to register
267 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
268 use for the size entry to initialize, and INIT_STATE is the communication
269 datastructure conveying what we're doing to our caller. */
270
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			       rtx table, machine_mode slotmode,
			       init_one_dwarf_reg_state *init_state)
{
  /* Translate the hard regno through the target's DWARF mappings.  */
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
  poly_int64 regsize = GET_MODE_SIZE (regmode);

  /* Mark the regno processed even if we bail out below, so the caller
     never hands it to us again.  */
  init_state->processed_regno[regno] = true;

  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* ??? When is this true?  Should it be a test based on DCOL instead?  */
  if (maybe_lt (slotoffset, 0))
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
302
303 /* Generate code to initialize the dwarf register size table located
304 at the provided ADDRESS. */
305
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  /* Each table slot is a char-sized integer.  */
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  /* The register maps onto several DWARF columns; size each piece
	     individually.  */
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* Make sure the return column always has a size, even if no visited
     register mapped onto it.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
356
357
358 static dw_trace_info *
get_trace_info(rtx_insn * insn)359 get_trace_info (rtx_insn *insn)
360 {
361 dw_trace_info dummy;
362 dummy.head = insn;
363 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
364 }
365
366 static bool
save_point_p(rtx_insn * insn)367 save_point_p (rtx_insn *insn)
368 {
369 /* Labels, except those that are really jump tables. */
370 if (LABEL_P (insn))
371 return inside_basic_block_p (insn);
372
373 /* We split traces at the prologue/epilogue notes because those
374 are points at which the unwind info is usually stable. This
375 makes it easier to find spots with identical unwind info so
376 that we can use remember/restore_state opcodes. */
377 if (NOTE_P (insn))
378 switch (NOTE_KIND (insn))
379 {
380 case NOTE_INSN_PROLOGUE_END:
381 case NOTE_INSN_EPILOGUE_BEG:
382 return true;
383 }
384
385 return false;
386 }
387
388 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
389
390 static inline HOST_WIDE_INT
div_data_align(HOST_WIDE_INT off)391 div_data_align (HOST_WIDE_INT off)
392 {
393 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
394 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
395 return r;
396 }
397
398 /* Return true if we need a signed version of a given opcode
399 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
400
401 static inline bool
need_data_align_sf_opcode(HOST_WIDE_INT off)402 need_data_align_sf_opcode (HOST_WIDE_INT off)
403 {
404 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
405 }
406
407 /* Return a pointer to a newly allocated Call Frame Instruction. */
408
/* Return a pointer to a newly allocated Call Frame Instruction,
   GC-allocated, with both operands zeroed.  The caller fills in the
   opcode and operand values.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  /* Zero both operands via the reg_num union member.  */
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
419
420 /* Return a newly allocated CFI row, with no defined data. */
421
/* Return a newly allocated CFI row, with no defined data.  The CFA
   register is marked invalid so an uninitialized row never compares
   equal to a real one.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  row->cfa.reg = INVALID_REGNUM;

  return row;
}
431
432 /* Return a copy of an existing CFI row. */
433
434 static dw_cfi_row *
copy_cfi_row(dw_cfi_row * src)435 copy_cfi_row (dw_cfi_row *src)
436 {
437 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
438
439 *dst = *src;
440 dst->reg_save = vec_safe_copy (src->reg_save);
441
442 return dst;
443 }
444
445 /* Return a copy of an existing CFA location. */
446
447 static dw_cfa_location *
copy_cfa(dw_cfa_location * src)448 copy_cfa (dw_cfa_location *src)
449 {
450 dw_cfa_location *dst = ggc_alloc<dw_cfa_location> ();
451 *dst = *src;
452 return dst;
453 }
454
455 /* Generate a new label for the CFI info to refer to. */
456
/* Generate a new label for the CFI info to refer to.  Returns a
   heap-allocated string ("LCFI" plus a counter) owned by the caller.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  char label[20];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
467
468 /* Add CFI either to the current insn stream or to a vector, or both. */
469
/* Add CFI either to the current insn stream or to a vector, or both.
   Destinations are selected by the ADD_CFI_INSN / ADD_CFI_VEC globals;
   also records that a CFI was emitted (ANY_CFIS_EMITTED).  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      /* Attach the CFI as a note after the current position, and advance
	 the position so subsequent CFIs keep their emission order.  */
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
484
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE as the current
   argument area size.  */

static void
add_cfi_args_size (poly_int64 size)
{
  /* We don't yet have a representation for polynomial sizes.  */
  HOST_WIDE_INT const_size = size.to_constant ();

  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (const_size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = const_size;

  add_cfi (cfi);
}
502
503 static void
add_cfi_restore(unsigned reg)504 add_cfi_restore (unsigned reg)
505 {
506 dw_cfi_ref cfi = new_cfi ();
507
508 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
509 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
510
511 add_cfi (cfi);
512 }
513
514 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
515 that the register column is no longer saved. */
516
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  Grows the vector
   (zero-filled) if COLUMN is beyond its current length.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
524
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
527
528 static void
get_cfa_from_loc_descr(dw_cfa_location * cfa,struct dw_loc_descr_node * loc)529 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
530 {
531 struct dw_loc_descr_node *ptr;
532 cfa->offset = 0;
533 cfa->base_offset = 0;
534 cfa->indirect = 0;
535 cfa->reg = -1;
536
537 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
538 {
539 enum dwarf_location_atom op = ptr->dw_loc_opc;
540
541 switch (op)
542 {
543 case DW_OP_reg0:
544 case DW_OP_reg1:
545 case DW_OP_reg2:
546 case DW_OP_reg3:
547 case DW_OP_reg4:
548 case DW_OP_reg5:
549 case DW_OP_reg6:
550 case DW_OP_reg7:
551 case DW_OP_reg8:
552 case DW_OP_reg9:
553 case DW_OP_reg10:
554 case DW_OP_reg11:
555 case DW_OP_reg12:
556 case DW_OP_reg13:
557 case DW_OP_reg14:
558 case DW_OP_reg15:
559 case DW_OP_reg16:
560 case DW_OP_reg17:
561 case DW_OP_reg18:
562 case DW_OP_reg19:
563 case DW_OP_reg20:
564 case DW_OP_reg21:
565 case DW_OP_reg22:
566 case DW_OP_reg23:
567 case DW_OP_reg24:
568 case DW_OP_reg25:
569 case DW_OP_reg26:
570 case DW_OP_reg27:
571 case DW_OP_reg28:
572 case DW_OP_reg29:
573 case DW_OP_reg30:
574 case DW_OP_reg31:
575 cfa->reg = op - DW_OP_reg0;
576 break;
577 case DW_OP_regx:
578 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
579 break;
580 case DW_OP_breg0:
581 case DW_OP_breg1:
582 case DW_OP_breg2:
583 case DW_OP_breg3:
584 case DW_OP_breg4:
585 case DW_OP_breg5:
586 case DW_OP_breg6:
587 case DW_OP_breg7:
588 case DW_OP_breg8:
589 case DW_OP_breg9:
590 case DW_OP_breg10:
591 case DW_OP_breg11:
592 case DW_OP_breg12:
593 case DW_OP_breg13:
594 case DW_OP_breg14:
595 case DW_OP_breg15:
596 case DW_OP_breg16:
597 case DW_OP_breg17:
598 case DW_OP_breg18:
599 case DW_OP_breg19:
600 case DW_OP_breg20:
601 case DW_OP_breg21:
602 case DW_OP_breg22:
603 case DW_OP_breg23:
604 case DW_OP_breg24:
605 case DW_OP_breg25:
606 case DW_OP_breg26:
607 case DW_OP_breg27:
608 case DW_OP_breg28:
609 case DW_OP_breg29:
610 case DW_OP_breg30:
611 case DW_OP_breg31:
612 cfa->reg = op - DW_OP_breg0;
613 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
614 break;
615 case DW_OP_bregx:
616 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
617 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
618 break;
619 case DW_OP_deref:
620 cfa->indirect = 1;
621 break;
622 case DW_OP_plus_uconst:
623 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
624 break;
625 default:
626 gcc_unreachable ();
627 }
628 }
629 }
630
631 /* Find the previous value for the CFA, iteratively. CFI is the opcode
632 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
633 one level of remember/restore state processing. */
634
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  Opcodes that do not
   affect the CFA are ignored.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Offset changes only; register stays as-is.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Register changes only; offset stays as-is.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Prefer the cached location (stored for polynomial offsets);
	 otherwise re-derive it from the location expression.  */
      if (cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc)
	*loc = *cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc;
      else
	get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remembered state is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
674
675 /* Determine if two dw_cfa_location structures define the same data. */
676
677 bool
cfa_equal_p(const dw_cfa_location * loc1,const dw_cfa_location * loc2)678 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
679 {
680 return (loc1->reg == loc2->reg
681 && known_eq (loc1->offset, loc2->offset)
682 && loc1->indirect == loc2->indirect
683 && (loc1->indirect == 0
684 || known_eq (loc1->base_offset, loc2->base_offset)));
685 }
686
687 /* Determine if two CFI operands are identical. */
688
689 static bool
cfi_oprnd_equal_p(enum dw_cfi_oprnd_type t,dw_cfi_oprnd * a,dw_cfi_oprnd * b)690 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
691 {
692 switch (t)
693 {
694 case dw_cfi_oprnd_unused:
695 return true;
696 case dw_cfi_oprnd_reg_num:
697 return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
698 case dw_cfi_oprnd_offset:
699 return a->dw_cfi_offset == b->dw_cfi_offset;
700 case dw_cfi_oprnd_addr:
701 return (a->dw_cfi_addr == b->dw_cfi_addr
702 || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
703 case dw_cfi_oprnd_loc:
704 return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
705 case dw_cfi_oprnd_cfa_loc:
706 return cfa_equal_p (a->dw_cfi_cfa_loc, b->dw_cfi_cfa_loc);
707 }
708 gcc_unreachable ();
709 }
710
711 /* Determine if two CFI entries are identical. */
712
713 static bool
cfi_equal_p(dw_cfi_ref a,dw_cfi_ref b)714 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
715 {
716 enum dwarf_call_frame_info opc;
717
718 /* Make things easier for our callers, including missing operands. */
719 if (a == b)
720 return true;
721 if (a == NULL || b == NULL)
722 return false;
723
724 /* Obviously, the opcodes must match. */
725 opc = a->dw_cfi_opc;
726 if (opc != b->dw_cfi_opc)
727 return false;
728
729 /* Compare the two operands, re-using the type of the operands as
730 already exposed elsewhere. */
731 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
732 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
733 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
734 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
735 }
736
737 /* Determine if two CFI_ROW structures are identical. */
738
739 static bool
cfi_row_equal_p(dw_cfi_row * a,dw_cfi_row * b)740 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
741 {
742 size_t i, n_a, n_b, n_max;
743
744 if (a->cfa_cfi)
745 {
746 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
747 return false;
748 }
749 else if (!cfa_equal_p (&a->cfa, &b->cfa))
750 return false;
751
752 n_a = vec_safe_length (a->reg_save);
753 n_b = vec_safe_length (b->reg_save);
754 n_max = MAX (n_a, n_b);
755
756 for (i = 0; i < n_max; ++i)
757 {
758 dw_cfi_ref r_a = NULL, r_b = NULL;
759
760 if (i < n_a)
761 r_a = (*a->reg_save)[i];
762 if (i < n_b)
763 r_b = (*b->reg_save)[i];
764
765 if (!cfi_equal_p (r_a, r_b))
766 return false;
767 }
768
769 if (a->ra_mangled != b->ra_mangled)
770 return false;
771
772 return true;
773 }
774
775 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
776 what opcode to emit. Returns the CFI opcode to effect the change, or
777 NULL if NEW_CFA == OLD_CFA. */
778
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  Chooses the most compact DW_CFA_def_cfa*
   form that can express the transition.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  HOST_WIDE_INT const_offset;
  if (new_cfa->reg == old_cfa->reg
      && !new_cfa->indirect
      && !old_cfa->indirect
      && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = const_offset;
    }
  else if (new_cfa->offset.is_constant ()
	   && known_eq (new_cfa->offset, old_cfa->offset)
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  This requires the old CFA to have
	 been set as a register plus offset rather than a general
	 DW_CFA_def_cfa_expression.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0
	   && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
      if (!new_cfa->offset.is_constant ()
	  || !new_cfa->base_offset.is_constant ())
	/* It's hard to reconstruct the CFA location for a polynomial
	   expression, so just cache it instead.  */
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = copy_cfa (new_cfa);
      else
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = NULL;
    }

  return cfi;
}
856
857 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
858
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.
   Also keeps the trace's cfa_store offset in sync when the CFA is based
   on the store register.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If the CFA is computed from the register we use for saving
     registers, track the new offset for later saves.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Cache the opcode only for the expression case, where the CFA
	 cannot be reconstructed from the row's reg/offset fields.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
877
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */
881
/* Add the CFI for saving a register.  REG is the CFA column number;
   OFFSET is the save slot's offset from the CFA, used only when SREG
   is INVALID_REGNUM; otherwise the register is saved in SREG.  Emits
   the CFI and records it in CUR_ROW.  */

static void
reg_save (unsigned int reg, unsigned int sreg, poly_int64 offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  if (sreg == INVALID_REGNUM)
    {
      HOST_WIDE_INT const_offset;
      /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
      if (fde && fde->stack_realign)
	{
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_aligned_loc (&cur_row->cfa, offset,
				     fde->stack_realignment);
	}
      else if (offset.is_constant (&const_offset))
	{
	  /* Pick the most compact offset opcode: signed-factored form if
	     the factored offset is negative, extended form if the column
	     doesn't fit in the 6-bit DW_CFA_offset operand.  */
	  if (need_data_align_sf_opcode (const_offset))
	    cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
	  else if (reg & ~0x3f)
	    cfi->dw_cfi_opc = DW_CFA_offset_extended;
	  else
	    cfi->dw_cfi_opc = DW_CFA_offset;
	  cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
	}
      else
	{
	  /* Polynomial offsets need a full location expression.  */
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_loc (&cur_row->cfa, offset);
	}
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
937
938 /* A subroutine of scan_trace. Check INSN for a REG_ARGS_SIZE note
939 and adjust data structures to match. */
940
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  No-op when the note is absent
   or the size is unchanged.  */

static void
notice_args_size (rtx_insn *insn)
{
  poly_int64 args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = get_args_size (note);
  delta = args_size - cur_trace->end_true_args_size;
  if (known_eq (delta, 0))
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
972
973 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
974 data within the trace related to EH insns and args_size. */
975
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  The first
   EH insn records the trace's delayed args_size; later ones emit a
   DW_CFA_GNU_args_size opcode only when the size has changed.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  poly_int64 args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (maybe_ne (cur_trace->end_delay_args_size, args_size))
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
997
998 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
999 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1000 used in places where rtl is prohibited. */
1001
1002 static inline unsigned
dwf_regno(const_rtx reg)1003 dwf_regno (const_rtx reg)
1004 {
1005 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
1006 return DWARF_FRAME_REGNUM (REGNO (reg));
1007 }
1008
1009 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1010
1011 static bool
compare_reg_or_pc(rtx x,rtx y)1012 compare_reg_or_pc (rtx x, rtx y)
1013 {
1014 if (REG_P (x) && REG_P (y))
1015 return REGNO (x) == REGNO (y);
1016 return x == y;
1017 }
1018
1019 /* Record SRC as being saved in DEST. DEST may be null to delete an
1020 existing entry. SRC may be a register or PC_RTX. */
1021
1022 static void
record_reg_saved_in_reg(rtx dest,rtx src)1023 record_reg_saved_in_reg (rtx dest, rtx src)
1024 {
1025 reg_saved_in_data *elt;
1026 size_t i;
1027
1028 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
1029 if (compare_reg_or_pc (elt->orig_reg, src))
1030 {
1031 if (dest == NULL)
1032 cur_trace->regs_saved_in_regs.unordered_remove (i);
1033 else
1034 elt->saved_in_reg = dest;
1035 return;
1036 }
1037
1038 if (dest == NULL)
1039 return;
1040
1041 reg_saved_in_data e = {src, dest};
1042 cur_trace->regs_saved_in_regs.safe_push (e);
1043 }
1044
1045 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1046 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1047
1048 static void
queue_reg_save(rtx reg,rtx sreg,poly_int64 offset)1049 queue_reg_save (rtx reg, rtx sreg, poly_int64 offset)
1050 {
1051 queued_reg_save *q;
1052 queued_reg_save e = {reg, sreg, offset};
1053 size_t i;
1054
1055 /* Duplicates waste space, but it's also necessary to remove them
1056 for correctness, since the queue gets output in reverse order. */
1057 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1058 if (compare_reg_or_pc (q->reg, reg))
1059 {
1060 *q = e;
1061 return;
1062 }
1063
1064 queued_reg_saves.safe_push (e);
1065 }
1066
1067 /* Output all the entries in QUEUED_REG_SAVES. */
1068
1069 static void
dwarf2out_flush_queued_reg_saves(void)1070 dwarf2out_flush_queued_reg_saves (void)
1071 {
1072 queued_reg_save *q;
1073 size_t i;
1074
1075 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1076 {
1077 unsigned int reg, sreg;
1078
1079 record_reg_saved_in_reg (q->saved_reg, q->reg);
1080
1081 if (q->reg == pc_rtx)
1082 reg = DWARF_FRAME_RETURN_COLUMN;
1083 else
1084 reg = dwf_regno (q->reg);
1085 if (q->saved_reg)
1086 sreg = dwf_regno (q->saved_reg);
1087 else
1088 sreg = INVALID_REGNUM;
1089 reg_save (reg, sreg, q->cfa_offset);
1090 }
1091
1092 queued_reg_saves.truncate (0);
1093 }
1094
1095 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1096 location for? Or, does it clobber a register which we've previously
1097 said that some other register is saved in, and for which we now
1098 have a new location for? */
1099
1100 static bool
clobbers_queued_reg_save(const_rtx insn)1101 clobbers_queued_reg_save (const_rtx insn)
1102 {
1103 queued_reg_save *q;
1104 size_t iq;
1105
1106 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1107 {
1108 size_t ir;
1109 reg_saved_in_data *rir;
1110
1111 if (modified_in_p (q->reg, insn))
1112 return true;
1113
1114 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1115 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1116 && modified_in_p (rir->saved_in_reg, insn))
1117 return true;
1118 }
1119
1120 return false;
1121 }
1122
1123 /* What register, if any, is currently saved in REG? */
1124
1125 static rtx
reg_saved_in(rtx reg)1126 reg_saved_in (rtx reg)
1127 {
1128 unsigned int regn = REGNO (reg);
1129 queued_reg_save *q;
1130 reg_saved_in_data *rir;
1131 size_t i;
1132
1133 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1134 if (q->saved_reg && regn == REGNO (q->saved_reg))
1135 return q->reg;
1136
1137 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1138 if (regn == REGNO (rir->saved_in_reg))
1139 return rir->orig_reg;
1140
1141 return NULL_RTX;
1142 }
1143
1144 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1145
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* PAT describes the new CFA outright: either (plus REG CONST) or a
     MEM of such an address.  Reset the current rule and decode it.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel a constant offset off PAT, leaving a REG or a MEM.  */
  pat = strip_offset (pat, &cur_cfa->offset);
  if (MEM_P (pat))
    {
      /* The CFA value lives in memory: record an indirect rule and
	 peel the offset off the address too.  */
      cur_cfa->indirect = 1;
      pat = strip_offset (XEXP (pat, 0), &cur_cfa->base_offset);
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
1162
1163 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1164
1165 static void
dwarf2out_frame_debug_adjust_cfa(rtx pat)1166 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1167 {
1168 rtx src, dest;
1169
1170 gcc_assert (GET_CODE (pat) == SET);
1171 dest = XEXP (pat, 0);
1172 src = XEXP (pat, 1);
1173
1174 switch (GET_CODE (src))
1175 {
1176 case PLUS:
1177 gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1178 cur_cfa->offset -= rtx_to_poly_int64 (XEXP (src, 1));
1179 break;
1180
1181 case REG:
1182 break;
1183
1184 default:
1185 gcc_unreachable ();
1186 }
1187
1188 cur_cfa->reg = dwf_regno (dest);
1189 gcc_assert (cur_cfa->indirect == 0);
1190 }
1191
1192 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1193
1194 static void
dwarf2out_frame_debug_cfa_offset(rtx set)1195 dwarf2out_frame_debug_cfa_offset (rtx set)
1196 {
1197 poly_int64 offset;
1198 rtx src, addr, span;
1199 unsigned int sregno;
1200
1201 src = XEXP (set, 1);
1202 addr = XEXP (set, 0);
1203 gcc_assert (MEM_P (addr));
1204 addr = XEXP (addr, 0);
1205
1206 /* As documented, only consider extremely simple addresses. */
1207 switch (GET_CODE (addr))
1208 {
1209 case REG:
1210 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1211 offset = -cur_cfa->offset;
1212 break;
1213 case PLUS:
1214 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1215 offset = rtx_to_poly_int64 (XEXP (addr, 1)) - cur_cfa->offset;
1216 break;
1217 default:
1218 gcc_unreachable ();
1219 }
1220
1221 if (src == pc_rtx)
1222 {
1223 span = NULL;
1224 sregno = DWARF_FRAME_RETURN_COLUMN;
1225 }
1226 else
1227 {
1228 span = targetm.dwarf_register_span (src);
1229 sregno = dwf_regno (src);
1230 }
1231
1232 /* ??? We'd like to use queue_reg_save, but we need to come up with
1233 a different flushing heuristic for epilogues. */
1234 if (!span)
1235 reg_save (sregno, INVALID_REGNUM, offset);
1236 else
1237 {
1238 /* We have a PARALLEL describing where the contents of SRC live.
1239 Adjust the offset for each piece of the PARALLEL. */
1240 poly_int64 span_offset = offset;
1241
1242 gcc_assert (GET_CODE (span) == PARALLEL);
1243
1244 const int par_len = XVECLEN (span, 0);
1245 for (int par_index = 0; par_index < par_len; par_index++)
1246 {
1247 rtx elem = XVECEXP (span, 0, par_index);
1248 sregno = dwf_regno (src);
1249 reg_save (sregno, INVALID_REGNUM, span_offset);
1250 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1251 }
1252 }
1253 }
1254
1255 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1256
1257 static void
dwarf2out_frame_debug_cfa_register(rtx set)1258 dwarf2out_frame_debug_cfa_register (rtx set)
1259 {
1260 rtx src, dest;
1261 unsigned sregno, dregno;
1262
1263 src = XEXP (set, 1);
1264 dest = XEXP (set, 0);
1265
1266 record_reg_saved_in_reg (dest, src);
1267 if (src == pc_rtx)
1268 sregno = DWARF_FRAME_RETURN_COLUMN;
1269 else
1270 sregno = dwf_regno (src);
1271
1272 dregno = dwf_regno (dest);
1273
1274 /* ??? We'd like to use queue_reg_save, but we need to come up with
1275 a different flushing heuristic for epilogues. */
1276 reg_save (sregno, dregno, 0);
1277 }
1278
1279 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1280
1281 static void
dwarf2out_frame_debug_cfa_expression(rtx set)1282 dwarf2out_frame_debug_cfa_expression (rtx set)
1283 {
1284 rtx src, dest, span;
1285 dw_cfi_ref cfi = new_cfi ();
1286 unsigned regno;
1287
1288 dest = SET_DEST (set);
1289 src = SET_SRC (set);
1290
1291 gcc_assert (REG_P (src));
1292 gcc_assert (MEM_P (dest));
1293
1294 span = targetm.dwarf_register_span (src);
1295 gcc_assert (!span);
1296
1297 regno = dwf_regno (src);
1298
1299 cfi->dw_cfi_opc = DW_CFA_expression;
1300 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1301 cfi->dw_cfi_oprnd2.dw_cfi_loc
1302 = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1303 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1304
1305 /* ??? We'd like to use queue_reg_save, were the interface different,
1306 and, as above, we could manage flushing for epilogues. */
1307 add_cfi (cfi);
1308 update_row_reg_save (cur_row, regno, cfi);
1309 }
1310
1311 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1312 note. */
1313
1314 static void
dwarf2out_frame_debug_cfa_val_expression(rtx set)1315 dwarf2out_frame_debug_cfa_val_expression (rtx set)
1316 {
1317 rtx dest = SET_DEST (set);
1318 gcc_assert (REG_P (dest));
1319
1320 rtx span = targetm.dwarf_register_span (dest);
1321 gcc_assert (!span);
1322
1323 rtx src = SET_SRC (set);
1324 dw_cfi_ref cfi = new_cfi ();
1325 cfi->dw_cfi_opc = DW_CFA_val_expression;
1326 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
1327 cfi->dw_cfi_oprnd2.dw_cfi_loc
1328 = mem_loc_descriptor (src, GET_MODE (src),
1329 GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1330 add_cfi (cfi);
1331 update_row_reg_save (cur_row, dwf_regno (dest), cfi);
1332 }
1333
1334 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1335
1336 static void
dwarf2out_frame_debug_cfa_restore(rtx reg)1337 dwarf2out_frame_debug_cfa_restore (rtx reg)
1338 {
1339 gcc_assert (REG_P (reg));
1340
1341 rtx span = targetm.dwarf_register_span (reg);
1342 if (!span)
1343 {
1344 unsigned int regno = dwf_regno (reg);
1345 add_cfi_restore (regno);
1346 update_row_reg_save (cur_row, regno, NULL);
1347 }
1348 else
1349 {
1350 /* We have a PARALLEL describing where the contents of REG live.
1351 Restore the register for each piece of the PARALLEL. */
1352 gcc_assert (GET_CODE (span) == PARALLEL);
1353
1354 const int par_len = XVECLEN (span, 0);
1355 for (int par_index = 0; par_index < par_len; par_index++)
1356 {
1357 reg = XVECEXP (span, 0, par_index);
1358 gcc_assert (REG_P (reg));
1359 unsigned int regno = dwf_regno (reg);
1360 add_cfi_restore (regno);
1361 update_row_reg_save (cur_row, regno, NULL);
1362 }
1363 }
1364 }
1365
1366 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1367 ??? Perhaps we should note in the CIE where windows are saved (instead of
1368 assuming 0(cfa)) and what registers are in the window. */
1369
1370 static void
dwarf2out_frame_debug_cfa_window_save(void)1371 dwarf2out_frame_debug_cfa_window_save (void)
1372 {
1373 dw_cfi_ref cfi = new_cfi ();
1374
1375 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1376 add_cfi (cfi);
1377 }
1378
1379 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_TOGGLE_RA_MANGLE.
1380 Note: DW_CFA_GNU_window_save dwarf opcode is reused for toggling RA mangle
1381 state, this is a target specific operation on AArch64 and can only be used
1382 on other targets if they don't use the window save operation otherwise. */
1383
1384 static void
dwarf2out_frame_debug_cfa_toggle_ra_mangle(void)1385 dwarf2out_frame_debug_cfa_toggle_ra_mangle (void)
1386 {
1387 dw_cfi_ref cfi = new_cfi ();
1388
1389 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1390 add_cfi (cfi);
1391 cur_row->ra_mangled = !cur_row->ra_mangled;
1392 }
1393
1394 /* Record call frame debugging information for an expression EXPR,
1395 which either sets SP or FP (adjusting how we calculate the frame
1396 address) or saves a register to the stack or another register.
1397 LABEL indicates the address of EXPR.
1398
1399 This function encodes a state machine mapping rtxes to actions on
1400 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1401 users need not read the source code.
1402
1403 The High-Level Picture
1404
1405 Changes in the register we use to calculate the CFA: Currently we
1406 assume that if you copy the CFA register into another register, we
1407 should take the other one as the new CFA register; this seems to
1408 work pretty well. If it's wrong for some target, it's simple
1409 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1410
1411 Changes in the register we use for saving registers to the stack:
1412 This is usually SP, but not always. Again, we deduce that if you
1413 copy SP into another register (and SP is not the CFA register),
1414 then the new register is the one we will be using for register
1415 saves. This also seems to work.
1416
1417 Register saves: There's not much guesswork about this one; if
1418 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1419 register save, and the register used to calculate the destination
1420 had better be the one we think we're using for this purpose.
1421 It's also assumed that a copy from a call-saved register to another
1422 register is saving that register if RTX_FRAME_RELATED_P is set on
1423 that instruction. If the copy is from a call-saved register to
1424 the *same* register, that means that the register is now the same
1425 value as in the caller.
1426
1427 Except: If the register being saved is the CFA register, and the
1428 offset is nonzero, we are saving the CFA, so we assume we have to
1429 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1430 the intent is to save the value of SP from the previous frame.
1431
1432 In addition, if a register has previously been saved to a different
1433 register,
1434
1435 Invariants / Summaries of Rules
1436
1437 cfa current rule for calculating the CFA. It usually
1438 consists of a register and an offset. This is
1439 actually stored in *cur_cfa, but abbreviated
1440 for the purposes of this documentation.
1441 cfa_store register used by prologue code to save things to the stack
1442 cfa_store.offset is the offset from the value of
1443 cfa_store.reg to the actual CFA
1444 cfa_temp register holding an integral value. cfa_temp.offset
1445 stores the value, which will be used to adjust the
1446 stack pointer. cfa_temp is also used like cfa_store,
1447 to track stores to the stack via fp or a temp reg.
1448
1449 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1450 with cfa.reg as the first operand changes the cfa.reg and its
1451 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1452 cfa_temp.offset.
1453
1454 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1455 expression yielding a constant. This sets cfa_temp.reg
1456 and cfa_temp.offset.
1457
1458 Rule 5: Create a new register cfa_store used to save items to the
1459 stack.
1460
1461 Rules 10-14: Save a register to the stack. Define offset as the
1462 difference of the original location and cfa_store's
1463 location (or cfa_temp's location if cfa_temp is used).
1464
1465 Rules 16-20: If AND operation happens on sp in prologue, we assume
1466 stack is realigned. We will use a group of DW_OP_XXX
1467 expressions to represent the location of the stored
1468 register instead of CFA+offset.
1469
1470 The Rules
1471
1472 "{a,b}" indicates a choice of a xor b.
1473 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1474
1475 Rule 1:
1476 (set <reg1> <reg2>:cfa.reg)
1477 effects: cfa.reg = <reg1>
1478 cfa.offset unchanged
1479 cfa_temp.reg = <reg1>
1480 cfa_temp.offset = cfa.offset
1481
1482 Rule 2:
1483 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1484 {<const_int>,<reg>:cfa_temp.reg}))
1485 effects: cfa.reg = sp if fp used
1486 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1487 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1488 if cfa_store.reg==sp
1489
1490 Rule 3:
1491 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1492 effects: cfa.reg = fp
1493 cfa_offset += +/- <const_int>
1494
1495 Rule 4:
1496 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1497 constraints: <reg1> != fp
1498 <reg1> != sp
1499 effects: cfa.reg = <reg1>
1500 cfa_temp.reg = <reg1>
1501 cfa_temp.offset = cfa.offset
1502
1503 Rule 5:
1504 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1505 constraints: <reg1> != fp
1506 <reg1> != sp
1507 effects: cfa_store.reg = <reg1>
1508 cfa_store.offset = cfa.offset - cfa_temp.offset
1509
1510 Rule 6:
1511 (set <reg> <const_int>)
1512 effects: cfa_temp.reg = <reg>
1513 cfa_temp.offset = <const_int>
1514
1515 Rule 7:
1516 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1517 effects: cfa_temp.reg = <reg1>
1518 cfa_temp.offset |= <const_int>
1519
1520 Rule 8:
1521 (set <reg> (high <exp>))
1522 effects: none
1523
1524 Rule 9:
1525 (set <reg> (lo_sum <exp> <const_int>))
1526 effects: cfa_temp.reg = <reg>
1527 cfa_temp.offset = <const_int>
1528
1529 Rule 10:
1530 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1531 effects: cfa_store.offset -= <const_int>
1532 cfa.offset = cfa_store.offset if cfa.reg == sp
1533 cfa.reg = sp
1534 cfa.base_offset = -cfa_store.offset
1535
1536 Rule 11:
1537 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1538 effects: cfa_store.offset += -/+ mode_size(mem)
1539 cfa.offset = cfa_store.offset if cfa.reg == sp
1540 cfa.reg = sp
1541 cfa.base_offset = -cfa_store.offset
1542
1543 Rule 12:
1544 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1545
1546 <reg2>)
1547 effects: cfa.reg = <reg1>
1548 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1549
1550 Rule 13:
1551 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1552 effects: cfa.reg = <reg1>
1553 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1554
1555 Rule 14:
1556 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1557 effects: cfa.reg = <reg1>
1558 cfa.base_offset = -cfa_temp.offset
1559 cfa_temp.offset -= mode_size(mem)
1560
1561 Rule 15:
1562 (set <reg> {unspec, unspec_volatile})
1563 effects: target-dependent
1564
1565 Rule 16:
1566 (set sp (and: sp <const_int>))
1567 constraints: cfa_store.reg == sp
1568 effects: cfun->fde.stack_realign = 1
1569 cfa_store.offset = 0
1570 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1571
1572 Rule 17:
1573 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1574 effects: cfa_store.offset += -/+ mode_size(mem)
1575
1576 Rule 18:
1577 (set (mem ({pre_inc, pre_dec} sp)) fp)
1578 constraints: fde->stack_realign == 1
1579 effects: cfa_store.offset = 0
1580 cfa.reg != HARD_FRAME_POINTER_REGNUM
1581
1582 Rule 19:
1583 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1584 constraints: fde->stack_realign == 1
1585 && cfa.offset == 0
1586 && cfa.indirect == 0
1587 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1588 effects: Use DW_CFA_def_cfa_expression to define cfa
1589 cfa.reg == fde->drap_reg */
1590
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  poly_int64 offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them. */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      /* Second pass: the register assignments (and, for SEQUENCEs,
	 everything, since the first pass above did not run).  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC currently holds the saved value of some other register,
     treat this as an operation on that original register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  /* NOTE(review): fde may be null here; every use below is guarded
     with "if (fde ..." except the rule18 check near the end, which is
     only reached via paths where realignment set it up.  */
  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		{
		  gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			      && fde->drap_reg != INVALID_REGNUM
			      && cur_cfa->reg != dwf_regno (src)
			      && fde->rule18);
		  fde->rule18 = 0;
		  /* The save of hard frame pointer has been deferred
		     until this point when Rule 18 applied.  Emit it now.  */
		  queue_reg_save (dest, NULL_RTX, 0);
		  /* And as the instruction modifies the hard frame pointer,
		     flush the queue as well.  */
		  dwarf2out_flush_queued_reg_saves ();
		}
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      if (REG_P (XEXP (src, 1)))
		{
		  /* The adjustment amount was previously loaded into
		     cfa_temp (Rule 6/9).  */
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		}
	      else if (!poly_int_rtx_p (XEXP (src, 1), &offset))
		gcc_unreachable ();

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      /* PLUS/LO_SUM move SP away from the CFA, so negate.  */
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
	      offset = rtx_to_poly_int64 (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && poly_int_rtx_p (XEXP (src, 1), &offset))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = -offset;
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && poly_int_rtx_p (XEXP (src, 1),
					  &cur_trace->cfa_temp.offset))
		cur_trace->cfa_temp.reg = dwf_regno (dest);
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	case CONST_POLY_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = rtx_to_poly_int64 (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  if (!can_ior_p (cur_trace->cfa_temp.offset, INTVAL (XEXP (src, 1)),
			  &cur_trace->cfa_temp.offset))
	    /* The target shouldn't generate this kind of CFI note if we
	       can't represent it.  */
	    gcc_unreachable ();
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      /* If the CFA is in neither SP nor FP, a DRAP register
		 must be holding it; remember it for Rule 19.  */
	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  offset = -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* POST_MODIFY stores at the pre-adjustment address, so the
	     save location differs from PRE_MODIFY's by OFFSET.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	      /* Defer the actual queue_reg_save; see PR99334 note
		 further below.  */
	      fde->rule18 = 1;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* POST_DEC stores at the pre-adjustment address.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = rtx_to_poly_int64 (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Rebase the store address onto the CFA, whichever of the
	       tracked base registers it used.  */
	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (known_eq (cur_cfa->offset, 0))
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	{
	  if (fde->rule18)
	    /* Just verify the hard frame pointer save when doing dynamic
	       realignment uses expected offset.  The actual queue_reg_save
	       needs to be deferred until the instruction that sets
	       hard frame pointer to stack pointer, see PR99334 for
	       details.  */
	    gcc_assert (known_eq (offset, 0));
	  else
	    queue_reg_save (src, NULL_RTX, offset);
	}
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  poly_int64 span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2074
2075 /* Record call frame debugging information for INSN, which either sets
2076 SP or FP (adjusting how we calculate the frame address) or saves a
2077 register to the stack. */
2078
2079 static void
dwarf2out_frame_debug(rtx_insn * insn)2080 dwarf2out_frame_debug (rtx_insn *insn)
2081 {
2082 rtx note, n, pat;
2083 bool handled_one = false;
2084
2085 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2086 switch (REG_NOTE_KIND (note))
2087 {
2088 case REG_FRAME_RELATED_EXPR:
2089 pat = XEXP (note, 0);
2090 goto do_frame_expr;
2091
2092 case REG_CFA_DEF_CFA:
2093 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2094 handled_one = true;
2095 break;
2096
2097 case REG_CFA_ADJUST_CFA:
2098 n = XEXP (note, 0);
2099 if (n == NULL)
2100 {
2101 n = PATTERN (insn);
2102 if (GET_CODE (n) == PARALLEL)
2103 n = XVECEXP (n, 0, 0);
2104 }
2105 dwarf2out_frame_debug_adjust_cfa (n);
2106 handled_one = true;
2107 break;
2108
2109 case REG_CFA_OFFSET:
2110 n = XEXP (note, 0);
2111 if (n == NULL)
2112 n = single_set (insn);
2113 dwarf2out_frame_debug_cfa_offset (n);
2114 handled_one = true;
2115 break;
2116
2117 case REG_CFA_REGISTER:
2118 n = XEXP (note, 0);
2119 if (n == NULL)
2120 {
2121 n = PATTERN (insn);
2122 if (GET_CODE (n) == PARALLEL)
2123 n = XVECEXP (n, 0, 0);
2124 }
2125 dwarf2out_frame_debug_cfa_register (n);
2126 handled_one = true;
2127 break;
2128
2129 case REG_CFA_EXPRESSION:
2130 case REG_CFA_VAL_EXPRESSION:
2131 n = XEXP (note, 0);
2132 if (n == NULL)
2133 n = single_set (insn);
2134
2135 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2136 dwarf2out_frame_debug_cfa_expression (n);
2137 else
2138 dwarf2out_frame_debug_cfa_val_expression (n);
2139
2140 handled_one = true;
2141 break;
2142
2143 case REG_CFA_RESTORE:
2144 n = XEXP (note, 0);
2145 if (n == NULL)
2146 {
2147 n = PATTERN (insn);
2148 if (GET_CODE (n) == PARALLEL)
2149 n = XVECEXP (n, 0, 0);
2150 n = XEXP (n, 0);
2151 }
2152 dwarf2out_frame_debug_cfa_restore (n);
2153 handled_one = true;
2154 break;
2155
2156 case REG_CFA_SET_VDRAP:
2157 n = XEXP (note, 0);
2158 if (REG_P (n))
2159 {
2160 dw_fde_ref fde = cfun->fde;
2161 if (fde)
2162 {
2163 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2164 if (REG_P (n))
2165 fde->vdrap_reg = dwf_regno (n);
2166 }
2167 }
2168 handled_one = true;
2169 break;
2170
2171 case REG_CFA_TOGGLE_RA_MANGLE:
2172 dwarf2out_frame_debug_cfa_toggle_ra_mangle ();
2173 handled_one = true;
2174 break;
2175
2176 case REG_CFA_WINDOW_SAVE:
2177 dwarf2out_frame_debug_cfa_window_save ();
2178 handled_one = true;
2179 break;
2180
2181 case REG_CFA_FLUSH_QUEUE:
2182 /* The actual flush happens elsewhere. */
2183 handled_one = true;
2184 break;
2185
2186 default:
2187 break;
2188 }
2189
2190 if (!handled_one)
2191 {
2192 pat = PATTERN (insn);
2193 do_frame_expr:
2194 dwarf2out_frame_debug_expr (pat);
2195
2196 /* Check again. A parallel can save and update the same register.
2197 We could probably check just once, here, but this is safer than
2198 removing the check at the start of the function. */
2199 if (clobbers_queued_reg_save (pat))
2200 dwarf2out_flush_queued_reg_saves ();
2201 }
2202 }
2203
2204 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2205
2206 static void
change_cfi_row(dw_cfi_row * old_row,dw_cfi_row * new_row)2207 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2208 {
2209 size_t i, n_old, n_new, n_max;
2210 dw_cfi_ref cfi;
2211
2212 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2213 add_cfi (new_row->cfa_cfi);
2214 else
2215 {
2216 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2217 if (cfi)
2218 add_cfi (cfi);
2219 }
2220
2221 n_old = vec_safe_length (old_row->reg_save);
2222 n_new = vec_safe_length (new_row->reg_save);
2223 n_max = MAX (n_old, n_new);
2224
2225 for (i = 0; i < n_max; ++i)
2226 {
2227 dw_cfi_ref r_old = NULL, r_new = NULL;
2228
2229 if (i < n_old)
2230 r_old = (*old_row->reg_save)[i];
2231 if (i < n_new)
2232 r_new = (*new_row->reg_save)[i];
2233
2234 if (r_old == r_new)
2235 ;
2236 else if (r_new == NULL)
2237 add_cfi_restore (i);
2238 else if (!cfi_equal_p (r_old, r_new))
2239 add_cfi (r_new);
2240 }
2241
2242 if (old_row->ra_mangled != new_row->ra_mangled)
2243 {
2244 dw_cfi_ref cfi = new_cfi ();
2245
2246 /* DW_CFA_GNU_window_save is reused for toggling RA mangle state. */
2247 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
2248 add_cfi (cfi);
2249 }
2250 }
2251
2252 /* Examine CFI and return true if a cfi label and set_loc is needed
2253 beforehand. Even when generating CFI assembler instructions, we
2254 still have to add the cfi to the list so that lookup_cfa_1 works
2255 later on. When -g2 and above we even need to force emitting of
2256 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2257 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2258 and so don't use convert_cfa_to_fb_loc_list. */
2259
2260 static bool
cfi_label_required_p(dw_cfi_ref cfi)2261 cfi_label_required_p (dw_cfi_ref cfi)
2262 {
2263 if (!dwarf2out_do_cfi_asm ())
2264 return true;
2265
2266 if (dwarf_version == 2
2267 && debug_info_level > DINFO_LEVEL_TERSE
2268 && (write_symbols == DWARF2_DEBUG
2269 || write_symbols == VMS_AND_DWARF2_DEBUG))
2270 {
2271 switch (cfi->dw_cfi_opc)
2272 {
2273 case DW_CFA_def_cfa_offset:
2274 case DW_CFA_def_cfa_offset_sf:
2275 case DW_CFA_def_cfa_register:
2276 case DW_CFA_def_cfa:
2277 case DW_CFA_def_cfa_sf:
2278 case DW_CFA_def_cfa_expression:
2279 case DW_CFA_restore_state:
2280 return true;
2281 default:
2282 return false;
2283 }
2284 }
2285 return false;
2286 }
2287
2288 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2289 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2290 necessary. */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* Remember where the cold-section CFIs begin so the FDE can be
	 split at output time.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Scan forward over the whole run of consecutive CFI notes
	     (skipping inactive notes in between); one label suffices
	     for the entire group if any member needs one.  NEXT ends
	     up at the first active insn or section switch after the
	     group, so the outer loop resumes past it.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Emit the label itself as a note so final () outputs it
		 at the right spot.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Push every CFI of the group onto the FDE, in order.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	}
    }
}
2345
2346 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2347
2348 /* If LABEL is the start of a trace, then initialize the state of that
2349 trace from CUR_TRACE and CUR_ROW. */
2350
static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  poly_int64 args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      /* Carry the scratch/store tracking state into the new trace too.  */
      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (maybe_ne (ti->beg_true_args_size, args_size))
	ti->args_size_undefined = true;
    }
}
2411
2412 /* Similarly, but handle the args_size and CFA reset across EH
2413 and non-local goto edges. */
2414
2415 static void
maybe_record_trace_start_abnormal(rtx_insn * start,rtx_insn * origin)2416 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2417 {
2418 poly_int64 save_args_size, delta;
2419 dw_cfa_location save_cfa;
2420
2421 save_args_size = cur_trace->end_true_args_size;
2422 if (known_eq (save_args_size, 0))
2423 {
2424 maybe_record_trace_start (start, origin);
2425 return;
2426 }
2427
2428 delta = -save_args_size;
2429 cur_trace->end_true_args_size = 0;
2430
2431 save_cfa = cur_row->cfa;
2432 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2433 {
2434 /* Convert a change in args_size (always a positive in the
2435 direction of stack growth) to a change in stack pointer. */
2436 if (!STACK_GROWS_DOWNWARD)
2437 delta = -delta;
2438
2439 cur_row->cfa.offset += delta;
2440 }
2441
2442 maybe_record_trace_start (start, origin);
2443
2444 cur_trace->end_true_args_size = save_args_size;
2445 cur_row->cfa = save_cfa;
2446 }
2447
2448 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2449 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2450
static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled from the CALL_P branch via
	 nonlocal_goto_handler_labels, not here.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* A tablejump targets every label in its jump table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may go to any label whose address was taken.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	;  /* Returns leave the function: no intra-function edge.  */
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: edges to each label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Ordinary direct jump: a single edge to its label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot sequence: recurse into each member.  Return here so
	 the EH check below is applied per member, not to the SEQUENCE.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2530
2531 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2532
2533 static void
scan_insn_after(rtx_insn * insn)2534 scan_insn_after (rtx_insn *insn)
2535 {
2536 if (RTX_FRAME_RELATED_P (insn))
2537 dwarf2out_frame_debug (insn);
2538 notice_args_size (insn);
2539 }
2540
2541 /* Scan the trace beginning at INSN and create the CFI notes for the
2542 instructions therein. */
2543
static void
scan_trace (dw_trace_info *trace, bool entry)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The end state begins as a copy of the incoming state and is
     updated in place as we scan.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  /* Publish the working state through the file-scope globals used by
     the dwarf2out_frame_debug_* machinery.  */
  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  /* If the current function starts with a non-standard incoming frame
     sp offset, emit a note before the first instruction.  */
  if (entry
      && DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET)
    {
      add_cfi_insn = insn;
      gcc_assert (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED);
      this_cfa.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&this_cfa);
    }

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  poly_int64 restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects for the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the working-state globals before returning.  */
  gcc_assert (!cfun->fde || !cfun->fde->rule18);
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2731
2732 /* Scan the function and create the initial set of CFI notes. */
2733
2734 static void
create_cfi_notes(void)2735 create_cfi_notes (void)
2736 {
2737 dw_trace_info *ti;
2738
2739 gcc_checking_assert (!queued_reg_saves.exists ());
2740 gcc_checking_assert (!trace_work_list.exists ());
2741
2742 /* Always begin at the entry trace. */
2743 ti = &trace_info[0];
2744 scan_trace (ti, true);
2745
2746 while (!trace_work_list.is_empty ())
2747 {
2748 ti = trace_work_list.pop ();
2749 scan_trace (ti, false);
2750 }
2751
2752 queued_reg_saves.release ();
2753 trace_work_list.release ();
2754 }
2755
2756 /* Return the insn before the first NOTE_INSN_CFI after START. */
2757
2758 static rtx_insn *
before_next_cfi_note(rtx_insn * start)2759 before_next_cfi_note (rtx_insn *start)
2760 {
2761 rtx_insn *prev = start;
2762 while (start)
2763 {
2764 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2765 return prev;
2766 prev = start;
2767 start = NEXT_INSN (start);
2768 }
2769 gcc_unreachable ();
2770 }
2771
2772 /* Insert CFI notes between traces to properly change state between them. */
2773
static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      /* Fixup CFIs for trace TI are inserted right after its head.  */
      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* After the restore, the effective previous state is the
		 remembered one, so diff against that below.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      poly_int64 prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (section switch) starts with args_size zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (maybe_ne (ti->beg_delay_args_size, prev_args_size))
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2898
2899 /* Set up the pseudo-cfg of instruction traces, as described at the
2900 block comment at the top of the file. */
2901
2902 static void
create_pseudo_cfg(void)2903 create_pseudo_cfg (void)
2904 {
2905 bool saw_barrier, switch_sections;
2906 dw_trace_info ti;
2907 rtx_insn *insn;
2908 unsigned i;
2909
2910 /* The first trace begins at the start of the function,
2911 and begins with the CIE row state. */
2912 trace_info.create (16);
2913 memset (&ti, 0, sizeof (ti));
2914 ti.head = get_insns ();
2915 ti.beg_row = cie_cfi_row;
2916 ti.cfa_store = cie_cfi_row->cfa;
2917 ti.cfa_temp.reg = INVALID_REGNUM;
2918 trace_info.quick_push (ti);
2919
2920 if (cie_return_save)
2921 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2922
2923 /* Walk all the insns, collecting start of trace locations. */
2924 saw_barrier = false;
2925 switch_sections = false;
2926 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2927 {
2928 if (BARRIER_P (insn))
2929 saw_barrier = true;
2930 else if (NOTE_P (insn)
2931 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2932 {
2933 /* We should have just seen a barrier. */
2934 gcc_assert (saw_barrier);
2935 switch_sections = true;
2936 }
2937 /* Watch out for save_point notes between basic blocks.
2938 In particular, a note after a barrier. Do not record these,
2939 delaying trace creation until the label. */
2940 else if (save_point_p (insn)
2941 && (LABEL_P (insn) || !saw_barrier))
2942 {
2943 memset (&ti, 0, sizeof (ti));
2944 ti.head = insn;
2945 ti.switch_sections = switch_sections;
2946 ti.id = trace_info.length ();
2947 trace_info.safe_push (ti);
2948
2949 saw_barrier = false;
2950 switch_sections = false;
2951 }
2952 }
2953
2954 /* Create the trace index after we've finished building trace_info,
2955 avoiding stale pointer problems due to reallocation. */
2956 trace_index
2957 = new hash_table<trace_info_hasher> (trace_info.length ());
2958 dw_trace_info *tp;
2959 FOR_EACH_VEC_ELT (trace_info, i, tp)
2960 {
2961 dw_trace_info **slot;
2962
2963 if (dump_file)
2964 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
2965 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
2966 tp->switch_sections ? " (section switch)" : "");
2967
2968 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
2969 gcc_assert (*slot == NULL);
2970 *slot = tp;
2971 }
2972 }
2973
2974 /* Record the initial position of the return address. RTL is
2975 INCOMING_RETURN_ADDR_RTX. */
2976
static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  poly_int64 offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decode the SP-relative address; RTL is
	 rebound to the address expression here.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* REG is only valid on the REG path above; the MEM path leaves
	 it INVALID_REGNUM and records a stack save instead.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
3035
/* Compute the per-translation-unit CIE state: the initial CFA
   definition and the initial location of the return address.
   Populates cie_cfi_vec / cie_cfi_row (and possibly cie_return_save)
   exactly once per TU.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so reg_save et al. have a cur_trace to talk to.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  /* create_cie_data is called just once per TU, and when using .cfi_startproc
     is even done by the assembler rather than the compiler.  If the target
     has different incoming frame sp offsets depending on what kind of
     function it is, use a single constant offset for the target and
     if needed, adjust before the first instruction in insn stream.  */
  loc.offset = DEFAULT_INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Detach the scratch state so later passes start clean.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
3092
3093 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3094 state at each location within the function. These notes will be
3095 emitted during pass_final. */
3096
3097 static unsigned int
execute_dwarf2_frame(void)3098 execute_dwarf2_frame (void)
3099 {
3100 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3101 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
3102
3103 /* The first time we're called, compute the incoming frame state. */
3104 if (cie_cfi_vec == NULL)
3105 create_cie_data ();
3106
3107 dwarf2out_alloc_current_fde ();
3108
3109 create_pseudo_cfg ();
3110
3111 /* Do the work. */
3112 create_cfi_notes ();
3113 connect_traces ();
3114 add_cfis_to_fde ();
3115
3116 /* Free all the data we allocated. */
3117 {
3118 size_t i;
3119 dw_trace_info *ti;
3120
3121 FOR_EACH_VEC_ELT (trace_info, i, ti)
3122 ti->regs_saved_in_regs.release ();
3123 }
3124 trace_info.release ();
3125
3126 delete trace_index;
3127 trace_index = NULL;
3128
3129 return 0;
3130 }
3131
3132 /* Convert a DWARF call frame info. operation to its string name */
3133
3134 static const char *
dwarf_cfi_name(unsigned int cfi_opc)3135 dwarf_cfi_name (unsigned int cfi_opc)
3136 {
3137 const char *name = get_DW_CFA_name (cfi_opc);
3138
3139 if (name != NULL)
3140 return name;
3141
3142 return "DW_CFA_<unknown>";
3143 }
3144
3145 /* This routine will generate the correct assembly data for a location
3146 description based on a cfi entry with a complex address. */
3147
3148 static void
output_cfa_loc(dw_cfi_ref cfi,int for_eh)3149 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3150 {
3151 dw_loc_descr_ref loc;
3152 unsigned long size;
3153
3154 if (cfi->dw_cfi_opc == DW_CFA_expression
3155 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3156 {
3157 unsigned r =
3158 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3159 dw2_asm_output_data (1, r, NULL);
3160 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3161 }
3162 else
3163 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3164
3165 /* Output the size of the block. */
3166 size = size_of_locs (loc);
3167 dw2_asm_output_data_uleb128 (size, NULL);
3168
3169 /* Now output the operations themselves. */
3170 output_loc_sequence (loc, for_eh);
3171 }
3172
3173 /* Similar, but used for .cfi_escape. */
3174
3175 static void
output_cfa_loc_raw(dw_cfi_ref cfi)3176 output_cfa_loc_raw (dw_cfi_ref cfi)
3177 {
3178 dw_loc_descr_ref loc;
3179 unsigned long size;
3180
3181 if (cfi->dw_cfi_opc == DW_CFA_expression
3182 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3183 {
3184 unsigned r =
3185 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3186 fprintf (asm_out_file, "%#x,", r);
3187 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3188 }
3189 else
3190 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3191
3192 /* Output the size of the block. */
3193 size = size_of_locs (loc);
3194 dw2_asm_output_data_uleb128_raw (size);
3195 fputc (',', asm_out_file);
3196
3197 /* Now output the operations themselves. */
3198 output_loc_sequence_raw (loc);
3199 }
3200
/* Output a Call Frame Information opcode and its operand(s).  FDE is
   the frame description the CFI belongs to (used to track the current
   location label); FOR_EH is nonzero when emitting .eh_frame rather
   than .debug_frame, and selects the register-number mapping and the
   address encoding.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore are the
     "primary" opcodes: their first operand is packed into the low
     6 bits of the opcode byte itself.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* Scale the stored byte offset by the data alignment factor
	 before emitting it as a uleb128.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All remaining opcodes occupy a full byte; operands follow.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  /* For EH, emit the address with the target's preferred
	     encoding; otherwise a plain address.  Either way this
	     label becomes the base for later advance_loc deltas.  */
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_loc variants emit the delta from the previous
	   location label in 1, 2, 4 or 8 bytes respectively.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  /* Register as uleb128, then factored offset as uleb128.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* Register as uleb128, then the unscaled offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  /* Register as uleb128, then signed factored offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These four take a single register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  /* Two register operands: the saved register and the
	     register currently holding its value.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  /* Remaining opcodes (e.g. remember_state/restore_state/nop)
	     take no operands.  */
	  break;
	}
    }
}
3342
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file, in which case real .cfi_* directives are emitted, or
   a dump file, in which case a human-readable approximation may be
   printed for opcodes the directive syntax cannot express.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* Note: unlike output_cfi, the offset is not run through
	 div_data_align here; the directive takes the byte offset.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no directive for this opcode; for real assembly
	 output it has to go through .cfi_escape as raw bytes.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      /* For dump files, elide the expression body; for real output,
	 escape the whole opcode plus expression as raw bytes.  */
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3462
3463 void
dwarf2out_emit_cfi(dw_cfi_ref cfi)3464 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3465 {
3466 if (dwarf2out_do_cfi_asm ())
3467 output_cfi_directive (asm_out_file, cfi);
3468 }
3469
3470 static void
dump_cfi_row(FILE * f,dw_cfi_row * row)3471 dump_cfi_row (FILE *f, dw_cfi_row *row)
3472 {
3473 dw_cfi_ref cfi;
3474 unsigned i;
3475
3476 cfi = row->cfa_cfi;
3477 if (!cfi)
3478 {
3479 dw_cfa_location dummy;
3480 memset (&dummy, 0, sizeof (dummy));
3481 dummy.reg = INVALID_REGNUM;
3482 cfi = def_cfa_0 (&dummy, &row->cfa);
3483 }
3484 output_cfi_directive (f, cfi);
3485
3486 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3487 if (cfi)
3488 output_cfi_directive (f, cfi);
3489 }
3490
/* Forward declaration; presumably this exists so the otherwise-unused
   external symbol can be called by hand from a debugger session.  */
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3498
3499
/* Save the result of dwarf2out_do_cfi_asm across PCH (it is set in
   that function and also consulted by dwarf2out_do_frame).
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3503
3504 /* Decide whether to emit EH frame unwind information for the current
3505 translation unit. */
3506
3507 bool
dwarf2out_do_eh_frame(void)3508 dwarf2out_do_eh_frame (void)
3509 {
3510 return
3511 (flag_unwind_tables || flag_exceptions)
3512 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
3513 }
3514
3515 /* Decide whether we want to emit frame unwind information for the current
3516 translation unit. */
3517
3518 bool
dwarf2out_do_frame(void)3519 dwarf2out_do_frame (void)
3520 {
3521 /* We want to emit correct CFA location expressions or lists, so we
3522 have to return true if we're going to output debug info, even if
3523 we're not going to output frame or unwind info. */
3524 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3525 return true;
3526
3527 if (saved_do_cfi_asm > 0)
3528 return true;
3529
3530 if (targetm.debug_unwind_info () == UI_DWARF2)
3531 return true;
3532
3533 if (dwarf2out_do_eh_frame ())
3534 return true;
3535
3536 return false;
3537 }
3538
/* Decide whether to emit frame unwind via assembler directives.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* The answer is cached (tri-state) in saved_do_cfi_asm, which also
     survives PCH; compute it only once.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  The 0x70 bits
     of the encoding are the application modifier: only absolute (0)
     and DW_EH_PE_pcrel are accepted.  Check both the personality
     (code 2) and LSDA (code 0) encodings.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3575
namespace {

/* Pass descriptor: runs as an RTL pass, timed under TV_FINAL, with no
   property or TODO requirements.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The pass object itself; all real work is delegated to
   execute_dwarf2_frame.  */

class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

/* Gate: run only when the target can use generic dwarf2 unwinding and
   frame info is actually wanted.  */

bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3619
/* Factory for the dwarf2 frame pass, called by the pass manager.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3625
3626 #include "gt-dwarf2cfi.h"
3627