1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992-2020 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
36
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h" /* For expr.h */
39 #include "expr.h" /* init_return_column_size */
40 #include "output.h" /* asm_out_file */
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
42
43
44 /* ??? Poison these here until it can be done generically. They've been
45 totally replaced in this file; make sure it stays that way. */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
50 #endif
51
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
54 #endif
55
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
58 #endif
59
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_def_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column number; a NULL entry means not saved.  */
  cfi_vec reg_save;

  /* True if the register window is saved.  */
  bool window_save;

  /* True if the return address is in a mangled state.  */
  bool ra_mangled;
};
78
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
struct GTY(()) reg_saved_in_data {
  /* The register (or PC_RTX) whose entry value is being tracked.  */
  rtx orig_reg;
  /* The register that currently holds that value.  */
  rtx saved_in_reg;
};
84
85
86 /* Since we no longer have a proper CFG, we're going to create a facsimile
87 of one on the fly while processing the frame-related insns.
88
89 We create dw_trace_info structures for each extended basic block beginning
90 and ending at a "save point". Save points are labels, barriers, certain
91 notes, and of course the beginning and end of the function.
92
93 As we encounter control transfer insns, we propagate the "current"
94 row state across the edges to the starts of traces. When checking is
95 enabled, we validate that we propagate the same data from all sources.
96
97 All traces are members of the TRACE_INFO array, in the order in which
98 they appear in the instruction stream.
99
100 All save points are present in the TRACE_INDEX hash, mapping the insn
101 starting a trace to the dw_trace_info describing the trace. */
102
struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  poly_int64_pod beg_true_args_size, end_true_args_size;
  poly_int64_pod beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;

  /* True if we've seen an insn with a REG_ARGS_SIZE note before EH_HEAD.  */
  bool args_size_defined_for_eh;
};
160
161
/* Hashtable helpers.  */

/* Traces are keyed by their starting insn: hash on the head insn's UID,
   compare by pointer identity of the head insn.  */
struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};
169
170 inline hashval_t
hash(const dw_trace_info * ti)171 trace_info_hasher::hash (const dw_trace_info *ti)
172 {
173 return INSN_UID (ti->head);
174 }
175
176 inline bool
equal(const dw_trace_info * a,const dw_trace_info * b)177 trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
178 {
179 return a->head == b->head;
180 }
181
182
/* The variables making up the pseudo-cfg, as described above.  */

/* All traces, in the order in which they appear in the insn stream.  */
static vec<dw_trace_info> trace_info;

/* Work list of traces awaiting (re)processing; managed by the scan code
   later in this file (not visible in this chunk).  */
static vec<dw_trace_info *> trace_work_list;

/* Map from the insn starting a trace to its dw_trace_info.  */
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* Record of a return-address save performed by the CIE, if any;
   presumably consulted when FDEs diverge from it (set up later in
   this file — not visible in this chunk).  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter used by dwarf2out_cfi_label to make LCFI labels unique.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;
214
/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  /* The register being saved; may also be PC_RTX (see compare_reg_or_pc).  */
  rtx reg;
  /* If non-NULL, the register REG was saved into; otherwise REG was
     saved at CFA_OFFSET from the CFA.  */
  rtx saved_reg;
  poly_int64_pod cfa_offset;
};


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
234
235 /* Hook used by __throw. */
236
237 rtx
expand_builtin_dwarf_sp_column(void)238 expand_builtin_dwarf_sp_column (void)
239 {
240 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
241 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
242 }
243
244 /* MEM is a memory reference for the register size table, each element of
245 which has mode MODE. Initialize column C as a return address column. */
246
247 static void
init_return_column_size(scalar_int_mode mode,rtx mem,unsigned int c)248 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
249 {
250 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
251 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
252 emit_move_insn (adjust_address (mem, mode, offset),
253 gen_int_mode (size, mode));
254 }
255
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  Prevents double processing
     of registers that appear both standalone and inside a span.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
270
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  /* Map the hard regno to the column the unwinder will index with.  */
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
  poly_int64 regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed even if we bail out below, so the caller never
     reconsiders it as part of a register span.  */
  init_state->processed_regno[regno] = true;

  /* Columns beyond the table's extent have no slot to write.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* A VOIDmode return column carries no size to record.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* ??? When is this true?  Should it be a test based on DCOL instead?  */
  if (maybe_lt (slotoffset, 0))
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
308
309 /* Generate code to initialize the dwarf register size table located
310 at the provided ADDRESS. */
311
312 void
expand_builtin_init_dwarf_reg_sizes(tree address)313 expand_builtin_init_dwarf_reg_sizes (tree address)
314 {
315 unsigned int i;
316 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
317 rtx addr = expand_normal (address);
318 rtx mem = gen_rtx_MEM (BLKmode, addr);
319
320 init_one_dwarf_reg_state init_state;
321
322 memset ((char *)&init_state, 0, sizeof (init_state));
323
324 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
325 {
326 machine_mode save_mode;
327 rtx span;
328
329 /* No point in processing a register multiple times. This could happen
330 with register spans, e.g. when a reg is first processed as a piece of
331 a span, then as a register on its own later on. */
332
333 if (init_state.processed_regno[i])
334 continue;
335
336 save_mode = targetm.dwarf_frame_reg_mode (i);
337 span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
338
339 if (!span)
340 init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
341 else
342 {
343 for (int si = 0; si < XVECLEN (span, 0); si++)
344 {
345 rtx reg = XVECEXP (span, 0, si);
346
347 init_one_dwarf_reg_size
348 (REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
349 }
350 }
351 }
352
353 if (!init_state.wrote_return_column)
354 init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
355
356 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
357 init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
358 #endif
359
360 targetm.init_dwarf_reg_sizes_extra (address);
361 }
362
363
364 static dw_trace_info *
get_trace_info(rtx_insn * insn)365 get_trace_info (rtx_insn *insn)
366 {
367 dw_trace_info dummy;
368 dummy.head = insn;
369 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
370 }
371
372 static bool
save_point_p(rtx_insn * insn)373 save_point_p (rtx_insn *insn)
374 {
375 /* Labels, except those that are really jump tables. */
376 if (LABEL_P (insn))
377 return inside_basic_block_p (insn);
378
379 /* We split traces at the prologue/epilogue notes because those
380 are points at which the unwind info is usually stable. This
381 makes it easier to find spots with identical unwind info so
382 that we can use remember/restore_state opcodes. */
383 if (NOTE_P (insn))
384 switch (NOTE_KIND (insn))
385 {
386 case NOTE_INSN_PROLOGUE_END:
387 case NOTE_INSN_EPILOGUE_BEG:
388 return true;
389 }
390
391 return false;
392 }
393
394 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
395
396 static inline HOST_WIDE_INT
div_data_align(HOST_WIDE_INT off)397 div_data_align (HOST_WIDE_INT off)
398 {
399 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
400 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
401 return r;
402 }
403
404 /* Return true if we need a signed version of a given opcode
405 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
406
407 static inline bool
need_data_align_sf_opcode(HOST_WIDE_INT off)408 need_data_align_sf_opcode (HOST_WIDE_INT off)
409 {
410 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
411 }
412
413 /* Return a pointer to a newly allocated Call Frame Instruction. */
414
415 static inline dw_cfi_ref
new_cfi(void)416 new_cfi (void)
417 {
418 dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
419
420 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
421 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
422
423 return cfi;
424 }
425
426 /* Return a newly allocated CFI row, with no defined data. */
427
428 static dw_cfi_row *
new_cfi_row(void)429 new_cfi_row (void)
430 {
431 dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
432
433 row->cfa.reg = INVALID_REGNUM;
434
435 return row;
436 }
437
438 /* Return a copy of an existing CFI row. */
439
440 static dw_cfi_row *
copy_cfi_row(dw_cfi_row * src)441 copy_cfi_row (dw_cfi_row *src)
442 {
443 dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
444
445 *dst = *src;
446 dst->reg_save = vec_safe_copy (src->reg_save);
447
448 return dst;
449 }
450
451 /* Return a copy of an existing CFA location. */
452
453 static dw_cfa_location *
copy_cfa(dw_cfa_location * src)454 copy_cfa (dw_cfa_location *src)
455 {
456 dw_cfa_location *dst = ggc_alloc<dw_cfa_location> ();
457 *dst = *src;
458 return dst;
459 }
460
461 /* Generate a new label for the CFI info to refer to. */
462
463 static char *
dwarf2out_cfi_label(void)464 dwarf2out_cfi_label (void)
465 {
466 int num = dwarf2out_cfi_label_num++;
467 char label[20];
468
469 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
470
471 return xstrdup (label);
472 }
473
474 /* Add CFI either to the current insn stream or to a vector, or both. */
475
476 static void
add_cfi(dw_cfi_ref cfi)477 add_cfi (dw_cfi_ref cfi)
478 {
479 any_cfis_emitted = true;
480
481 if (add_cfi_insn != NULL)
482 {
483 add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
484 NOTE_CFI (add_cfi_insn) = cfi;
485 }
486
487 if (add_cfi_vec != NULL)
488 vec_safe_push (*add_cfi_vec, cfi);
489 }
490
491 static void
add_cfi_args_size(poly_int64 size)492 add_cfi_args_size (poly_int64 size)
493 {
494 /* We don't yet have a representation for polynomial sizes. */
495 HOST_WIDE_INT const_size = size.to_constant ();
496
497 dw_cfi_ref cfi = new_cfi ();
498
499 /* While we can occasionally have args_size < 0 internally, this state
500 should not persist at a point we actually need an opcode. */
501 gcc_assert (const_size >= 0);
502
503 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
504 cfi->dw_cfi_oprnd1.dw_cfi_offset = const_size;
505
506 add_cfi (cfi);
507 }
508
509 static void
add_cfi_restore(unsigned reg)510 add_cfi_restore (unsigned reg)
511 {
512 dw_cfi_ref cfi = new_cfi ();
513
514 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
515 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
516
517 add_cfi (cfi);
518 }
519
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand so COLUMN is addressable; new slots are
     cleared (i.e. "not saved").  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
530
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
533
534 static void
get_cfa_from_loc_descr(dw_cfa_location * cfa,struct dw_loc_descr_node * loc)535 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
536 {
537 struct dw_loc_descr_node *ptr;
538 cfa->offset = 0;
539 cfa->base_offset = 0;
540 cfa->indirect = 0;
541 cfa->reg = -1;
542
543 for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
544 {
545 enum dwarf_location_atom op = ptr->dw_loc_opc;
546
547 switch (op)
548 {
549 case DW_OP_reg0:
550 case DW_OP_reg1:
551 case DW_OP_reg2:
552 case DW_OP_reg3:
553 case DW_OP_reg4:
554 case DW_OP_reg5:
555 case DW_OP_reg6:
556 case DW_OP_reg7:
557 case DW_OP_reg8:
558 case DW_OP_reg9:
559 case DW_OP_reg10:
560 case DW_OP_reg11:
561 case DW_OP_reg12:
562 case DW_OP_reg13:
563 case DW_OP_reg14:
564 case DW_OP_reg15:
565 case DW_OP_reg16:
566 case DW_OP_reg17:
567 case DW_OP_reg18:
568 case DW_OP_reg19:
569 case DW_OP_reg20:
570 case DW_OP_reg21:
571 case DW_OP_reg22:
572 case DW_OP_reg23:
573 case DW_OP_reg24:
574 case DW_OP_reg25:
575 case DW_OP_reg26:
576 case DW_OP_reg27:
577 case DW_OP_reg28:
578 case DW_OP_reg29:
579 case DW_OP_reg30:
580 case DW_OP_reg31:
581 cfa->reg = op - DW_OP_reg0;
582 break;
583 case DW_OP_regx:
584 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
585 break;
586 case DW_OP_breg0:
587 case DW_OP_breg1:
588 case DW_OP_breg2:
589 case DW_OP_breg3:
590 case DW_OP_breg4:
591 case DW_OP_breg5:
592 case DW_OP_breg6:
593 case DW_OP_breg7:
594 case DW_OP_breg8:
595 case DW_OP_breg9:
596 case DW_OP_breg10:
597 case DW_OP_breg11:
598 case DW_OP_breg12:
599 case DW_OP_breg13:
600 case DW_OP_breg14:
601 case DW_OP_breg15:
602 case DW_OP_breg16:
603 case DW_OP_breg17:
604 case DW_OP_breg18:
605 case DW_OP_breg19:
606 case DW_OP_breg20:
607 case DW_OP_breg21:
608 case DW_OP_breg22:
609 case DW_OP_breg23:
610 case DW_OP_breg24:
611 case DW_OP_breg25:
612 case DW_OP_breg26:
613 case DW_OP_breg27:
614 case DW_OP_breg28:
615 case DW_OP_breg29:
616 case DW_OP_breg30:
617 case DW_OP_breg31:
618 cfa->reg = op - DW_OP_breg0;
619 cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
620 break;
621 case DW_OP_bregx:
622 cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
623 cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
624 break;
625 case DW_OP_deref:
626 cfa->indirect = 1;
627 break;
628 case DW_OP_plus_uconst:
629 cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
630 break;
631 default:
632 gcc_unreachable ();
633 }
634 }
635 }
636
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Only the offset changes; the register is left as-is.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Only the register changes; the offset is left as-is.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      /* Both register and offset are supplied.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Use the cached dw_cfa_location if one was stored with the CFI
	 (done for polynomial offsets, see def_cfa_0); otherwise decode
	 the location expression.  */
      if (cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc)
	*loc = *cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc;
      else
	get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remembered state is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* All other opcodes leave the CFA unchanged.  */
      break;
    }
}
680
681 /* Determine if two dw_cfa_location structures define the same data. */
682
683 bool
cfa_equal_p(const dw_cfa_location * loc1,const dw_cfa_location * loc2)684 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
685 {
686 return (loc1->reg == loc2->reg
687 && known_eq (loc1->offset, loc2->offset)
688 && loc1->indirect == loc2->indirect
689 && (loc1->indirect == 0
690 || known_eq (loc1->base_offset, loc2->base_offset)));
691 }
692
/* Determine if two CFI operands are identical.  T selects which member
   of the operand union is live.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Fast path on pointer identity, then fall back to comparing the
	 label strings themselves.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    case dw_cfi_oprnd_cfa_loc:
      return cfa_equal_p (a->dw_cfi_cfa_loc, b->dw_cfi_cfa_loc);
    }
  /* All enum values are handled above.  */
  gcc_unreachable ();
}
716
717 /* Determine if two CFI entries are identical. */
718
719 static bool
cfi_equal_p(dw_cfi_ref a,dw_cfi_ref b)720 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
721 {
722 enum dwarf_call_frame_info opc;
723
724 /* Make things easier for our callers, including missing operands. */
725 if (a == b)
726 return true;
727 if (a == NULL || b == NULL)
728 return false;
729
730 /* Obviously, the opcodes must match. */
731 opc = a->dw_cfi_opc;
732 if (opc != b->dw_cfi_opc)
733 return false;
734
735 /* Compare the two operands, re-using the type of the operands as
736 already exposed elsewhere. */
737 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
738 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
739 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
740 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
741 }
742
743 /* Determine if two CFI_ROW structures are identical. */
744
745 static bool
cfi_row_equal_p(dw_cfi_row * a,dw_cfi_row * b)746 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
747 {
748 size_t i, n_a, n_b, n_max;
749
750 if (a->cfa_cfi)
751 {
752 if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
753 return false;
754 }
755 else if (!cfa_equal_p (&a->cfa, &b->cfa))
756 return false;
757
758 n_a = vec_safe_length (a->reg_save);
759 n_b = vec_safe_length (b->reg_save);
760 n_max = MAX (n_a, n_b);
761
762 for (i = 0; i < n_max; ++i)
763 {
764 dw_cfi_ref r_a = NULL, r_b = NULL;
765
766 if (i < n_a)
767 r_a = (*a->reg_save)[i];
768 if (i < n_b)
769 r_b = (*b->reg_save)[i];
770
771 if (!cfi_equal_p (r_a, r_b))
772 return false;
773 }
774
775 if (a->window_save != b->window_save)
776 return false;
777
778 if (a->ra_mangled != b->ra_mangled)
779 return false;
780
781 return true;
782 }
783
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  /* Filled in by is_constant below when the offset is representable as a
     HOST_WIDE_INT; polynomial offsets fall through to the expression
     case at the end.  */
  HOST_WIDE_INT const_offset;
  if (new_cfa->reg == old_cfa->reg
      && !new_cfa->indirect
      && !old_cfa->indirect
      && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = const_offset;
    }
  else if (new_cfa->offset.is_constant ()
	   && known_eq (new_cfa->offset, old_cfa->offset)
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  This requires the old CFA to have
	 been set as a register plus offset rather than a general
	 DW_CFA_def_cfa_expression.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0
	   && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
      if (!new_cfa->offset.is_constant ()
	  || !new_cfa->base_offset.is_constant ())
	/* It's hard to reconstruct the CFA location for a polynomial
	   expression, so just cache it instead.  (lookup_cfa_1 reads
	   this cache back.)  */
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = copy_cfa (new_cfa);
      else
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = NULL;
    }

  return cfi;
}
865
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the trace's register-save base in sync when it is the same
     register the CFA is computed from.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Retain the CFI itself only for the full-expression form; simple
	 register/offset CFAs are fully described by cur_row->cfa.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
886
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, poly_int64 offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  if (sreg == INVALID_REGNUM)
    {
      HOST_WIDE_INT const_offset;
      /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
      if (fde && fde->stack_realign)
	{
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_aligned_loc (&cur_row->cfa, offset,
				     fde->stack_realignment);
	}
      else if (offset.is_constant (&const_offset))
	{
	  /* Pick the smallest encoding: the signed form when data
	     alignment would make the factored offset negative, the
	     extended form when REG doesn't fit in 6 bits, else the
	     compact one-byte DW_CFA_offset.  */
	  if (need_data_align_sf_opcode (const_offset))
	    cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
	  else if (reg & ~0x3f)
	    cfi->dw_cfi_opc = DW_CFA_offset_extended;
	  else
	    cfi->dw_cfi_opc = DW_CFA_offset;
	  cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
	}
      else
	{
	  /* Polynomial offsets have no fixed-offset opcode; fall back to
	     a full location expression.  */
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_loc (&cur_row->cfa, offset);
	}
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
946
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  poly_int64 args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  /* A note seen before the trace's first EH insn means the args_size is
     known at that EH point (see notice_eh_throw / eh_head).  */
  if (!cur_trace->eh_head)
    cur_trace->args_size_defined_for_eh = true;

  args_size = get_args_size (note);
  delta = args_size - cur_trace->end_true_args_size;
  if (known_eq (delta, 0))
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
984
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  poly_int64 args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn in the trace: record it and seed the delayed
	 args_size values from the current true size.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (maybe_ne (cur_trace->end_delay_args_size, args_size))
    {
      /* The args_size changed since the previous EH insn; emit the
	 opcode so the unwinder sees the new value here.  */
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
1009
1010 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
1011 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1012 used in places where rtl is prohibited. */
1013
1014 static inline unsigned
dwf_regno(const_rtx reg)1015 dwf_regno (const_rtx reg)
1016 {
1017 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
1018 return DWARF_FRAME_REGNUM (REGNO (reg));
1019 }
1020
1021 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
1022
1023 static bool
compare_reg_or_pc(rtx x,rtx y)1024 compare_reg_or_pc (rtx x, rtx y)
1025 {
1026 if (REG_P (x) && REG_P (y))
1027 return REGNO (x) == REGNO (y);
1028 return x == y;
1029 }
1030
1031 /* Record SRC as being saved in DEST. DEST may be null to delete an
1032 existing entry. SRC may be a register or PC_RTX. */
1033
1034 static void
record_reg_saved_in_reg(rtx dest,rtx src)1035 record_reg_saved_in_reg (rtx dest, rtx src)
1036 {
1037 reg_saved_in_data *elt;
1038 size_t i;
1039
1040 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
1041 if (compare_reg_or_pc (elt->orig_reg, src))
1042 {
1043 if (dest == NULL)
1044 cur_trace->regs_saved_in_regs.unordered_remove (i);
1045 else
1046 elt->saved_in_reg = dest;
1047 return;
1048 }
1049
1050 if (dest == NULL)
1051 return;
1052
1053 reg_saved_in_data e = {src, dest};
1054 cur_trace->regs_saved_in_regs.safe_push (e);
1055 }
1056
1057 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1058 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1059
1060 static void
queue_reg_save(rtx reg,rtx sreg,poly_int64 offset)1061 queue_reg_save (rtx reg, rtx sreg, poly_int64 offset)
1062 {
1063 queued_reg_save *q;
1064 queued_reg_save e = {reg, sreg, offset};
1065 size_t i;
1066
1067 /* Duplicates waste space, but it's also necessary to remove them
1068 for correctness, since the queue gets output in reverse order. */
1069 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1070 if (compare_reg_or_pc (q->reg, reg))
1071 {
1072 *q = e;
1073 return;
1074 }
1075
1076 queued_reg_saves.safe_push (e);
1077 }
1078
1079 /* Output all the entries in QUEUED_REG_SAVES. */
1080
1081 static void
dwarf2out_flush_queued_reg_saves(void)1082 dwarf2out_flush_queued_reg_saves (void)
1083 {
1084 queued_reg_save *q;
1085 size_t i;
1086
1087 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1088 {
1089 unsigned int reg, sreg;
1090
1091 record_reg_saved_in_reg (q->saved_reg, q->reg);
1092
1093 if (q->reg == pc_rtx)
1094 reg = DWARF_FRAME_RETURN_COLUMN;
1095 else
1096 reg = dwf_regno (q->reg);
1097 if (q->saved_reg)
1098 sreg = dwf_regno (q->saved_reg);
1099 else
1100 sreg = INVALID_REGNUM;
1101 reg_save (reg, sreg, q->cfa_offset);
1102 }
1103
1104 queued_reg_saves.truncate (0);
1105 }
1106
1107 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1108 location for? Or, does it clobber a register which we've previously
1109 said that some other register is saved in, and for which we now
1110 have a new location for? */
1111
1112 static bool
clobbers_queued_reg_save(const_rtx insn)1113 clobbers_queued_reg_save (const_rtx insn)
1114 {
1115 queued_reg_save *q;
1116 size_t iq;
1117
1118 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1119 {
1120 size_t ir;
1121 reg_saved_in_data *rir;
1122
1123 if (modified_in_p (q->reg, insn))
1124 return true;
1125
1126 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1127 if (compare_reg_or_pc (q->reg, rir->orig_reg)
1128 && modified_in_p (rir->saved_in_reg, insn))
1129 return true;
1130 }
1131
1132 return false;
1133 }
1134
1135 /* What register, if any, is currently saved in REG? */
1136
1137 static rtx
reg_saved_in(rtx reg)1138 reg_saved_in (rtx reg)
1139 {
1140 unsigned int regn = REGNO (reg);
1141 queued_reg_save *q;
1142 reg_saved_in_data *rir;
1143 size_t i;
1144
1145 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1146 if (q->saved_reg && regn == REGNO (q->saved_reg))
1147 return q->reg;
1148
1149 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1150 if (regn == REGNO (rir->saved_in_reg))
1151 return rir->orig_reg;
1152
1153 return NULL_RTX;
1154 }
1155
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* The note fully specifies the CFA; start from a clean description.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel off any constant displacement: (plus (...) (const_int)).  */
  pat = strip_offset (pat, &cur_cfa->offset);
  if (MEM_P (pat))
    {
      /* The CFA is loaded from memory: *(reg + base_offset) + offset.  */
      cur_cfa->indirect = 1;
      pat = strip_offset (XEXP (pat, 0), &cur_cfa->base_offset);
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
1174
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* DEST = CFA_REG + CONST: the CFA offset shrinks by CONST when
	 re-expressed relative to the new base register.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= rtx_to_poly_int64 (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy: only the base register changes.  */
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  /* This note may only adjust a register+offset CFA rule.  */
  gcc_assert (cur_cfa->indirect == 0);
}
1203
1204 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1205
1206 static void
dwarf2out_frame_debug_cfa_offset(rtx set)1207 dwarf2out_frame_debug_cfa_offset (rtx set)
1208 {
1209 poly_int64 offset;
1210 rtx src, addr, span;
1211 unsigned int sregno;
1212
1213 src = XEXP (set, 1);
1214 addr = XEXP (set, 0);
1215 gcc_assert (MEM_P (addr));
1216 addr = XEXP (addr, 0);
1217
1218 /* As documented, only consider extremely simple addresses. */
1219 switch (GET_CODE (addr))
1220 {
1221 case REG:
1222 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1223 offset = -cur_cfa->offset;
1224 break;
1225 case PLUS:
1226 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1227 offset = rtx_to_poly_int64 (XEXP (addr, 1)) - cur_cfa->offset;
1228 break;
1229 default:
1230 gcc_unreachable ();
1231 }
1232
1233 if (src == pc_rtx)
1234 {
1235 span = NULL;
1236 sregno = DWARF_FRAME_RETURN_COLUMN;
1237 }
1238 else
1239 {
1240 span = targetm.dwarf_register_span (src);
1241 sregno = dwf_regno (src);
1242 }
1243
1244 /* ??? We'd like to use queue_reg_save, but we need to come up with
1245 a different flushing heuristic for epilogues. */
1246 if (!span)
1247 reg_save (sregno, INVALID_REGNUM, offset);
1248 else
1249 {
1250 /* We have a PARALLEL describing where the contents of SRC live.
1251 Adjust the offset for each piece of the PARALLEL. */
1252 poly_int64 span_offset = offset;
1253
1254 gcc_assert (GET_CODE (span) == PARALLEL);
1255
1256 const int par_len = XVECLEN (span, 0);
1257 for (int par_index = 0; par_index < par_len; par_index++)
1258 {
1259 rtx elem = XVECEXP (span, 0, par_index);
1260 sregno = dwf_regno (src);
1261 reg_save (sregno, INVALID_REGNUM, span_offset);
1262 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1263 }
1264 }
1265 }
1266
1267 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1268
1269 static void
dwarf2out_frame_debug_cfa_register(rtx set)1270 dwarf2out_frame_debug_cfa_register (rtx set)
1271 {
1272 rtx src, dest;
1273 unsigned sregno, dregno;
1274
1275 src = XEXP (set, 1);
1276 dest = XEXP (set, 0);
1277
1278 record_reg_saved_in_reg (dest, src);
1279 if (src == pc_rtx)
1280 sregno = DWARF_FRAME_RETURN_COLUMN;
1281 else
1282 sregno = dwf_regno (src);
1283
1284 dregno = dwf_regno (dest);
1285
1286 /* ??? We'd like to use queue_reg_save, but we need to come up with
1287 a different flushing heuristic for epilogues. */
1288 reg_save (sregno, dregno, 0);
1289 }
1290
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-register spans are not supported with expression saves.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Emit DW_CFA_expression: SRC is saved at the address computed by the
     location expression derived from DEST's address.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1322
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
   note.  */

static void
dwarf2out_frame_debug_cfa_val_expression (rtx set)
{
  rtx dest = SET_DEST (set);
  gcc_assert (REG_P (dest));

  /* Multi-register spans are not supported here either.  */
  rtx span = targetm.dwarf_register_span (dest);
  gcc_assert (!span);

  /* Emit DW_CFA_val_expression: DEST's saved *value* (not its save
     address) is given by the expression computed from SRC.  */
  rtx src = SET_SRC (set);
  dw_cfi_ref cfi = new_cfi ();
  cfi->dw_cfi_opc = DW_CFA_val_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (src, GET_MODE (src),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
  add_cfi (cfi);
  update_row_reg_save (cur_row, dwf_regno (dest), cfi);
}
1345
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */

static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  gcc_assert (REG_P (reg));

  rtx span = targetm.dwarf_register_span (reg);
  if (!span)
    {
      /* Single register: emit DW_CFA_restore and drop any recorded
	 save location for it in the current row.  */
      unsigned int regno = dwf_regno (reg);
      add_cfi_restore (regno);
      update_row_reg_save (cur_row, regno, NULL);
    }
  else
    {
      /* We have a PARALLEL describing where the contents of REG live.
	 Restore the register for each piece of the PARALLEL.  */
      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
	{
	  reg = XVECEXP (span, 0, par_index);
	  gcc_assert (REG_P (reg));
	  unsigned int regno = dwf_regno (reg);
	  add_cfi_restore (regno);
	  update_row_reg_save (cur_row, regno, NULL);
	}
    }
}
1377
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.

   ??? Perhaps we should note in the CIE where windows are saved (instead
   of assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  /* The opcode takes no operands; the unwinder knows the register
     window convention from the ABI.  Mark the row so identical rows
     can be recognized when comparing traces.  */
  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
  cur_row->window_save = true;
}
1392
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_TOGGLE_RA_MANGLE.
   Note: DW_CFA_GNU_window_save dwarf opcode is reused for toggling RA mangle
   state, this is a target specific operation on AArch64 and can only be used
   on other targets if they don't use the window save operation otherwise.  */

static void
dwarf2out_frame_debug_cfa_toggle_ra_mangle (void)
{
  dw_cfi_ref cfi = new_cfi ();

  /* Same opcode as window-save (see the comment above); the row tracks
     the toggle state so that rows compare correctly.  */
  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
  cur_row->ra_mangled = !cur_row->ra_mangled;
}
1407
1408 /* Record call frame debugging information for an expression EXPR,
1409 which either sets SP or FP (adjusting how we calculate the frame
1410 address) or saves a register to the stack or another register.
1411 LABEL indicates the address of EXPR.
1412
1413 This function encodes a state machine mapping rtxes to actions on
1414 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1415 users need not read the source code.
1416
1417 The High-Level Picture
1418
1419 Changes in the register we use to calculate the CFA: Currently we
1420 assume that if you copy the CFA register into another register, we
1421 should take the other one as the new CFA register; this seems to
1422 work pretty well. If it's wrong for some target, it's simple
1423 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1424
1425 Changes in the register we use for saving registers to the stack:
1426 This is usually SP, but not always. Again, we deduce that if you
1427 copy SP into another register (and SP is not the CFA register),
1428 then the new register is the one we will be using for register
1429 saves. This also seems to work.
1430
1431 Register saves: There's not much guesswork about this one; if
1432 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1433 register save, and the register used to calculate the destination
1434 had better be the one we think we're using for this purpose.
1435 It's also assumed that a copy from a call-saved register to another
1436 register is saving that register if RTX_FRAME_RELATED_P is set on
1437 that instruction. If the copy is from a call-saved register to
1438 the *same* register, that means that the register is now the same
1439 value as in the caller.
1440
1441 Except: If the register being saved is the CFA register, and the
1442 offset is nonzero, we are saving the CFA, so we assume we have to
1443 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1444 the intent is to save the value of SP from the previous frame.
1445
1446 In addition, if a register has previously been saved to a different
1447 register,
1448
1449 Invariants / Summaries of Rules
1450
1451 cfa current rule for calculating the CFA. It usually
1452 consists of a register and an offset. This is
1453 actually stored in *cur_cfa, but abbreviated
1454 for the purposes of this documentation.
1455 cfa_store register used by prologue code to save things to the stack
1456 cfa_store.offset is the offset from the value of
1457 cfa_store.reg to the actual CFA
1458 cfa_temp register holding an integral value. cfa_temp.offset
1459 stores the value, which will be used to adjust the
1460 stack pointer. cfa_temp is also used like cfa_store,
1461 to track stores to the stack via fp or a temp reg.
1462
1463 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1464 with cfa.reg as the first operand changes the cfa.reg and its
1465 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1466 cfa_temp.offset.
1467
1468 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1469 expression yielding a constant. This sets cfa_temp.reg
1470 and cfa_temp.offset.
1471
1472 Rule 5: Create a new register cfa_store used to save items to the
1473 stack.
1474
1475 Rules 10-14: Save a register to the stack. Define offset as the
1476 difference of the original location and cfa_store's
1477 location (or cfa_temp's location if cfa_temp is used).
1478
1479 Rules 16-20: If AND operation happens on sp in prologue, we assume
1480 stack is realigned. We will use a group of DW_OP_XXX
1481 expressions to represent the location of the stored
1482 register instead of CFA+offset.
1483
1484 The Rules
1485
1486 "{a,b}" indicates a choice of a xor b.
1487 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1488
1489 Rule 1:
1490 (set <reg1> <reg2>:cfa.reg)
1491 effects: cfa.reg = <reg1>
1492 cfa.offset unchanged
1493 cfa_temp.reg = <reg1>
1494 cfa_temp.offset = cfa.offset
1495
1496 Rule 2:
1497 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1498 {<const_int>,<reg>:cfa_temp.reg}))
1499 effects: cfa.reg = sp if fp used
1500 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1501 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1502 if cfa_store.reg==sp
1503
1504 Rule 3:
1505 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1506 effects: cfa.reg = fp
1507 cfa_offset += +/- <const_int>
1508
1509 Rule 4:
1510 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1511 constraints: <reg1> != fp
1512 <reg1> != sp
1513 effects: cfa.reg = <reg1>
1514 cfa_temp.reg = <reg1>
1515 cfa_temp.offset = cfa.offset
1516
1517 Rule 5:
1518 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1519 constraints: <reg1> != fp
1520 <reg1> != sp
1521 effects: cfa_store.reg = <reg1>
1522 cfa_store.offset = cfa.offset - cfa_temp.offset
1523
1524 Rule 6:
1525 (set <reg> <const_int>)
1526 effects: cfa_temp.reg = <reg>
1527 cfa_temp.offset = <const_int>
1528
1529 Rule 7:
1530 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1531 effects: cfa_temp.reg = <reg1>
1532 cfa_temp.offset |= <const_int>
1533
1534 Rule 8:
1535 (set <reg> (high <exp>))
1536 effects: none
1537
1538 Rule 9:
1539 (set <reg> (lo_sum <exp> <const_int>))
1540 effects: cfa_temp.reg = <reg>
1541 cfa_temp.offset = <const_int>
1542
1543 Rule 10:
1544 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1545 effects: cfa_store.offset -= <const_int>
1546 cfa.offset = cfa_store.offset if cfa.reg == sp
1547 cfa.reg = sp
1548 cfa.base_offset = -cfa_store.offset
1549
1550 Rule 11:
1551 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1552 effects: cfa_store.offset += -/+ mode_size(mem)
1553 cfa.offset = cfa_store.offset if cfa.reg == sp
1554 cfa.reg = sp
1555 cfa.base_offset = -cfa_store.offset
1556
1557 Rule 12:
1558 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1559
1560 <reg2>)
1561 effects: cfa.reg = <reg1>
1562 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1563
1564 Rule 13:
1565 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1566 effects: cfa.reg = <reg1>
1567 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1568
1569 Rule 14:
1570 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1571 effects: cfa.reg = <reg1>
1572 cfa.base_offset = -cfa_temp.offset
1573 cfa_temp.offset -= mode_size(mem)
1574
1575 Rule 15:
1576 (set <reg> {unspec, unspec_volatile})
1577 effects: target-dependent
1578
1579 Rule 16:
1580 (set sp (and: sp <const_int>))
1581 constraints: cfa_store.reg == sp
1582 effects: cfun->fde.stack_realign = 1
1583 cfa_store.offset = 0
1584 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1585
1586 Rule 17:
1587 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1588 effects: cfa_store.offset += -/+ mode_size(mem)
1589
1590 Rule 18:
1591 (set (mem ({pre_inc, pre_dec} sp)) fp)
1592 constraints: fde->stack_realign == 1
1593 effects: cfa_store.offset = 0
1594 cfa.reg != HARD_FRAME_POINTER_REGNUM
1595
1596 Rule 19:
1597 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1598 constraints: fde->stack_realign == 1
1599 && cfa.offset == 0
1600 && cfa.indirect == 0
1601 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1602 effects: Use DW_CFA_def_cfa_expression to define cfa
1603 cfa.reg == fde->drap_reg */
1604
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  poly_int64 offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      /* Second pass: everything that is not a store to memory.  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  if (REG_P (src))
    {
      /* If SRC currently holds the saved value of some other register,
	 treat this operation as applying to that original register.  */
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	/* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		{
		  gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			      && fde->drap_reg != INVALID_REGNUM
			      && cur_cfa->reg != dwf_regno (src)
			      && fde->rule18);
		  fde->rule18 = 0;
		  /* The save of hard frame pointer has been deferred
		     until this point when Rule 18 applied.  Emit it now.  */
		  queue_reg_save (dest, NULL_RTX, 0);
		  /* And as the instruction modifies the hard frame pointer,
		     flush the queue as well.  */
		  dwarf2out_flush_queued_reg_saves ();
		}
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      if (REG_P (XEXP (src, 1)))
		{
		  /* Adjustment by a register: it must hold the value
		     previously recorded in cfa_temp (Rule 6/7/9).  */
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		}
	      else if (!poly_int_rtx_p (XEXP (src, 1), &offset))
		gcc_unreachable ();

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
	      offset = rtx_to_poly_int64 (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && poly_int_rtx_p (XEXP (src, 1), &offset))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = -offset;
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && poly_int_rtx_p (XEXP (src, 1),
					  &cur_trace->cfa_temp.offset))
		cur_trace->cfa_temp.reg = dwf_regno (dest);
	      else
		gcc_unreachable ();
	    }
	  break;

	/* Rule 6 */
	case CONST_INT:
	case CONST_POLY_INT:
	  /* Loading a constant: remember it for a later stack
	     adjustment or store (see Rule 2 and Rules 10-14).  */
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = rtx_to_poly_int64 (src);
	  break;

	/* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  if (!can_ior_p (cur_trace->cfa_temp.offset, INTVAL (XEXP (src, 1)),
			  &cur_trace->cfa_temp.offset))
	    /* The target shouldn't generate this kind of CFI note if we
	       can't represent it.  */
	    gcc_unreachable ();
	  break;

	/* Skip over HIGH, assuming it will be followed by a LO_SUM,
	   which will fill in all of the bits.  */
	/* Rule 8 */
	case HIGH:
	  break;

	/* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	/* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cur_trace->cfa_store.offset = 0;

	      /* If the CFA is tracked in neither SP nor FP, it must be
		 the DRAP register holding the pre-realignment stack.  */
	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	/* Rule 10 */
	/* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  offset = -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* OFFSET becomes the slot's displacement from the CFA below.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	/* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	      fde->rule18 = 1;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	/* Rule 12 */
	/* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = rtx_to_poly_int64 (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Re-express the slot address relative to the CFA, using
	       whichever tracked base register the address was built on.  */
	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	/* Rule 13 */
	/* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	/* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (known_eq (cur_cfa->offset, 0))
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cur_cfa->indirect == 0
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		{
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	{
	  if (fde->rule18)
	    /* Just verify the hard frame pointer save when doing dynamic
	       realignment uses expected offset.  The actual queue_reg_save
	       needs to be deferred until the instruction that sets
	       hard frame pointer to stack pointer, see PR99334 for
	       details.  */
	    gcc_assert (known_eq (offset, 0));
	  else
	    queue_reg_save (src, NULL_RTX, offset);
	}
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  poly_int64 span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2088
2089 /* Record call frame debugging information for INSN, which either sets
2090 SP or FP (adjusting how we calculate the frame address) or saves a
2091 register to the stack. */
2092
2093 static void
dwarf2out_frame_debug(rtx_insn * insn)2094 dwarf2out_frame_debug (rtx_insn *insn)
2095 {
2096 rtx note, n, pat;
2097 bool handled_one = false;
2098
2099 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2100 switch (REG_NOTE_KIND (note))
2101 {
2102 case REG_FRAME_RELATED_EXPR:
2103 pat = XEXP (note, 0);
2104 goto do_frame_expr;
2105
2106 case REG_CFA_DEF_CFA:
2107 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2108 handled_one = true;
2109 break;
2110
2111 case REG_CFA_ADJUST_CFA:
2112 n = XEXP (note, 0);
2113 if (n == NULL)
2114 {
2115 n = PATTERN (insn);
2116 if (GET_CODE (n) == PARALLEL)
2117 n = XVECEXP (n, 0, 0);
2118 }
2119 dwarf2out_frame_debug_adjust_cfa (n);
2120 handled_one = true;
2121 break;
2122
2123 case REG_CFA_OFFSET:
2124 n = XEXP (note, 0);
2125 if (n == NULL)
2126 n = single_set (insn);
2127 dwarf2out_frame_debug_cfa_offset (n);
2128 handled_one = true;
2129 break;
2130
2131 case REG_CFA_REGISTER:
2132 n = XEXP (note, 0);
2133 if (n == NULL)
2134 {
2135 n = PATTERN (insn);
2136 if (GET_CODE (n) == PARALLEL)
2137 n = XVECEXP (n, 0, 0);
2138 }
2139 dwarf2out_frame_debug_cfa_register (n);
2140 handled_one = true;
2141 break;
2142
2143 case REG_CFA_EXPRESSION:
2144 case REG_CFA_VAL_EXPRESSION:
2145 n = XEXP (note, 0);
2146 if (n == NULL)
2147 n = single_set (insn);
2148
2149 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2150 dwarf2out_frame_debug_cfa_expression (n);
2151 else
2152 dwarf2out_frame_debug_cfa_val_expression (n);
2153
2154 handled_one = true;
2155 break;
2156
2157 case REG_CFA_RESTORE:
2158 n = XEXP (note, 0);
2159 if (n == NULL)
2160 {
2161 n = PATTERN (insn);
2162 if (GET_CODE (n) == PARALLEL)
2163 n = XVECEXP (n, 0, 0);
2164 n = XEXP (n, 0);
2165 }
2166 dwarf2out_frame_debug_cfa_restore (n);
2167 handled_one = true;
2168 break;
2169
2170 case REG_CFA_SET_VDRAP:
2171 n = XEXP (note, 0);
2172 if (REG_P (n))
2173 {
2174 dw_fde_ref fde = cfun->fde;
2175 if (fde)
2176 {
2177 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2178 if (REG_P (n))
2179 fde->vdrap_reg = dwf_regno (n);
2180 }
2181 }
2182 handled_one = true;
2183 break;
2184
2185 case REG_CFA_TOGGLE_RA_MANGLE:
2186 dwarf2out_frame_debug_cfa_toggle_ra_mangle ();
2187 handled_one = true;
2188 break;
2189
2190 case REG_CFA_WINDOW_SAVE:
2191 dwarf2out_frame_debug_cfa_window_save ();
2192 handled_one = true;
2193 break;
2194
2195 case REG_CFA_FLUSH_QUEUE:
2196 /* The actual flush happens elsewhere. */
2197 handled_one = true;
2198 break;
2199
2200 default:
2201 break;
2202 }
2203
2204 if (!handled_one)
2205 {
2206 pat = PATTERN (insn);
2207 do_frame_expr:
2208 dwarf2out_frame_debug_expr (pat);
2209
2210 /* Check again. A parallel can save and update the same register.
2211 We could probably check just once, here, but this is safer than
2212 removing the check at the start of the function. */
2213 if (clobbers_queued_reg_save (pat))
2214 dwarf2out_flush_queued_reg_saves ();
2215 }
2216 }
2217
2218 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
2219
2220 static void
change_cfi_row(dw_cfi_row * old_row,dw_cfi_row * new_row)2221 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2222 {
2223 size_t i, n_old, n_new, n_max;
2224 dw_cfi_ref cfi;
2225
2226 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2227 add_cfi (new_row->cfa_cfi);
2228 else
2229 {
2230 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2231 if (cfi)
2232 add_cfi (cfi);
2233 }
2234
2235 n_old = vec_safe_length (old_row->reg_save);
2236 n_new = vec_safe_length (new_row->reg_save);
2237 n_max = MAX (n_old, n_new);
2238
2239 for (i = 0; i < n_max; ++i)
2240 {
2241 dw_cfi_ref r_old = NULL, r_new = NULL;
2242
2243 if (i < n_old)
2244 r_old = (*old_row->reg_save)[i];
2245 if (i < n_new)
2246 r_new = (*new_row->reg_save)[i];
2247
2248 if (r_old == r_new)
2249 ;
2250 else if (r_new == NULL)
2251 add_cfi_restore (i);
2252 else if (!cfi_equal_p (r_old, r_new))
2253 add_cfi (r_new);
2254 }
2255
2256 if (!old_row->window_save && new_row->window_save)
2257 {
2258 dw_cfi_ref cfi = new_cfi ();
2259
2260 gcc_assert (!old_row->ra_mangled && !new_row->ra_mangled);
2261 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
2262 add_cfi (cfi);
2263 }
2264
2265 if (old_row->ra_mangled != new_row->ra_mangled)
2266 {
2267 dw_cfi_ref cfi = new_cfi ();
2268
2269 gcc_assert (!old_row->window_save && !new_row->window_save);
2270 /* DW_CFA_GNU_window_save is reused for toggling RA mangle state. */
2271 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
2272 add_cfi (cfi);
2273 }
2274 }
2275
2276 /* Examine CFI and return true if a cfi label and set_loc is needed
2277 beforehand. Even when generating CFI assembler instructions, we
2278 still have to add the cfi to the list so that lookup_cfa_1 works
2279 later on. When -g2 and above we even need to force emitting of
2280 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2281 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2282 and so don't use convert_cfa_to_fb_loc_list. */
2283
2284 static bool
cfi_label_required_p(dw_cfi_ref cfi)2285 cfi_label_required_p (dw_cfi_ref cfi)
2286 {
2287 if (!dwarf2out_do_cfi_asm ())
2288 return true;
2289
2290 if (dwarf_version == 2
2291 && debug_info_level > DINFO_LEVEL_TERSE
2292 && (write_symbols == DWARF2_DEBUG
2293 || write_symbols == VMS_AND_DWARF2_DEBUG))
2294 {
2295 switch (cfi->dw_cfi_opc)
2296 {
2297 case DW_CFA_def_cfa_offset:
2298 case DW_CFA_def_cfa_offset_sf:
2299 case DW_CFA_def_cfa_register:
2300 case DW_CFA_def_cfa:
2301 case DW_CFA_def_cfa_sf:
2302 case DW_CFA_def_cfa_expression:
2303 case DW_CFA_restore_state:
2304 return true;
2305 default:
2306 return false;
2307 }
2308 }
2309 return false;
2310 }
2311
2312 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2313 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2314 necessary. */
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* Record where in the CFI vector the cold section begins, so the
	 FDE can later be split at that index.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Advance NEXT past the entire run of consecutive CFI notes
	     (and any intervening inactive notes), ORing together their
	     label requirements: one label suffices for the whole run.
	     Stop at the next active insn or a section switch.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      /* NOTE(review): DW_CFA_advance_loc4 appears to act as a
		 set-location marker carrying the label; confirm against
		 the CFI output code.  */
	      xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI note in [insn, next) to the FDE's vector.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	}
    }
}
2369
2370 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2371
2372 /* If LABEL is the start of a trace, then initialize the state of that
2373 trace from CUR_TRACE and CUR_ROW. */
2374
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;

  /* START must be the head of a known trace; anything else means the
     pseudo-cfg disagrees with the edges we're creating.  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  poly_int64 args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      /* Deep-copy so later mutations of the current trace's vector do
	 not alias the new trace's state.  */
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (maybe_ne (ti->beg_true_args_size, args_size))
	ti->args_size_undefined = true;
    }
}
2435
2436 /* Similarly, but handle the args_size and CFA reset across EH
2437 and non-local goto edges. */
2438
2439 static void
maybe_record_trace_start_abnormal(rtx_insn * start,rtx_insn * origin)2440 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2441 {
2442 poly_int64 save_args_size, delta;
2443 dw_cfa_location save_cfa;
2444
2445 save_args_size = cur_trace->end_true_args_size;
2446 if (known_eq (save_args_size, 0))
2447 {
2448 maybe_record_trace_start (start, origin);
2449 return;
2450 }
2451
2452 delta = -save_args_size;
2453 cur_trace->end_true_args_size = 0;
2454
2455 save_cfa = cur_row->cfa;
2456 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2457 {
2458 /* Convert a change in args_size (always a positive in the
2459 direction of stack growth) to a change in stack pointer. */
2460 if (!STACK_GROWS_DOWNWARD)
2461 delta = -delta;
2462
2463 cur_row->cfa.offset += delta;
2464 }
2465
2466 maybe_record_trace_start (start, origin);
2467
2468 cur_trace->end_true_args_size = save_args_size;
2469 cur_row->cfa = save_cfa;
2470 }
2471
2472 /* Propagate CUR_TRACE state to the destinations implied by INSN. */
2473 /* ??? Sadly, this is in large part a duplicate of make_edges. */
2474
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled via their CALL_INSN below.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* A tablejump targets every label in its jump table.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }

	  /* Handle casesi dispatch insns.  */
	  if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX)
	    {
	      /* The casesi default label is an additional target.  */
	      rtx_insn * lab = label_ref_label (XEXP (SET_SRC (tmp), 2));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was
	     taken.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	/* A return leaves the function: no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: each label operand is a possible target.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* A plain direct jump must have a label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot sequence: recurse into each member insn.  Return
	 here so the EH check below is done per-member, not on the
	 sequence itself.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2561
2562 /* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
2563
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.
   Process any frame-related effects of INSN into the current CFI row,
   then pick up its REG_ARGS_SIZE note, if any.  */

static void
scan_insn_after (rtx_insn *insn)
{
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  /* Done unconditionally: any insn may carry a REG_ARGS_SIZE note.  */
  notice_args_size (insn);
}
2571
2572 /* Scan the trace beginning at INSN and create the CFI notes for the
2573 instructions therein. */
2574
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  ENTRY is true only for the function's first
   trace.  Sets up and tears down the cur_trace/cur_row/cur_cfa
   globals used by the dwarf2out_frame_debug machinery.  */

static void
scan_trace (dw_trace_info *trace, bool entry)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* Work on a local copy of the CFA so notes can defer committing it.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  /* If the current function starts with a non-standard incoming frame
     sp offset, emit a note before the first instruction.  */
  if (entry
      && DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET)
    {
      add_cfi_insn = insn;
      gcc_assert (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED);
      this_cfa.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&this_cfa);
    }

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  /* Element 0 is the branch/call; the rest are delay slots.  */
	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  poly_int64 restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects on the fall-through path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  gcc_assert (!cfun->fde || !cfun->fde->rule18);
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2762
2763 /* Scan the function and create the initial set of CFI notes. */
2764
2765 static void
create_cfi_notes(void)2766 create_cfi_notes (void)
2767 {
2768 dw_trace_info *ti;
2769
2770 gcc_checking_assert (!queued_reg_saves.exists ());
2771 gcc_checking_assert (!trace_work_list.exists ());
2772
2773 /* Always begin at the entry trace. */
2774 ti = &trace_info[0];
2775 scan_trace (ti, true);
2776
2777 while (!trace_work_list.is_empty ())
2778 {
2779 ti = trace_work_list.pop ();
2780 scan_trace (ti, false);
2781 }
2782
2783 queued_reg_saves.release ();
2784 trace_work_list.release ();
2785 }
2786
2787 /* Return the insn before the first NOTE_INSN_CFI after START. */
2788
2789 static rtx_insn *
before_next_cfi_note(rtx_insn * start)2790 before_next_cfi_note (rtx_insn *start)
2791 {
2792 rtx_insn *prev = start;
2793 while (start)
2794 {
2795 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2796 return prev;
2797 prev = start;
2798 start = NEXT_INSN (start);
2799 }
2800 gcc_unreachable ();
2801 }
2802
2803 /* Insert CFI notes between traces to properly change state between them. */
2804
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n;
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  unsigned ix, ix2;
  VEC_ORDERED_REMOVE_IF_FROM_TO (trace_info, ix, ix2, ti, 1,
				 trace_info.length (), ti->beg_row == NULL);
  FOR_EACH_VEC_ELT (trace_info, ix, ti)
    gcc_assert (ti->end_row != NULL);

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  n = trace_info.length ();
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* If the target unwinder does not save the CFA as part of the
		 register state, we need to restore it separately.  */
	      if (targetm.asm_out.should_restore_cfa_state ()
		  && (cfi = def_cfa_0 (&old_row->cfa, &ti->beg_row->cfa)))
		add_cfi (cfi);

	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  /* Dump the CFI notes just emitted after TI->head.  */
	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      poly_int64 prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE starts with args_size 0.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;

	  if (ti->eh_head == NULL)
	    continue;

	  /* We require either the incoming args_size values to match or the
	     presence of an insn setting it before the first EH insn.  */
	  gcc_assert (!ti->args_size_undefined || ti->args_size_defined_for_eh);

	  /* In the latter case, we force the creation of a CFI note.  */
	  if (ti->args_size_undefined
	      || maybe_ne (ti->beg_delay_args_size, prev_args_size))
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2936
2937 /* Set up the pseudo-cfg of instruction traces, as described at the
2938 block comment at the top of the file. */
2939
2940 static void
create_pseudo_cfg(void)2941 create_pseudo_cfg (void)
2942 {
2943 bool saw_barrier, switch_sections;
2944 dw_trace_info ti;
2945 rtx_insn *insn;
2946 unsigned i;
2947
2948 /* The first trace begins at the start of the function,
2949 and begins with the CIE row state. */
2950 trace_info.create (16);
2951 memset (&ti, 0, sizeof (ti));
2952 ti.head = get_insns ();
2953 ti.beg_row = cie_cfi_row;
2954 ti.cfa_store = cie_cfi_row->cfa;
2955 ti.cfa_temp.reg = INVALID_REGNUM;
2956 trace_info.quick_push (ti);
2957
2958 if (cie_return_save)
2959 ti.regs_saved_in_regs.safe_push (*cie_return_save);
2960
2961 /* Walk all the insns, collecting start of trace locations. */
2962 saw_barrier = false;
2963 switch_sections = false;
2964 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2965 {
2966 if (BARRIER_P (insn))
2967 saw_barrier = true;
2968 else if (NOTE_P (insn)
2969 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
2970 {
2971 /* We should have just seen a barrier. */
2972 gcc_assert (saw_barrier);
2973 switch_sections = true;
2974 }
2975 /* Watch out for save_point notes between basic blocks.
2976 In particular, a note after a barrier. Do not record these,
2977 delaying trace creation until the label. */
2978 else if (save_point_p (insn)
2979 && (LABEL_P (insn) || !saw_barrier))
2980 {
2981 memset (&ti, 0, sizeof (ti));
2982 ti.head = insn;
2983 ti.switch_sections = switch_sections;
2984 ti.id = trace_info.length ();
2985 trace_info.safe_push (ti);
2986
2987 saw_barrier = false;
2988 switch_sections = false;
2989 }
2990 }
2991
2992 /* Create the trace index after we've finished building trace_info,
2993 avoiding stale pointer problems due to reallocation. */
2994 trace_index
2995 = new hash_table<trace_info_hasher> (trace_info.length ());
2996 dw_trace_info *tp;
2997 FOR_EACH_VEC_ELT (trace_info, i, tp)
2998 {
2999 dw_trace_info **slot;
3000
3001 if (dump_file)
3002 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
3003 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
3004 tp->switch_sections ? " (section switch)" : "");
3005
3006 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
3007 gcc_assert (*slot == NULL);
3008 *slot = tp;
3009 }
3010 }
3011
3012 /* Record the initial position of the return address. RTL is
3013 INCOMING_RETURN_ADDR_RTX. */
3014
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  Records either a register save (RA lives
   in a register other than the return column) or a stack save at an
   offset from the incoming CFA.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  poly_int64 offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  /* Address is exactly the stack pointer.  */
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  /* Address is SP + constant.  */
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	case MINUS:
	  /* Address is SP - constant.  */
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* RTL here is the REG from the REG case above (reg is only set
	 there), i.e. the register currently holding the RA.  */
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      /* OFFSET is CFA-relative: convert from SP-relative.  */
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
3073
/* Build the CIE row state shared by every FDE in this translation unit:
   the incoming CFA (SP + the default offset) and the initial location
   of the return address.  Called once per TU; the results are stored
   in cie_cfi_vec / cie_cfi_row / cie_return_save.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so the helpers below have a cur_trace.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  /* create_cie_data is called just once per TU, and when using .cfi_startproc
     is even done by the assembler rather than the compiler.  If the target
     has different incoming frame sp offsets depending on what kind of
     function it is, use a single constant offset for the target and
     if needed, adjust before the first instruction in insn stream.  */
  loc.offset = DEFAULT_INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Detach the globals again; each function sets its own.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
3130
3131 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3132 state at each location within the function. These notes will be
3133 emitted during pass_final. */
3134
3135 static unsigned int
execute_dwarf2_frame(void)3136 execute_dwarf2_frame (void)
3137 {
3138 /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file. */
3139 dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);
3140
3141 /* The first time we're called, compute the incoming frame state. */
3142 if (cie_cfi_vec == NULL)
3143 create_cie_data ();
3144
3145 dwarf2out_alloc_current_fde ();
3146
3147 create_pseudo_cfg ();
3148
3149 /* Do the work. */
3150 create_cfi_notes ();
3151 connect_traces ();
3152 add_cfis_to_fde ();
3153
3154 /* Free all the data we allocated. */
3155 {
3156 size_t i;
3157 dw_trace_info *ti;
3158
3159 FOR_EACH_VEC_ELT (trace_info, i, ti)
3160 ti->regs_saved_in_regs.release ();
3161 }
3162 trace_info.release ();
3163
3164 delete trace_index;
3165 trace_index = NULL;
3166
3167 return 0;
3168 }
3169
3170 /* Convert a DWARF call frame info. operation to its string name */
3171
3172 static const char *
dwarf_cfi_name(unsigned int cfi_opc)3173 dwarf_cfi_name (unsigned int cfi_opc)
3174 {
3175 const char *name = get_DW_CFA_name (cfi_opc);
3176
3177 if (name != NULL)
3178 return name;
3179
3180 return "DW_CFA_<unknown>";
3181 }
3182
3183 /* This routine will generate the correct assembly data for a location
3184 description based on a cfi entry with a complex address. */
3185
3186 static void
output_cfa_loc(dw_cfi_ref cfi,int for_eh)3187 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3188 {
3189 dw_loc_descr_ref loc;
3190 unsigned long size;
3191
3192 if (cfi->dw_cfi_opc == DW_CFA_expression
3193 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3194 {
3195 unsigned r =
3196 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3197 dw2_asm_output_data (1, r, NULL);
3198 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3199 }
3200 else
3201 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3202
3203 /* Output the size of the block. */
3204 size = size_of_locs (loc);
3205 dw2_asm_output_data_uleb128 (size, NULL);
3206
3207 /* Now output the operations themselves. */
3208 output_loc_sequence (loc, for_eh);
3209 }
3210
3211 /* Similar, but used for .cfi_escape. */
3212
3213 static void
output_cfa_loc_raw(dw_cfi_ref cfi)3214 output_cfa_loc_raw (dw_cfi_ref cfi)
3215 {
3216 dw_loc_descr_ref loc;
3217 unsigned long size;
3218
3219 if (cfi->dw_cfi_opc == DW_CFA_expression
3220 || cfi->dw_cfi_opc == DW_CFA_val_expression)
3221 {
3222 unsigned r =
3223 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3224 fprintf (asm_out_file, "%#x,", r);
3225 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3226 }
3227 else
3228 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3229
3230 /* Output the size of the block. */
3231 size = size_of_locs (loc);
3232 dw2_asm_output_data_uleb128_raw (size);
3233 fputc (',', asm_out_file);
3234
3235 /* Now output the operations themselves. */
3236 output_loc_sequence_raw (loc);
3237 }
3238
/* Output a Call Frame Information opcode and its operand(s).
   FDE is used to track the label of the current location, which the
   advance_loc/set_loc opcodes update.  FOR_EH is nonzero when emitting
   into .eh_frame rather than .debug_frame; it selects the EH register
   number mapping and, for set_loc, the preferred address encoding.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The three "primary" opcodes pack their first operand into the low
     six bits of the opcode byte itself.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* Offset operands are stored factored by the data alignment.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* Everything else is a full opcode byte followed by operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  /* An absolute address; encoded per EH conventions when
	     emitting .eh_frame.  */
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
	      ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
	      gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
	      false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_locN forms emit a delta of 1, 2, 4 or 8 bytes
	   from the FDE's current location label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  /* uleb128 register, then data-aligned uleb128 offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* uleb128 register, then a non-factored uleb128 offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  /* uleb128 register, then data-aligned sleb128 offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These four take a single uleb128 register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  /* Two uleb128 register operands.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  /* A single non-factored uleb128 offset.  */
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3380
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file, in which case real .cfi_* directives (or .cfi_escape
   byte sequences) are emitted, or a dump/debug file, in which case a
   best-effort textual form is printed.  Note that register numbers
   always use the EH mapping here (DWARF2_FRAME_REG_OUT with 1).  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routine is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* The directive takes the raw (non-factored) offset.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No assembler directive exists for this opcode; emit the raw
	 opcode/operand bytes via .cfi_escape when producing real
	 output, or a readable pseudo-directive in a dump.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      /* For dumps, just name the opcode; the expression itself is
	 elided.  For real output, escape the raw bytes.  */
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3500
3501 void
dwarf2out_emit_cfi(dw_cfi_ref cfi)3502 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3503 {
3504 if (dwarf2out_do_cfi_asm ())
3505 output_cfi_directive (asm_out_file, cfi);
3506 }
3507
3508 static void
dump_cfi_row(FILE * f,dw_cfi_row * row)3509 dump_cfi_row (FILE *f, dw_cfi_row *row)
3510 {
3511 dw_cfi_ref cfi;
3512 unsigned i;
3513
3514 cfi = row->cfa_cfi;
3515 if (!cfi)
3516 {
3517 dw_cfa_location dummy;
3518 memset (&dummy, 0, sizeof (dummy));
3519 dummy.reg = INVALID_REGNUM;
3520 cfi = def_cfa_0 (&dummy, &row->cfa);
3521 }
3522 output_cfi_directive (f, cfi);
3523
3524 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3525 if (cfi)
3526 output_cfi_directive (f, cfi);
3527 }
3528
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr.  External (non-static) so it can be invoked
   conveniently from a debugger.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3536
3537
/* Cache of the result of dwarf2out_do_cfi_asm, saved across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3541
3542 /* Decide whether to emit EH frame unwind information for the current
3543 translation unit. */
3544
3545 bool
dwarf2out_do_eh_frame(void)3546 dwarf2out_do_eh_frame (void)
3547 {
3548 return
3549 (flag_unwind_tables || flag_exceptions)
3550 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
3551 }
3552
3553 /* Decide whether we want to emit frame unwind information for the current
3554 translation unit. */
3555
3556 bool
dwarf2out_do_frame(void)3557 dwarf2out_do_frame (void)
3558 {
3559 /* We want to emit correct CFA location expressions or lists, so we
3560 have to return true if we're going to output debug info, even if
3561 we're not going to output frame or unwind info. */
3562 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3563 return true;
3564
3565 if (saved_do_cfi_asm > 0)
3566 return true;
3567
3568 if (targetm.debug_unwind_info () == UI_DWARF2)
3569 return true;
3570
3571 if (dwarf2out_do_eh_frame ())
3572 return true;
3573
3574 return false;
3575 }
3576
/* Decide whether to emit frame unwind via assembler directives
   (.cfi_* rather than compiler-built tables).  The answer is computed
   once and cached in saved_do_cfi_asm.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* Return the cached answer if we already decided.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3613
namespace {

/* Descriptor for the CFI annotation pass.  It is accounted to
   TV_FINAL since its work is part of final assembly output.  */

const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The RTL pass wrapper; all real work happens in execute_dwarf2_frame.  */

class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3657
/* Factory for the dwarf2 frame pass object.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3663
3664 #include "gt-dwarf2cfi.h"
3665