1 /* tc-ia64.c -- Assembler for the HP/Intel IA-64 architecture.
2 Copyright (C) 1998-2022 Free Software Foundation, Inc.
3 Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
4
5 This file is part of GAS, the GNU Assembler.
6
7 GAS is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GAS is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GAS; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street - Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 /*
23 TODO:
24
25 - optional operands
26 - directives:
27 .eb
28 .estate
29 .lb
30 .popsection
31 .previous
32 .psr
33 .pushsection
34 - labels are wrong if automatic alignment is introduced
35 (e.g., check out the second real10 definition in test-data.s)
36 - DV-related stuff:
37 <reg>.safe_across_calls and any other DV-related directives I don't
38 have documentation for.
39 verify mod-sched-brs reads/writes are checked/marked (and other
40 notes)
41
42 */
43
44 #include "as.h"
45 #include "safe-ctype.h"
46 #include "dwarf2dbg.h"
47 #include "subsegs.h"
48
49 #include "opcode/ia64.h"
50
51 #include "elf/ia64.h"
52 #include "bfdver.h"
53 #include <time.h>
54 #include <limits.h>
55
56 #define NELEMS(a) ((int) (sizeof (a)/sizeof ((a)[0])))
57
58 /* Some systems define MIN in, e.g., param.h. */
59 #undef MIN
60 #define MIN(a,b) ((a) < (b) ? (a) : (b))
61
62 #define NUM_SLOTS 4
63 #define PREV_SLOT md.slot[(md.curr_slot + NUM_SLOTS - 1) % NUM_SLOTS]
64 #define CURR_SLOT md.slot[md.curr_slot]
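/* For example, with NUM_SLOTS == 4 and md.curr_slot == 0, PREV_SLOT expands
   to md.slot[(0 + 4 - 1) % 4] == md.slot[3], i.e. the slot buffer wraps.  */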
65
66 #define O_pseudo_fixup (O_max + 1)
67
68 enum special_section
69 {
70 /* IA-64 ABI section pseudo-ops. */
71 SPECIAL_SECTION_BSS = 0,
72 SPECIAL_SECTION_SBSS,
73 SPECIAL_SECTION_SDATA,
74 SPECIAL_SECTION_RODATA,
75 SPECIAL_SECTION_COMMENT,
76 SPECIAL_SECTION_UNWIND,
77 SPECIAL_SECTION_UNWIND_INFO,
78 /* HPUX specific section pseudo-ops. */
79 SPECIAL_SECTION_INIT_ARRAY,
80 SPECIAL_SECTION_FINI_ARRAY,
81 };
82
83 enum reloc_func
84 {
85 FUNC_DTP_MODULE,
86 FUNC_DTP_RELATIVE,
87 FUNC_FPTR_RELATIVE,
88 FUNC_GP_RELATIVE,
89 FUNC_LT_RELATIVE,
90 FUNC_LT_RELATIVE_X,
91 FUNC_PC_RELATIVE,
92 FUNC_PLT_RELATIVE,
93 FUNC_SEC_RELATIVE,
94 FUNC_SEG_RELATIVE,
95 FUNC_TP_RELATIVE,
96 FUNC_LTV_RELATIVE,
97 FUNC_LT_FPTR_RELATIVE,
98 FUNC_LT_DTP_MODULE,
99 FUNC_LT_DTP_RELATIVE,
100 FUNC_LT_TP_RELATIVE,
101 FUNC_IPLT_RELOC,
102 #ifdef TE_VMS
103 FUNC_SLOTCOUNT_RELOC,
104 #endif
105 };
106
107 enum reg_symbol
108 {
109 REG_GR = 0,
110 REG_FR = (REG_GR + 128),
111 REG_AR = (REG_FR + 128),
112 REG_CR = (REG_AR + 128),
113 REG_DAHR = (REG_CR + 128),
114 REG_P = (REG_DAHR + 8),
115 REG_BR = (REG_P + 64),
116 REG_IP = (REG_BR + 8),
117 REG_CFM,
118 REG_PR,
119 REG_PR_ROT,
120 REG_PSR,
121 REG_PSR_L,
122 REG_PSR_UM,
123 /* The following are pseudo-registers for use by gas only. */
124 IND_CPUID,
125 IND_DBR,
126 IND_DTR,
127 IND_ITR,
128 IND_IBR,
129 IND_MSR,
130 IND_PKR,
131 IND_PMC,
132 IND_PMD,
133 IND_DAHR,
134 IND_RR,
135 /* The following pseudo-registers are used for unwind directives only: */
136 REG_PSP,
137 REG_PRIUNAT,
138 REG_NUM
139 };
140
141 enum dynreg_type
142 {
143 DYNREG_GR = 0, /* dynamic general purpose register */
144 DYNREG_FR, /* dynamic floating point register */
145 DYNREG_PR, /* dynamic predicate register */
146 DYNREG_NUM_TYPES
147 };
148
149 enum operand_match_result
150 {
151 OPERAND_MATCH,
152 OPERAND_OUT_OF_RANGE,
153 OPERAND_MISMATCH
154 };
155
156 /* On the ia64, we can't know the address of a text label until the
157 instructions are packed into a bundle. To handle this, we keep
158 track of the list of labels that appear in front of each
159 instruction. */
160 struct label_fix
161 {
162 struct label_fix *next;
163 struct symbol *sym;
164 bool dw2_mark_labels;
165 };
166
167 #ifdef TE_VMS
168 /* An internally used relocation. */
169 #define DUMMY_RELOC_IA64_SLOTCOUNT (BFD_RELOC_UNUSED + 1)
170 #endif
171
172 /* This is the endianness of the current section. */
173 extern int target_big_endian;
174
175 /* This is the default endianness. */
176 static int default_big_endian = TARGET_BYTES_BIG_ENDIAN;
177
178 void (*ia64_number_to_chars) (char *, valueT, int);
179
180 static void ia64_float_to_chars_bigendian (char *, LITTLENUM_TYPE *, int);
181 static void ia64_float_to_chars_littleendian (char *, LITTLENUM_TYPE *, int);
182
183 static void (*ia64_float_to_chars) (char *, LITTLENUM_TYPE *, int);
184
185 static htab_t alias_hash;
186 static htab_t alias_name_hash;
187 static htab_t secalias_hash;
188 static htab_t secalias_name_hash;
189
190 /* List of chars besides those in app.c:symbol_chars that can start an
191 operand. Used to prevent the scrubber eating vital white-space. */
192 const char ia64_symbol_chars[] = "@?";
193
194 /* Characters which always start a comment. */
195 const char comment_chars[] = "";
196
197 /* Characters which start a comment at the beginning of a line. */
198 const char line_comment_chars[] = "#";
199
200 /* Characters which may be used to separate multiple commands on a
201 single line. */
202 const char line_separator_chars[] = ";{}";
203
204 /* Characters which are used to indicate an exponent in a floating
205 point number. */
206 const char EXP_CHARS[] = "eE";
207
208 /* Characters which mean that a number is a floating point constant,
209 as in 0d1.0. */
210 const char FLT_CHARS[] = "rRsSfFdDxXpP";
211
212 /* ia64-specific option processing: */
213
214 const char *md_shortopts = "m:N:x::";
215
216 struct option md_longopts[] =
217 {
218 #define OPTION_MCONSTANT_GP (OPTION_MD_BASE + 1)
219 {"mconstant-gp", no_argument, NULL, OPTION_MCONSTANT_GP},
220 #define OPTION_MAUTO_PIC (OPTION_MD_BASE + 2)
221 {"mauto-pic", no_argument, NULL, OPTION_MAUTO_PIC}
222 };
223
224 size_t md_longopts_size = sizeof (md_longopts);
225
226 static struct
227 {
228 htab_t pseudo_hash; /* pseudo opcode hash table */
229 htab_t reg_hash; /* register name hash table */
230 htab_t dynreg_hash; /* dynamic register hash table */
231 htab_t const_hash; /* constant hash table */
232 htab_t entry_hash; /* code entry hint hash table */
233
234 /* If X_op is != O_absent, the register name for the instruction's
235 qualifying predicate. If NULL, p0 is assumed for instructions
236 that can be predicated. */
237 expressionS qp;
238
239 /* Optimize for which CPU. */
240 enum
241 {
242 itanium1,
243 itanium2
244 } tune;
245
246 /* What to do when hint.b is used. */
247 enum
248 {
249 hint_b_error,
250 hint_b_warning,
251 hint_b_ok
252 } hint_b;
253
254 unsigned int
255 manual_bundling : 1,
256 debug_dv: 1,
257 detect_dv: 1,
258 explicit_mode : 1, /* which mode we're in */
259 default_explicit_mode : 1, /* which mode is the default */
260 mode_explicitly_set : 1, /* was the current mode explicitly set? */
261 auto_align : 1,
262 keep_pending_output : 1;
263
264 /* What to do when something is wrong with unwind directives. */
265 enum
266 {
267 unwind_check_warning,
268 unwind_check_error
269 } unwind_check;
270
271 /* Each bundle consists of up to three instructions. We keep
272 track of the four most recent instructions so we can correctly set
273 the end_of_insn_group for the last instruction in a bundle. */
274 int curr_slot;
275 int num_slots_in_use;
276 struct slot
277 {
278 unsigned int
279 end_of_insn_group : 1,
280 manual_bundling_on : 1,
281 manual_bundling_off : 1,
282 loc_directive_seen : 1;
283 signed char user_template; /* user-selected template, if any */
284 unsigned char qp_regno; /* qualifying predicate */
285 /* This duplicates a good fraction of "struct fix" but we
286 can't use a "struct fix" instead since we can't call
287 fix_new_exp() until we know the address of the instruction. */
288 int num_fixups;
289 struct insn_fix
290 {
291 bfd_reloc_code_real_type code;
292 enum ia64_opnd opnd; /* type of operand in need of fix */
293 unsigned int is_pcrel : 1; /* is operand pc-relative? */
294 expressionS expr; /* the value to be inserted */
295 }
296 fixup[2]; /* at most two fixups per insn */
297 struct ia64_opcode *idesc;
298 struct label_fix *label_fixups;
299 struct label_fix *tag_fixups;
300 struct unw_rec_list *unwind_record; /* Unwind directive. */
301 expressionS opnd[6];
302 const char *src_file;
303 unsigned int src_line;
304 struct dwarf2_line_info debug_line;
305 }
306 slot[NUM_SLOTS];
307
308 segT last_text_seg;
309 subsegT last_text_subseg;
310
311 struct dynreg
312 {
313 struct dynreg *next; /* next dynamic register */
314 const char *name;
315 unsigned short base; /* the base register number */
316 unsigned short num_regs; /* # of registers in this set */
317 }
318 *dynreg[DYNREG_NUM_TYPES], in, loc, out, rot;
319
320 flagword flags; /* ELF-header flags */
321
322 struct mem_offset {
323 unsigned hint:1; /* is this hint currently valid? */
324 bfd_vma offset; /* mem.offset offset */
325 bfd_vma base; /* mem.offset base */
326 } mem_offset;
327
328 int path; /* number of alt. entry points seen */
329 const char **entry_labels; /* labels of all alternate paths in
330 the current DV-checking block. */
331 int maxpaths; /* size currently allocated for
332 entry_labels */
333
334 int pointer_size; /* size in bytes of a pointer */
335 int pointer_size_shift; /* shift size of a pointer for alignment */
336
337 symbolS *indregsym[IND_RR - IND_CPUID + 1];
338 }
339 md;
340
341 /* These are not const, because they are modified to MMI for non-itanium1
342 targets below. */
343 /* MFI bundle of nops. */
344 static unsigned char le_nop[16] =
345 {
346 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
347 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
348 };
349 /* MFI bundle of nops with stop-bit. */
350 static unsigned char le_nop_stop[16] =
351 {
352 0x0d, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
353 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00
354 };
355
356 /* application registers: */
357
358 #define AR_K0 0
359 #define AR_K7 7
360 #define AR_RSC 16
361 #define AR_BSP 17
362 #define AR_BSPSTORE 18
363 #define AR_RNAT 19
364 #define AR_FCR 21
365 #define AR_EFLAG 24
366 #define AR_CSD 25
367 #define AR_SSD 26
368 #define AR_CFLG 27
369 #define AR_FSR 28
370 #define AR_FIR 29
371 #define AR_FDR 30
372 #define AR_CCV 32
373 #define AR_UNAT 36
374 #define AR_FPSR 40
375 #define AR_ITC 44
376 #define AR_RUC 45
377 #define AR_PFS 64
378 #define AR_LC 65
379 #define AR_EC 66
380
381 static const struct
382 {
383 const char *name;
384 unsigned int regnum;
385 }
386 ar[] =
387 {
388 {"ar.k0", AR_K0}, {"ar.k1", AR_K0 + 1},
389 {"ar.k2", AR_K0 + 2}, {"ar.k3", AR_K0 + 3},
390 {"ar.k4", AR_K0 + 4}, {"ar.k5", AR_K0 + 5},
391 {"ar.k6", AR_K0 + 6}, {"ar.k7", AR_K7},
392 {"ar.rsc", AR_RSC}, {"ar.bsp", AR_BSP},
393 {"ar.bspstore", AR_BSPSTORE}, {"ar.rnat", AR_RNAT},
394 {"ar.fcr", AR_FCR}, {"ar.eflag", AR_EFLAG},
395 {"ar.csd", AR_CSD}, {"ar.ssd", AR_SSD},
396 {"ar.cflg", AR_CFLG}, {"ar.fsr", AR_FSR},
397 {"ar.fir", AR_FIR}, {"ar.fdr", AR_FDR},
398 {"ar.ccv", AR_CCV}, {"ar.unat", AR_UNAT},
399 {"ar.fpsr", AR_FPSR}, {"ar.itc", AR_ITC},
400 {"ar.ruc", AR_RUC}, {"ar.pfs", AR_PFS},
401 {"ar.lc", AR_LC}, {"ar.ec", AR_EC},
402 };
403
404 /* control registers: */
405
406 #define CR_DCR 0
407 #define CR_ITM 1
408 #define CR_IVA 2
409 #define CR_PTA 8
410 #define CR_GPTA 9
411 #define CR_IPSR 16
412 #define CR_ISR 17
413 #define CR_IIP 19
414 #define CR_IFA 20
415 #define CR_ITIR 21
416 #define CR_IIPA 22
417 #define CR_IFS 23
418 #define CR_IIM 24
419 #define CR_IHA 25
420 #define CR_IIB0 26
421 #define CR_IIB1 27
422 #define CR_LID 64
423 #define CR_IVR 65
424 #define CR_TPR 66
425 #define CR_EOI 67
426 #define CR_IRR0 68
427 #define CR_IRR3 71
428 #define CR_ITV 72
429 #define CR_PMV 73
430 #define CR_CMCV 74
431 #define CR_LRR0 80
432 #define CR_LRR1 81
433
434 static const struct
435 {
436 const char *name;
437 unsigned int regnum;
438 }
439 cr[] =
440 {
441 {"cr.dcr", CR_DCR},
442 {"cr.itm", CR_ITM},
443 {"cr.iva", CR_IVA},
444 {"cr.pta", CR_PTA},
445 {"cr.gpta", CR_GPTA},
446 {"cr.ipsr", CR_IPSR},
447 {"cr.isr", CR_ISR},
448 {"cr.iip", CR_IIP},
449 {"cr.ifa", CR_IFA},
450 {"cr.itir", CR_ITIR},
451 {"cr.iipa", CR_IIPA},
452 {"cr.ifs", CR_IFS},
453 {"cr.iim", CR_IIM},
454 {"cr.iha", CR_IHA},
455 {"cr.iib0", CR_IIB0},
456 {"cr.iib1", CR_IIB1},
457 {"cr.lid", CR_LID},
458 {"cr.ivr", CR_IVR},
459 {"cr.tpr", CR_TPR},
460 {"cr.eoi", CR_EOI},
461 {"cr.irr0", CR_IRR0},
462 {"cr.irr1", CR_IRR0 + 1},
463 {"cr.irr2", CR_IRR0 + 2},
464 {"cr.irr3", CR_IRR3},
465 {"cr.itv", CR_ITV},
466 {"cr.pmv", CR_PMV},
467 {"cr.cmcv", CR_CMCV},
468 {"cr.lrr0", CR_LRR0},
469 {"cr.lrr1", CR_LRR1}
470 };
471
472 #define PSR_MFL 4
473 #define PSR_IC 13
474 #define PSR_DFL 18
475 #define PSR_CPL 32
476
477 static const struct const_desc
478 {
479 const char *name;
480 valueT value;
481 }
482 const_bits[] =
483 {
484 /* PSR constant masks: */
485
486 /* 0: reserved */
487 {"psr.be", ((valueT) 1) << 1},
488 {"psr.up", ((valueT) 1) << 2},
489 {"psr.ac", ((valueT) 1) << 3},
490 {"psr.mfl", ((valueT) 1) << 4},
491 {"psr.mfh", ((valueT) 1) << 5},
492 /* 6-12: reserved */
493 {"psr.ic", ((valueT) 1) << 13},
494 {"psr.i", ((valueT) 1) << 14},
495 {"psr.pk", ((valueT) 1) << 15},
496 /* 16: reserved */
497 {"psr.dt", ((valueT) 1) << 17},
498 {"psr.dfl", ((valueT) 1) << 18},
499 {"psr.dfh", ((valueT) 1) << 19},
500 {"psr.sp", ((valueT) 1) << 20},
501 {"psr.pp", ((valueT) 1) << 21},
502 {"psr.di", ((valueT) 1) << 22},
503 {"psr.si", ((valueT) 1) << 23},
504 {"psr.db", ((valueT) 1) << 24},
505 {"psr.lp", ((valueT) 1) << 25},
506 {"psr.tb", ((valueT) 1) << 26},
507 {"psr.rt", ((valueT) 1) << 27},
508 /* 28-31: reserved */
509 /* 32-33: cpl (current privilege level) */
510 {"psr.is", ((valueT) 1) << 34},
511 {"psr.mc", ((valueT) 1) << 35},
512 {"psr.it", ((valueT) 1) << 36},
513 {"psr.id", ((valueT) 1) << 37},
514 {"psr.da", ((valueT) 1) << 38},
515 {"psr.dd", ((valueT) 1) << 39},
516 {"psr.ss", ((valueT) 1) << 40},
517 /* 41-42: ri (restart instruction) */
518 {"psr.ed", ((valueT) 1) << 43},
519 {"psr.bn", ((valueT) 1) << 44},
520 };
521
522 /* indirect register-sets/memory: */
523
524 static const struct
525 {
526 const char *name;
527 unsigned int regnum;
528 }
529 indirect_reg[] =
530 {
531 { "CPUID", IND_CPUID },
532 { "cpuid", IND_CPUID },
533 { "dbr", IND_DBR },
534 { "dtr", IND_DTR },
535 { "itr", IND_ITR },
536 { "ibr", IND_IBR },
537 { "msr", IND_MSR },
538 { "pkr", IND_PKR },
539 { "pmc", IND_PMC },
540 { "pmd", IND_PMD },
541 { "dahr", IND_DAHR },
542 { "rr", IND_RR },
543 };
544
545 /* Pseudo functions used to indicate relocation types (these functions
546 start with an at sign (@)). */
547 static struct
548 {
549 const char *name;
550 enum pseudo_type
551 {
552 PSEUDO_FUNC_NONE,
553 PSEUDO_FUNC_RELOC,
554 PSEUDO_FUNC_CONST,
555 PSEUDO_FUNC_REG,
556 PSEUDO_FUNC_FLOAT
557 }
558 type;
559 union
560 {
561 unsigned long ival;
562 symbolS *sym;
563 }
564 u;
565 }
566 pseudo_func[] =
567 {
568 /* reloc pseudo functions (these must come first!): */
569 { "dtpmod", PSEUDO_FUNC_RELOC, { 0 } },
570 { "dtprel", PSEUDO_FUNC_RELOC, { 0 } },
571 { "fptr", PSEUDO_FUNC_RELOC, { 0 } },
572 { "gprel", PSEUDO_FUNC_RELOC, { 0 } },
573 { "ltoff", PSEUDO_FUNC_RELOC, { 0 } },
574 { "ltoffx", PSEUDO_FUNC_RELOC, { 0 } },
575 { "pcrel", PSEUDO_FUNC_RELOC, { 0 } },
576 { "pltoff", PSEUDO_FUNC_RELOC, { 0 } },
577 { "secrel", PSEUDO_FUNC_RELOC, { 0 } },
578 { "segrel", PSEUDO_FUNC_RELOC, { 0 } },
579 { "tprel", PSEUDO_FUNC_RELOC, { 0 } },
580 { "ltv", PSEUDO_FUNC_RELOC, { 0 } },
581 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_FPTR_RELATIVE */
582 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_MODULE */
583 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_DTP_RELATIVE */
584 { NULL, 0, { 0 } }, /* placeholder for FUNC_LT_TP_RELATIVE */
585 { "iplt", PSEUDO_FUNC_RELOC, { 0 } },
586 #ifdef TE_VMS
587 { "slotcount", PSEUDO_FUNC_RELOC, { 0 } },
588 #endif
589
590 /* mbtype4 constants: */
591 { "alt", PSEUDO_FUNC_CONST, { 0xa } },
592 { "brcst", PSEUDO_FUNC_CONST, { 0x0 } },
593 { "mix", PSEUDO_FUNC_CONST, { 0x8 } },
594 { "rev", PSEUDO_FUNC_CONST, { 0xb } },
595 { "shuf", PSEUDO_FUNC_CONST, { 0x9 } },
596
597 /* fclass constants: */
598 { "nat", PSEUDO_FUNC_CONST, { 0x100 } },
599 { "qnan", PSEUDO_FUNC_CONST, { 0x080 } },
600 { "snan", PSEUDO_FUNC_CONST, { 0x040 } },
601 { "pos", PSEUDO_FUNC_CONST, { 0x001 } },
602 { "neg", PSEUDO_FUNC_CONST, { 0x002 } },
603 { "zero", PSEUDO_FUNC_CONST, { 0x004 } },
604 { "unorm", PSEUDO_FUNC_CONST, { 0x008 } },
605 { "norm", PSEUDO_FUNC_CONST, { 0x010 } },
606 { "inf", PSEUDO_FUNC_CONST, { 0x020 } },
607
608 { "natval", PSEUDO_FUNC_CONST, { 0x100 } }, /* old usage */
609
610 /* hint constants: */
611 { "pause", PSEUDO_FUNC_CONST, { 0x0 } },
612 { "priority", PSEUDO_FUNC_CONST, { 0x1 } },
613
614 /* tf constants: */
615 { "clz", PSEUDO_FUNC_CONST, { 32 } },
616 { "mpy", PSEUDO_FUNC_CONST, { 33 } },
617 { "datahints", PSEUDO_FUNC_CONST, { 34 } },
618
619 /* unwind-related constants: */
620 { "svr4", PSEUDO_FUNC_CONST, { ELFOSABI_NONE } },
621 { "hpux", PSEUDO_FUNC_CONST, { ELFOSABI_HPUX } },
622 { "nt", PSEUDO_FUNC_CONST, { 2 } }, /* conflicts w/ELFOSABI_NETBSD */
623 { "linux", PSEUDO_FUNC_CONST, { ELFOSABI_GNU } },
624 { "freebsd", PSEUDO_FUNC_CONST, { ELFOSABI_FREEBSD } },
625 { "openvms", PSEUDO_FUNC_CONST, { ELFOSABI_OPENVMS } },
626 { "nsk", PSEUDO_FUNC_CONST, { ELFOSABI_NSK } },
627
628 /* unwind-related registers: */
629 { "priunat",PSEUDO_FUNC_REG, { REG_PRIUNAT } }
630 };
631
632 /* 41-bit nop opcodes (one per unit): */
633 static const bfd_vma nop[IA64_NUM_UNITS] =
634 {
635 0x0000000000LL, /* NIL => break 0 */
636 0x0008000000LL, /* I-unit nop */
637 0x0008000000LL, /* M-unit nop */
638 0x4000000000LL, /* B-unit nop */
639 0x0008000000LL, /* F-unit nop */
640 0x0000000000LL, /* L-"unit" nop immediate */
641 0x0008000000LL, /* X-unit nop */
642 };
643
644 /* Can't be `const' as it's passed to input routines (which have the
645 habit of setting temporary sentinels). */
646 static char special_section_name[][20] =
647 {
648 {".bss"}, {".sbss"}, {".sdata"}, {".rodata"}, {".comment"},
649 {".IA_64.unwind"}, {".IA_64.unwind_info"},
650 {".init_array"}, {".fini_array"}
651 };
652
653 /* The best template for a particular sequence of up to three
654 instructions: */
655 #define N IA64_NUM_TYPES
656 static unsigned char best_template[N][N][N];
657 #undef N
658
659 /* Resource dependencies currently in effect */
660 static struct rsrc {
661 int depind; /* dependency index */
662 const struct ia64_dependency *dependency; /* actual dependency */
663 unsigned specific:1, /* is this a specific bit/regno? */
664 link_to_qp_branch:1; /* will a branch on the same QP clear it?*/
665 int index; /* specific regno/bit within dependency */
666 int note; /* optional qualifying note (0 if none) */
667 #define STATE_NONE 0
668 #define STATE_STOP 1
669 #define STATE_SRLZ 2
670 int insn_srlz; /* current insn serialization state */
671 int data_srlz; /* current data serialization state */
672 int qp_regno; /* qualifying predicate for this usage */
673 const char *file; /* what file marked this dependency */
674 unsigned int line; /* what line marked this dependency */
675 struct mem_offset mem_offset; /* optional memory offset hint */
676 enum { CMP_NONE, CMP_OR, CMP_AND } cmp_type; /* OR or AND compare? */
677 int path; /* corresponding code entry index */
678 } *regdeps = NULL;
679 static int regdepslen = 0;
680 static int regdepstotlen = 0;
681 static const char *dv_mode[] = { "RAW", "WAW", "WAR" };
682 static const char *dv_sem[] = { "none", "implied", "impliedf",
683 "data", "instr", "specific", "stop", "other" };
684 static const char *dv_cmp_type[] = { "none", "OR", "AND" };
685
686 /* Current state of PR mutexation */
687 static struct qpmutex {
688 valueT prmask;
689 int path;
690 } *qp_mutexes = NULL; /* QP mutex bitmasks */
691 static int qp_mutexeslen = 0;
692 static int qp_mutexestotlen = 0;
693 static valueT qp_safe_across_calls = 0;
694
695 /* Current state of PR implications */
696 static struct qp_imply {
697 unsigned p1:6;
698 unsigned p2:6;
699 unsigned p2_branched:1;
700 int path;
701 } *qp_implies = NULL;
702 static int qp_implieslen = 0;
703 static int qp_impliestotlen = 0;
704
705 /* Keep track of static GR values so that indirect register usage can
706 sometimes be tracked. */
707 static struct gr {
708 unsigned known:1;
709 int path;
710 valueT value;
711 } gr_values[128] = {
712 {
713 1,
714 #ifdef INT_MAX
715 INT_MAX,
716 #else
717 (((1 << (8 * sizeof(gr_values->path) - 2)) - 1) << 1) + 1,
718 #endif
719 0
720 }
721 };
722
723 /* Remember the alignment frag. */
724 static fragS *align_frag;
725
726 /* These are the routines required to output the various types of
727 unwind records. */
728
729 /* A slot_number is a frag address plus the slot index (0-2). We use the
730 frag address here so that if there is a section switch in the middle of
731 a function, then instructions emitted to a different section are not
732 counted. Since there may be more than one frag for a function, this
733 means we also need to keep track of which frag this address belongs to
734 so we can compute inter-frag distances. This also nicely solves the
735 problem with nops emitted for align directives, which can't easily be
736 counted, but can easily be derived from frag sizes. */
737
738 typedef struct unw_rec_list {
739 unwind_record r;
740 unsigned long slot_number;
741 fragS *slot_frag;
742 struct unw_rec_list *next;
743 } unw_rec_list;
744
745 #define SLOT_NUM_NOT_SET (unsigned)-1
746
747 /* Linked list of saved prologue counts. A very poor
748 implementation of a map from label numbers to prologue counts. */
749 typedef struct label_prologue_count
750 {
751 struct label_prologue_count *next;
752 unsigned long label_number;
753 unsigned int prologue_count;
754 } label_prologue_count;
755
756 typedef struct proc_pending
757 {
758 symbolS *sym;
759 struct proc_pending *next;
760 } proc_pending;
761
762 static struct
763 {
764 /* Maintain a list of unwind entries for the current function. */
765 unw_rec_list *list;
766 unw_rec_list *tail;
767
768 /* Any unwind entries that should be attached to the current slot
769 that an insn is being constructed for. */
770 unw_rec_list *current_entry;
771
772 /* These are used to create the unwind table entry for this function. */
773 proc_pending proc_pending;
774 symbolS *info; /* pointer to unwind info */
775 symbolS *personality_routine;
776 segT saved_text_seg;
777 subsegT saved_text_subseg;
778 unsigned int force_unwind_entry : 1; /* force generation of unwind entry? */
779
780 /* TRUE if processing unwind directives in a prologue region. */
781 unsigned int prologue : 1;
782 unsigned int prologue_mask : 4;
783 unsigned int prologue_gr : 7;
784 unsigned int body : 1;
785 unsigned int insn : 1;
786 unsigned int prologue_count; /* number of .prologues seen so far */
787 /* Prologue counts at previous .label_state directives. */
788 struct label_prologue_count * saved_prologue_counts;
789
790 /* List of split up .save-s. */
791 unw_p_record *pending_saves;
792 } unwind;
793
794 /* The input value is a negated offset from psp, and specifies an address
795 psp - offset. That address also equals psp + 16 - (4 * encoded value), so we
796 must add 16 and divide by 4 to get the encoded value. */
797
798 #define ENCODED_PSP_OFFSET(OFFSET) (((OFFSET) + 16) / 4)
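/* For example, a location at psp - 32 (OFFSET == 32) encodes as
   (32 + 16) / 4 == 12, and indeed psp + 16 - 4 * 12 == psp - 32.  */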
799
800 typedef void (*vbyte_func) (int, char *, char *);
801
802 /* Forward declarations: */
803 static void dot_alias (int);
804 static int parse_operand_and_eval (expressionS *, int);
805 static void emit_one_bundle (void);
806 static bfd_reloc_code_real_type ia64_gen_real_reloc_type (struct symbol *,
807 bfd_reloc_code_real_type);
808 static void insn_group_break (int, int, int);
809 static void add_qp_mutex (valueT);
810 static void add_qp_imply (int, int);
811 static void clear_qp_mutex (valueT);
812 static void clear_qp_implies (valueT, valueT);
813 static void print_dependency (const char *, int);
814 static void instruction_serialization (void);
815 static void data_serialization (void);
816 static void output_R3_format (vbyte_func, unw_record_type, unsigned long);
817 static void output_B3_format (vbyte_func, unsigned long, unsigned long);
818 static void output_B4_format (vbyte_func, unw_record_type, unsigned long);
819 static void free_saved_prologue_counts (void);
820
821 /* Determine if application register REGNUM resides only in the integer
822 unit (as opposed to the memory unit). */
823 static int
824 ar_is_only_in_integer_unit (int reg)
825 {
826 reg -= REG_AR;
827 return reg >= 64 && reg <= 111;
828 }
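/* For example, ar.lc (AR 65) falls in the 64..111 range and so is
   integer-unit only by this test, while ar.rsc (AR 16) is not.  */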
829
830 /* Determine if application register REGNUM resides only in the memory
831 unit (as opposed to the integer unit). */
832 static int
833 ar_is_only_in_memory_unit (int reg)
834 {
835 reg -= REG_AR;
836 return reg >= 0 && reg <= 47;
837 }
838
839 /* Switch to section NAME and create section if necessary. It's
840 rather ugly that we have to manipulate input_line_pointer but I
841 don't see any other way to accomplish the same thing without
842 changing obj-elf.c (which may be the Right Thing, in the end). */
843 static void
844 set_section (char *name)
845 {
846 char *saved_input_line_pointer;
847
848 saved_input_line_pointer = input_line_pointer;
849 input_line_pointer = name;
850 obj_elf_section (0);
851 input_line_pointer = saved_input_line_pointer;
852 }
853
854 /* Map 's' to SHF_IA_64_SHORT. */
855
856 bfd_vma
857 ia64_elf_section_letter (int letter, const char **ptr_msg)
858 {
859 if (letter == 's')
860 return SHF_IA_64_SHORT;
861 else if (letter == 'o')
862 return SHF_LINK_ORDER;
863 #ifdef TE_VMS
864 else if (letter == 'O')
865 return SHF_IA_64_VMS_OVERLAID;
866 else if (letter == 'g')
867 return SHF_IA_64_VMS_GLOBAL;
868 #endif
869
870 *ptr_msg = _("bad .section directive: want a,o,s,w,x,M,S,G,T in string");
871 return -1;
872 }
873
874 /* Map SHF_IA_64_SHORT to SEC_SMALL_DATA. */
875
876 flagword
877 ia64_elf_section_flags (flagword flags,
878 bfd_vma attr,
879 int type ATTRIBUTE_UNUSED)
880 {
881 if (attr & SHF_IA_64_SHORT)
882 flags |= SEC_SMALL_DATA;
883 return flags;
884 }
885
886 int
887 ia64_elf_section_type (const char *str, size_t len)
888 {
889 #define STREQ(s) ((len == sizeof (s) - 1) && (strncmp (str, s, sizeof (s) - 1) == 0))
890
891 if (STREQ (ELF_STRING_ia64_unwind_info))
892 return SHT_PROGBITS;
893
894 if (STREQ (ELF_STRING_ia64_unwind_info_once))
895 return SHT_PROGBITS;
896
897 if (STREQ (ELF_STRING_ia64_unwind))
898 return SHT_IA_64_UNWIND;
899
900 if (STREQ (ELF_STRING_ia64_unwind_once))
901 return SHT_IA_64_UNWIND;
902
903 if (STREQ ("unwind"))
904 return SHT_IA_64_UNWIND;
905
906 return -1;
907 #undef STREQ
908 }
909
910 static unsigned int
911 set_regstack (unsigned int ins,
912 unsigned int locs,
913 unsigned int outs,
914 unsigned int rots)
915 {
916 /* Size of frame. */
917 unsigned int sof;
918
919 sof = ins + locs + outs;
920 if (sof > 96)
921 {
922 as_bad (_("Size of frame exceeds maximum of 96 registers"));
923 return 0;
924 }
925 if (rots > sof)
926 {
927 as_warn (_("Size of rotating registers exceeds frame size"));
928 return 0;
929 }
930 md.in.base = REG_GR + 32;
931 md.loc.base = md.in.base + ins;
932 md.out.base = md.loc.base + locs;
933
934 md.in.num_regs = ins;
935 md.loc.num_regs = locs;
936 md.out.num_regs = outs;
937 md.rot.num_regs = rots;
938 return sof;
939 }
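/* Illustrative call: set_regstack (2, 3, 1, 0) yields in = r32-r33,
   loc = r34-r36, out = r37, and returns a frame size (sof) of 6.  */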
940
941 void
942 ia64_flush_insns (void)
943 {
944 struct label_fix *lfix;
945 segT saved_seg;
946 subsegT saved_subseg;
947 unw_rec_list *ptr;
948 bool mark;
949
950 if (!md.last_text_seg)
951 return;
952
953 saved_seg = now_seg;
954 saved_subseg = now_subseg;
955
956 subseg_set (md.last_text_seg, md.last_text_subseg);
957
958 while (md.num_slots_in_use > 0)
959 emit_one_bundle (); /* force out queued instructions */
960
961 /* In case there are labels following the last instruction, resolve
962 those now. */
963 mark = false;
964 for (lfix = CURR_SLOT.label_fixups; lfix; lfix = lfix->next)
965 {
966 symbol_set_value_now (lfix->sym);
967 mark |= lfix->dw2_mark_labels;
968 }
969 if (mark)
970 {
971 dwarf2_where (&CURR_SLOT.debug_line);
972 CURR_SLOT.debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
973 dwarf2_gen_line_info (frag_now_fix (), &CURR_SLOT.debug_line);
974 dwarf2_consume_line_info ();
975 }
976 CURR_SLOT.label_fixups = 0;
977
978 for (lfix = CURR_SLOT.tag_fixups; lfix; lfix = lfix->next)
979 symbol_set_value_now (lfix->sym);
980 CURR_SLOT.tag_fixups = 0;
981
982 /* In case there are unwind directives following the last instruction,
983 resolve those now. We only handle prologue, body, and endp directives
984 here. Give an error for others. */
985 for (ptr = unwind.current_entry; ptr; ptr = ptr->next)
986 {
987 switch (ptr->r.type)
988 {
989 case prologue:
990 case prologue_gr:
991 case body:
992 case endp:
993 ptr->slot_number = (unsigned long) frag_more (0);
994 ptr->slot_frag = frag_now;
995 break;
996
997 /* Allow any record which doesn't have a "t" field (i.e.,
998 doesn't relate to a particular instruction). */
999 case unwabi:
1000 case br_gr:
1001 case copy_state:
1002 case fr_mem:
1003 case frgr_mem:
1004 case gr_gr:
1005 case gr_mem:
1006 case label_state:
1007 case rp_br:
1008 case spill_base:
1009 case spill_mask:
1010 /* nothing */
1011 break;
1012
1013 default:
1014 as_bad (_("Unwind directive not followed by an instruction."));
1015 break;
1016 }
1017 }
1018 unwind.current_entry = NULL;
1019
1020 subseg_set (saved_seg, saved_subseg);
1021
1022 if (md.qp.X_op == O_register)
1023 as_bad (_("qualifying predicate not followed by instruction"));
1024 }
1025
1026 void
1027 ia64_cons_align (int nbytes)
1028 {
1029 if (md.auto_align)
1030 {
1031 int log;
1032 for (log = 0; (nbytes & 1) != 1; nbytes >>= 1)
1033 log++;
1034
1035 do_align (log, NULL, 0, 0);
1036 }
1037 }
1038
1039 #ifdef TE_VMS
1040
1041 /* .vms_common section, symbol, size, alignment */
1042
1043 static void
1044 obj_elf_vms_common (int ignore ATTRIBUTE_UNUSED)
1045 {
1046 const char *sec_name;
1047 char *sym_name;
1048 char c;
1049 offsetT size;
1050 offsetT cur_size;
1051 offsetT temp;
1052 symbolS *symbolP;
1053 segT current_seg = now_seg;
1054 subsegT current_subseg = now_subseg;
1055 offsetT log_align;
1056
1057 /* Section name. */
1058 sec_name = obj_elf_section_name ();
1059 if (sec_name == NULL)
1060 return;
1061
1062 /* Symbol name. */
1063 SKIP_WHITESPACE ();
1064 if (*input_line_pointer == ',')
1065 {
1066 input_line_pointer++;
1067 SKIP_WHITESPACE ();
1068 }
1069 else
1070 {
1071 as_bad (_("expected ',' after section name"));
1072 ignore_rest_of_line ();
1073 return;
1074 }
1075
1076 c = get_symbol_name (&sym_name);
1077
1078 if (input_line_pointer == sym_name)
1079 {
1080 (void) restore_line_pointer (c);
1081 as_bad (_("expected symbol name"));
1082 ignore_rest_of_line ();
1083 return;
1084 }
1085
1086 symbolP = symbol_find_or_make (sym_name);
1087 (void) restore_line_pointer (c);
1088
1089 if ((S_IS_DEFINED (symbolP) || symbol_equated_p (symbolP))
1090 && !S_IS_COMMON (symbolP))
1091 {
1092 as_bad (_("Ignoring attempt to re-define symbol"));
1093 ignore_rest_of_line ();
1094 return;
1095 }
1096
1097 /* Symbol size. */
1098 SKIP_WHITESPACE ();
1099 if (*input_line_pointer == ',')
1100 {
1101 input_line_pointer++;
1102 SKIP_WHITESPACE ();
1103 }
1104 else
1105 {
1106 as_bad (_("expected ',' after symbol name"));
1107 ignore_rest_of_line ();
1108 return;
1109 }
1110
1111 temp = get_absolute_expression ();
1112 size = temp;
1113 size &= ((offsetT) 2 << (stdoutput->arch_info->bits_per_address - 1)) - 1;
1114 if (temp != size)
1115 {
1116 as_warn (_("size (%ld) out of range, ignored"), (long) temp);
1117 ignore_rest_of_line ();
1118 return;
1119 }
1120
1121 /* Alignment. */
1122 SKIP_WHITESPACE ();
1123 if (*input_line_pointer == ',')
1124 {
1125 input_line_pointer++;
1126 SKIP_WHITESPACE ();
1127 }
1128 else
1129 {
1130 as_bad (_("expected ',' after symbol size"));
1131 ignore_rest_of_line ();
1132 return;
1133 }
1134
1135 log_align = get_absolute_expression ();
1136
1137 demand_empty_rest_of_line ();
1138
1139 obj_elf_change_section
1140 (sec_name, SHT_NOBITS,
1141 SHF_ALLOC | SHF_WRITE | SHF_IA_64_VMS_OVERLAID | SHF_IA_64_VMS_GLOBAL,
1142 0, NULL, 1, 0);
1143
1144 S_SET_VALUE (symbolP, 0);
1145 S_SET_SIZE (symbolP, size);
1146 S_SET_EXTERNAL (symbolP);
1147 S_SET_SEGMENT (symbolP, now_seg);
1148
1149 symbol_get_bfdsym (symbolP)->flags |= BSF_OBJECT;
1150
1151 record_alignment (now_seg, log_align);
1152
1153 cur_size = bfd_section_size (now_seg);
1154 if ((int) size > cur_size)
1155 {
1156 char *pfrag
1157 = frag_var (rs_fill, 1, 1, (relax_substateT)0, NULL,
1158 (valueT)size - (valueT)cur_size, NULL);
1159 *pfrag = 0;
1160 bfd_set_section_size (now_seg, size);
1161 }
1162
1163 /* Switch back to current segment. */
1164 subseg_set (current_seg, current_subseg);
1165
1166 #ifdef md_elf_section_change_hook
1167 md_elf_section_change_hook ();
1168 #endif
1169 }
1170
1171 #endif /* TE_VMS */
1172
1173 /* Output COUNT bytes to a memory location. */
1174 static char *vbyte_mem_ptr = NULL;
1175
1176 static void
1177 output_vbyte_mem (int count, char *ptr, char *comment ATTRIBUTE_UNUSED)
1178 {
1179 int x;
1180 if (vbyte_mem_ptr == NULL)
1181 abort ();
1182
1183 if (count == 0)
1184 return;
1185 for (x = 0; x < count; x++)
1186 *(vbyte_mem_ptr++) = ptr[x];
1187 }
1188
1189 /* Count the number of bytes required for records. */
1190 static int vbyte_count = 0;
1191 static void
1192 count_output (int count,
1193 char *ptr ATTRIBUTE_UNUSED,
1194 char *comment ATTRIBUTE_UNUSED)
1195 {
1196 vbyte_count += count;
1197 }
1198
1199 static void
1200 output_R1_format (vbyte_func f, unw_record_type rtype, int rlen)
1201 {
1202 int r = 0;
1203 char byte;
1204 if (rlen > 0x1f)
1205 {
1206 output_R3_format (f, rtype, rlen);
1207 return;
1208 }
1209
1210 if (rtype == body)
1211 r = 1;
1212 else if (rtype != prologue)
1213 as_bad (_("record type is not valid"));
1214
1215 byte = UNW_R1 | (r << 5) | (rlen & 0x1f);
1216 (*f) (1, &byte, NULL);
1217 }
1218
1219 static void
1220 output_R2_format (vbyte_func f, int mask, int grsave, unsigned long rlen)
1221 {
1222 char bytes[20];
1223 int count = 2;
1224 mask = (mask & 0x0f);
1225 grsave = (grsave & 0x7f);
1226
1227 bytes[0] = (UNW_R2 | (mask >> 1));
1228 bytes[1] = (((mask & 0x01) << 7) | grsave);
1229 count += output_leb128 (bytes + 2, rlen, 0);
1230 (*f) (count, bytes, NULL);
1231 }
1232
1233 static void
1234 output_R3_format (vbyte_func f, unw_record_type rtype, unsigned long rlen)
1235 {
1236 int r = 0, count;
1237 char bytes[20];
1238 if (rlen <= 0x1f)
1239 {
1240 output_R1_format (f, rtype, rlen);
1241 return;
1242 }
1243
1244 if (rtype == body)
1245 r = 1;
1246 else if (rtype != prologue)
1247 as_bad (_("record type is not valid"));
1248 bytes[0] = (UNW_R3 | r);
1249 count = output_leb128 (bytes + 1, rlen, 0);
1250 (*f) (count + 1, bytes, NULL);
1251 }
1252
1253 static void
1254 output_P1_format (vbyte_func f, int brmask)
1255 {
1256 char byte;
1257 byte = UNW_P1 | (brmask & 0x1f);
1258 (*f) (1, &byte, NULL);
1259 }
1260
1261 static void
1262 output_P2_format (vbyte_func f, int brmask, int gr)
1263 {
1264 char bytes[2];
1265 brmask = (brmask & 0x1f);
1266 bytes[0] = UNW_P2 | (brmask >> 1);
1267 bytes[1] = (((brmask & 1) << 7) | gr);
1268 (*f) (2, bytes, NULL);
1269 }
1270
1271 static void
1272 output_P3_format (vbyte_func f, unw_record_type rtype, int reg)
1273 {
1274 char bytes[2];
1275 int r = 0;
1276 reg = (reg & 0x7f);
1277 switch (rtype)
1278 {
1279 case psp_gr:
1280 r = 0;
1281 break;
1282 case rp_gr:
1283 r = 1;
1284 break;
1285 case pfs_gr:
1286 r = 2;
1287 break;
1288 case preds_gr:
1289 r = 3;
1290 break;
1291 case unat_gr:
1292 r = 4;
1293 break;
1294 case lc_gr:
1295 r = 5;
1296 break;
1297 case rp_br:
1298 r = 6;
1299 break;
1300 case rnat_gr:
1301 r = 7;
1302 break;
1303 case bsp_gr:
1304 r = 8;
1305 break;
1306 case bspstore_gr:
1307 r = 9;
1308 break;
1309 case fpsr_gr:
1310 r = 10;
1311 break;
1312 case priunat_gr:
1313 r = 11;
1314 break;
1315 default:
1316 as_bad (_("Invalid record type for P3 format."));
1317 }
1318 bytes[0] = (UNW_P3 | (r >> 1));
1319 bytes[1] = (((r & 1) << 7) | reg);
1320 (*f) (2, bytes, NULL);
1321 }
1322
1323 static void
1324 output_P4_format (vbyte_func f, unsigned char *imask, unsigned long imask_size)
1325 {
1326 imask[0] = UNW_P4;
1327 (*f) (imask_size, (char *) imask, NULL);
1328 }
1329
1330 static void
1331 output_P5_format (vbyte_func f, int grmask, unsigned long frmask)
1332 {
1333 char bytes[4];
1334 grmask = (grmask & 0x0f);
1335
1336 bytes[0] = UNW_P5;
1337 bytes[1] = ((grmask << 4) | ((frmask & 0x000f0000) >> 16));
1338 bytes[2] = ((frmask & 0x0000ff00) >> 8);
1339 bytes[3] = (frmask & 0x000000ff);
1340 (*f) (4, bytes, NULL);
1341 }
1342
1343 static void
1344 output_P6_format (vbyte_func f, unw_record_type rtype, int rmask)
1345 {
1346 char byte;
1347 int r = 0;
1348
1349 if (rtype == gr_mem)
1350 r = 1;
1351 else if (rtype != fr_mem)
1352 as_bad (_("Invalid record type for format P6"));
1353 byte = (UNW_P6 | (r << 4) | (rmask & 0x0f));
1354 (*f) (1, &byte, NULL);
1355 }
1356
1357 static void
1358 output_P7_format (vbyte_func f,
1359 unw_record_type rtype,
1360 unsigned long w1,
1361 unsigned long w2)
1362 {
1363 char bytes[20];
1364 int count = 1;
1365 int r = 0;
1366 count += output_leb128 (bytes + 1, w1, 0);
1367 switch (rtype)
1368 {
1369 case mem_stack_f:
1370 r = 0;
1371 count += output_leb128 (bytes + count, w2 >> 4, 0);
1372 break;
1373 case mem_stack_v:
1374 r = 1;
1375 break;
1376 case spill_base:
1377 r = 2;
1378 break;
1379 case psp_sprel:
1380 r = 3;
1381 break;
1382 case rp_when:
1383 r = 4;
1384 break;
1385 case rp_psprel:
1386 r = 5;
1387 break;
1388 case pfs_when:
1389 r = 6;
1390 break;
1391 case pfs_psprel:
1392 r = 7;
1393 break;
1394 case preds_when:
1395 r = 8;
1396 break;
1397 case preds_psprel:
1398 r = 9;
1399 break;
1400 case lc_when:
1401 r = 10;
1402 break;
1403 case lc_psprel:
1404 r = 11;
1405 break;
1406 case unat_when:
1407 r = 12;
1408 break;
1409 case unat_psprel:
1410 r = 13;
1411 break;
1412 case fpsr_when:
1413 r = 14;
1414 break;
1415 case fpsr_psprel:
1416 r = 15;
1417 break;
1418 default:
1419 break;
1420 }
1421 bytes[0] = (UNW_P7 | r);
1422 (*f) (count, bytes, NULL);
1423 }
1424
1425 static void
1426 output_P8_format (vbyte_func f, unw_record_type rtype, unsigned long t)
1427 {
1428 char bytes[20];
1429 int r = 0;
1430 int count = 2;
1431 bytes[0] = UNW_P8;
1432 switch (rtype)
1433 {
1434 case rp_sprel:
1435 r = 1;
1436 break;
1437 case pfs_sprel:
1438 r = 2;
1439 break;
1440 case preds_sprel:
1441 r = 3;
1442 break;
1443 case lc_sprel:
1444 r = 4;
1445 break;
1446 case unat_sprel:
1447 r = 5;
1448 break;
1449 case fpsr_sprel:
1450 r = 6;
1451 break;
1452 case bsp_when:
1453 r = 7;
1454 break;
1455 case bsp_psprel:
1456 r = 8;
1457 break;
1458 case bsp_sprel:
1459 r = 9;
1460 break;
1461 case bspstore_when:
1462 r = 10;
1463 break;
1464 case bspstore_psprel:
1465 r = 11;
1466 break;
1467 case bspstore_sprel:
1468 r = 12;
1469 break;
1470 case rnat_when:
1471 r = 13;
1472 break;
1473 case rnat_psprel:
1474 r = 14;
1475 break;
1476 case rnat_sprel:
1477 r = 15;
1478 break;
1479 case priunat_when_gr:
1480 r = 16;
1481 break;
1482 case priunat_psprel:
1483 r = 17;
1484 break;
1485 case priunat_sprel:
1486 r = 18;
1487 break;
1488 case priunat_when_mem:
1489 r = 19;
1490 break;
1491 default:
1492 break;
1493 }
1494 bytes[1] = r;
1495 count += output_leb128 (bytes + 2, t, 0);
1496 (*f) (count, bytes, NULL);
1497 }
1498
1499 static void
1500 output_P9_format (vbyte_func f, int grmask, int gr)
1501 {
1502 char bytes[3];
1503 bytes[0] = UNW_P9;
1504 bytes[1] = (grmask & 0x0f);
1505 bytes[2] = (gr & 0x7f);
1506 (*f) (3, bytes, NULL);
1507 }
1508
1509 static void
1510 output_P10_format (vbyte_func f, int abi, int context)
1511 {
1512 char bytes[3];
1513 bytes[0] = UNW_P10;
1514 bytes[1] = (abi & 0xff);
1515 bytes[2] = (context & 0xff);
1516 (*f) (3, bytes, NULL);
1517 }
1518
1519 static void
1520 output_B1_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1521 {
1522 char byte;
1523 int r = 0;
1524 if (label > 0x1f)
1525 {
1526 output_B4_format (f, rtype, label);
1527 return;
1528 }
1529 if (rtype == copy_state)
1530 r = 1;
1531 else if (rtype != label_state)
1532 as_bad (_("Invalid record type for format B1"));
1533
1534 byte = (UNW_B1 | (r << 5) | (label & 0x1f));
1535 (*f) (1, &byte, NULL);
1536 }
1537
1538 static void
1539 output_B2_format (vbyte_func f, unsigned long ecount, unsigned long t)
1540 {
1541 char bytes[20];
1542 int count = 1;
1543 if (ecount > 0x1f)
1544 {
1545 output_B3_format (f, ecount, t);
1546 return;
1547 }
1548 bytes[0] = (UNW_B2 | (ecount & 0x1f));
1549 count += output_leb128 (bytes + 1, t, 0);
1550 (*f) (count, bytes, NULL);
1551 }
1552
1553 static void
1554 output_B3_format (vbyte_func f, unsigned long ecount, unsigned long t)
1555 {
1556 char bytes[20];
1557 int count = 1;
1558 if (ecount <= 0x1f)
1559 {
1560 output_B2_format (f, ecount, t);
1561 return;
1562 }
1563 bytes[0] = UNW_B3;
1564 count += output_leb128 (bytes + 1, t, 0);
1565 count += output_leb128 (bytes + count, ecount, 0);
1566 (*f) (count, bytes, NULL);
1567 }
1568
1569 static void
1570 output_B4_format (vbyte_func f, unw_record_type rtype, unsigned long label)
1571 {
1572 char bytes[20];
1573 int r = 0;
1574 int count = 1;
1575 if (label <= 0x1f)
1576 {
1577 output_B1_format (f, rtype, label);
1578 return;
1579 }
1580
1581 if (rtype == copy_state)
1582 r = 1;
1583 else if (rtype != label_state)
1584 as_bad (_("Invalid record type for format B1"));
1585
1586 bytes[0] = (UNW_B4 | (r << 3));
1587 count += output_leb128 (bytes + 1, label, 0);
1588 (*f) (count, bytes, NULL);
1589 }
1590
1591 static char
1592 format_ab_reg (int ab, int reg)
1593 {
1594 int ret;
1595 ab = (ab & 3);
1596 reg = (reg & 0x1f);
1597 ret = (ab << 5) | reg;
1598 return ret;
1599 }
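/* For instance, ab == 1 and reg == 3 pack into (1 << 5) | 3 == 0x23.  */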
1600
1601 static void
1602 output_X1_format (vbyte_func f,
1603 unw_record_type rtype,
1604 int ab,
1605 int reg,
1606 unsigned long t,
1607 unsigned long w1)
1608 {
1609 char bytes[20];
1610 int r = 0;
1611 int count = 2;
1612 bytes[0] = UNW_X1;
1613
1614 if (rtype == spill_sprel)
1615 r = 1;
1616 else if (rtype != spill_psprel)
1617 as_bad (_("Invalid record type for format X1"));
1618 bytes[1] = ((r << 7) | format_ab_reg (ab, reg));
1619 count += output_leb128 (bytes + 2, t, 0);
1620 count += output_leb128 (bytes + count, w1, 0);
1621 (*f) (count, bytes, NULL);
1622 }
1623
1624 static void
1625 output_X2_format (vbyte_func f,
1626 int ab,
1627 int reg,
1628 int x,
1629 int y,
1630 int treg,
1631 unsigned long t)
1632 {
1633 char bytes[20];
1634 int count = 3;
1635 bytes[0] = UNW_X2;
1636 bytes[1] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1637 bytes[2] = (((y & 1) << 7) | (treg & 0x7f));
1638 count += output_leb128 (bytes + 3, t, 0);
1639 (*f) (count, bytes, NULL);
1640 }
1641
1642 static void
1643 output_X3_format (vbyte_func f,
1644 unw_record_type rtype,
1645 int qp,
1646 int ab,
1647 int reg,
1648 unsigned long t,
1649 unsigned long w1)
1650 {
1651 char bytes[20];
1652 int r = 0;
1653 int count = 3;
1654 bytes[0] = UNW_X3;
1655
1656 if (rtype == spill_sprel_p)
1657 r = 1;
1658 else if (rtype != spill_psprel_p)
1659 as_bad (_("Invalid record type for format X3"));
1660 bytes[1] = ((r << 7) | (qp & 0x3f));
1661 bytes[2] = format_ab_reg (ab, reg);
1662 count += output_leb128 (bytes + 3, t, 0);
1663 count += output_leb128 (bytes + count, w1, 0);
1664 (*f) (count, bytes, NULL);
1665 }
1666
1667 static void
1668 output_X4_format (vbyte_func f,
1669 int qp,
1670 int ab,
1671 int reg,
1672 int x,
1673 int y,
1674 int treg,
1675 unsigned long t)
1676 {
1677 char bytes[20];
1678 int count = 4;
1679 bytes[0] = UNW_X4;
1680 bytes[1] = (qp & 0x3f);
1681 bytes[2] = (((x & 1) << 7) | format_ab_reg (ab, reg));
1682 bytes[3] = (((y & 1) << 7) | (treg & 0x7f));
1683 count += output_leb128 (bytes + 4, t, 0);
1684 (*f) (count, bytes, NULL);
1685 }
1686
1687 /* This function checks whether there are any outstanding .save-s and
1688 discards them if so. */
1689
1690 static void
1691 check_pending_save (void)
1692 {
1693 if (unwind.pending_saves)
1694 {
1695 unw_rec_list *cur, *prev;
1696
1697 as_warn (_("Previous .save incomplete"));
1698 for (cur = unwind.list, prev = NULL; cur; )
1699 if (&cur->r.record.p == unwind.pending_saves)
1700 {
1701 if (prev)
1702 prev->next = cur->next;
1703 else
1704 unwind.list = cur->next;
1705 if (cur == unwind.tail)
1706 unwind.tail = prev;
1707 if (cur == unwind.current_entry)
1708 unwind.current_entry = cur->next;
1709 /* Don't free the first discarded record, it's being used as
1710 terminator for (currently) br_gr and gr_gr processing, and
1711 also prevents leaving a dangling pointer to it in its
1712 predecessor. */
1713 cur->r.record.p.grmask = 0;
1714 cur->r.record.p.brmask = 0;
1715 cur->r.record.p.frmask = 0;
1716 prev = cur->r.record.p.next;
1717 cur->r.record.p.next = NULL;
1718 cur = prev;
1719 break;
1720 }
1721 else
1722 {
1723 prev = cur;
1724 cur = cur->next;
1725 }
1726 while (cur)
1727 {
1728 prev = cur;
1729 cur = cur->r.record.p.next;
1730 free (prev);
1731 }
1732 unwind.pending_saves = NULL;
1733 }
1734 }
1735
1736 /* This function allocates a record list structure, and initializes fields. */
1737
1738 static unw_rec_list *
1739 alloc_record (unw_record_type t)
1740 {
1741 unw_rec_list *ptr;
1742 ptr = XNEW (unw_rec_list);
1743 memset (ptr, 0, sizeof (*ptr));
1744 ptr->slot_number = SLOT_NUM_NOT_SET;
1745 ptr->r.type = t;
1746 return ptr;
1747 }
1748
1749 /* Dummy unwind record used for calculating the length of the last prologue or
1750 body region. */
1751
1752 static unw_rec_list *
1753 output_endp (void)
1754 {
1755 unw_rec_list *ptr = alloc_record (endp);
1756 return ptr;
1757 }
1758
1759 static unw_rec_list *
1760 output_prologue (void)
1761 {
1762 unw_rec_list *ptr = alloc_record (prologue);
1763 return ptr;
1764 }
1765
1766 static unw_rec_list *
1767 output_prologue_gr (unsigned int saved_mask, unsigned int reg)
1768 {
1769 unw_rec_list *ptr = alloc_record (prologue_gr);
1770 ptr->r.record.r.grmask = saved_mask;
1771 ptr->r.record.r.grsave = reg;
1772 return ptr;
1773 }
1774
1775 static unw_rec_list *
1776 output_body (void)
1777 {
1778 unw_rec_list *ptr = alloc_record (body);
1779 return ptr;
1780 }
1781
1782 static unw_rec_list *
1783 output_mem_stack_f (unsigned int size)
1784 {
1785 unw_rec_list *ptr = alloc_record (mem_stack_f);
1786 ptr->r.record.p.size = size;
1787 return ptr;
1788 }
1789
1790 static unw_rec_list *
1791 output_mem_stack_v (void)
1792 {
1793 unw_rec_list *ptr = alloc_record (mem_stack_v);
1794 return ptr;
1795 }
1796
1797 static unw_rec_list *
1798 output_psp_gr (unsigned int gr)
1799 {
1800 unw_rec_list *ptr = alloc_record (psp_gr);
1801 ptr->r.record.p.r.gr = gr;
1802 return ptr;
1803 }
1804
1805 static unw_rec_list *
1806 output_psp_sprel (unsigned int offset)
1807 {
1808 unw_rec_list *ptr = alloc_record (psp_sprel);
1809 ptr->r.record.p.off.sp = offset / 4;
1810 return ptr;
1811 }
1812
1813 static unw_rec_list *
1814 output_rp_when (void)
1815 {
1816 unw_rec_list *ptr = alloc_record (rp_when);
1817 return ptr;
1818 }
1819
1820 static unw_rec_list *
1821 output_rp_gr (unsigned int gr)
1822 {
1823 unw_rec_list *ptr = alloc_record (rp_gr);
1824 ptr->r.record.p.r.gr = gr;
1825 return ptr;
1826 }
1827
1828 static unw_rec_list *
1829 output_rp_br (unsigned int br)
1830 {
1831 unw_rec_list *ptr = alloc_record (rp_br);
1832 ptr->r.record.p.r.br = br;
1833 return ptr;
1834 }
1835
1836 static unw_rec_list *
1837 output_rp_psprel (unsigned int offset)
1838 {
1839 unw_rec_list *ptr = alloc_record (rp_psprel);
1840 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1841 return ptr;
1842 }
1843
1844 static unw_rec_list *
1845 output_rp_sprel (unsigned int offset)
1846 {
1847 unw_rec_list *ptr = alloc_record (rp_sprel);
1848 ptr->r.record.p.off.sp = offset / 4;
1849 return ptr;
1850 }
1851
1852 static unw_rec_list *
1853 output_pfs_when (void)
1854 {
1855 unw_rec_list *ptr = alloc_record (pfs_when);
1856 return ptr;
1857 }
1858
1859 static unw_rec_list *
1860 output_pfs_gr (unsigned int gr)
1861 {
1862 unw_rec_list *ptr = alloc_record (pfs_gr);
1863 ptr->r.record.p.r.gr = gr;
1864 return ptr;
1865 }
1866
1867 static unw_rec_list *
1868 output_pfs_psprel (unsigned int offset)
1869 {
1870 unw_rec_list *ptr = alloc_record (pfs_psprel);
1871 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1872 return ptr;
1873 }
1874
1875 static unw_rec_list *
1876 output_pfs_sprel (unsigned int offset)
1877 {
1878 unw_rec_list *ptr = alloc_record (pfs_sprel);
1879 ptr->r.record.p.off.sp = offset / 4;
1880 return ptr;
1881 }
1882
1883 static unw_rec_list *
1884 output_preds_when (void)
1885 {
1886 unw_rec_list *ptr = alloc_record (preds_when);
1887 return ptr;
1888 }
1889
1890 static unw_rec_list *
1891 output_preds_gr (unsigned int gr)
1892 {
1893 unw_rec_list *ptr = alloc_record (preds_gr);
1894 ptr->r.record.p.r.gr = gr;
1895 return ptr;
1896 }
1897
1898 static unw_rec_list *
1899 output_preds_psprel (unsigned int offset)
1900 {
1901 unw_rec_list *ptr = alloc_record (preds_psprel);
1902 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
1903 return ptr;
1904 }
1905
1906 static unw_rec_list *
1907 output_preds_sprel (unsigned int offset)
1908 {
1909 unw_rec_list *ptr = alloc_record (preds_sprel);
1910 ptr->r.record.p.off.sp = offset / 4;
1911 return ptr;
1912 }
1913
1914 static unw_rec_list *
1915 output_fr_mem (unsigned int mask)
1916 {
1917 unw_rec_list *ptr = alloc_record (fr_mem);
1918 unw_rec_list *cur = ptr;
1919
1920 ptr->r.record.p.frmask = mask;
1921 unwind.pending_saves = &ptr->r.record.p;
1922 for (;;)
1923 {
1924 unw_rec_list *prev = cur;
1925
1926 /* Clear least significant set bit. */
1927 mask &= ~(mask & (~mask + 1));
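/* E.g. if mask is 0x28 (binary 101000), mask & (~mask + 1) is 0x08, so
   the statement above leaves mask == 0x20.  */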
1928 if (!mask)
1929 return ptr;
1930 cur = alloc_record (fr_mem);
1931 cur->r.record.p.frmask = mask;
1932 /* Retain only least significant bit. */
1933 prev->r.record.p.frmask ^= mask;
1934 prev->r.record.p.next = cur;
1935 }
1936 }
1937
1938 static unw_rec_list *
1939 output_frgr_mem (unsigned int gr_mask, unsigned int fr_mask)
1940 {
1941 unw_rec_list *ptr = alloc_record (frgr_mem);
1942 unw_rec_list *cur = ptr;
1943
1944 unwind.pending_saves = &cur->r.record.p;
1945 cur->r.record.p.frmask = fr_mask;
1946 while (fr_mask)
1947 {
1948 unw_rec_list *prev = cur;
1949
1950 /* Clear least significant set bit. */
1951 fr_mask &= ~(fr_mask & (~fr_mask + 1));
1952 if (!gr_mask && !fr_mask)
1953 return ptr;
1954 cur = alloc_record (frgr_mem);
1955 cur->r.record.p.frmask = fr_mask;
1956 /* Retain only least significant bit. */
1957 prev->r.record.p.frmask ^= fr_mask;
1958 prev->r.record.p.next = cur;
1959 }
1960 cur->r.record.p.grmask = gr_mask;
1961 for (;;)
1962 {
1963 unw_rec_list *prev = cur;
1964
1965 /* Clear least significant set bit. */
1966 gr_mask &= ~(gr_mask & (~gr_mask + 1));
1967 if (!gr_mask)
1968 return ptr;
1969 cur = alloc_record (frgr_mem);
1970 cur->r.record.p.grmask = gr_mask;
1971 /* Retain only least significant bit. */
1972 prev->r.record.p.grmask ^= gr_mask;
1973 prev->r.record.p.next = cur;
1974 }
1975 }
1976
1977 static unw_rec_list *
1978 output_gr_gr (unsigned int mask, unsigned int reg)
1979 {
1980 unw_rec_list *ptr = alloc_record (gr_gr);
1981 unw_rec_list *cur = ptr;
1982
1983 ptr->r.record.p.grmask = mask;
1984 ptr->r.record.p.r.gr = reg;
1985 unwind.pending_saves = &ptr->r.record.p;
1986 for (;;)
1987 {
1988 unw_rec_list *prev = cur;
1989
1990 /* Clear least significant set bit. */
1991 mask &= ~(mask & (~mask + 1));
1992 if (!mask)
1993 return ptr;
1994 cur = alloc_record (gr_gr);
1995 cur->r.record.p.grmask = mask;
1996 /* Indicate this record shouldn't be output. */
1997 cur->r.record.p.r.gr = REG_NUM;
1998 /* Retain only least significant bit. */
1999 prev->r.record.p.grmask ^= mask;
2000 prev->r.record.p.next = cur;
2001 }
2002 }
2003
2004 static unw_rec_list *
2005 output_gr_mem (unsigned int mask)
2006 {
2007 unw_rec_list *ptr = alloc_record (gr_mem);
2008 unw_rec_list *cur = ptr;
2009
2010 ptr->r.record.p.grmask = mask;
2011 unwind.pending_saves = &ptr->r.record.p;
2012 for (;;)
2013 {
2014 unw_rec_list *prev = cur;
2015
2016 /* Clear least significant set bit. */
2017 mask &= ~(mask & (~mask + 1));
2018 if (!mask)
2019 return ptr;
2020 cur = alloc_record (gr_mem);
2021 cur->r.record.p.grmask = mask;
2022 /* Retain only least significant bit. */
2023 prev->r.record.p.grmask ^= mask;
2024 prev->r.record.p.next = cur;
2025 }
2026 }
2027
2028 static unw_rec_list *
2029 output_br_mem (unsigned int mask)
2030 {
2031 unw_rec_list *ptr = alloc_record (br_mem);
2032 unw_rec_list *cur = ptr;
2033
2034 ptr->r.record.p.brmask = mask;
2035 unwind.pending_saves = &ptr->r.record.p;
2036 for (;;)
2037 {
2038 unw_rec_list *prev = cur;
2039
2040 /* Clear least significant set bit. */
2041 mask &= ~(mask & (~mask + 1));
2042 if (!mask)
2043 return ptr;
2044 cur = alloc_record (br_mem);
2045 cur->r.record.p.brmask = mask;
2046 /* Retain only least significant bit. */
2047 prev->r.record.p.brmask ^= mask;
2048 prev->r.record.p.next = cur;
2049 }
2050 }
2051
2052 static unw_rec_list *
2053 output_br_gr (unsigned int mask, unsigned int reg)
2054 {
2055 unw_rec_list *ptr = alloc_record (br_gr);
2056 unw_rec_list *cur = ptr;
2057
2058 ptr->r.record.p.brmask = mask;
2059 ptr->r.record.p.r.gr = reg;
2060 unwind.pending_saves = &ptr->r.record.p;
2061 for (;;)
2062 {
2063 unw_rec_list *prev = cur;
2064
2065 /* Clear least significant set bit. */
2066 mask &= ~(mask & (~mask + 1));
2067 if (!mask)
2068 return ptr;
2069 cur = alloc_record (br_gr);
2070 cur->r.record.p.brmask = mask;
2071 /* Indicate this record shouldn't be output. */
2072 cur->r.record.p.r.gr = REG_NUM;
2073 /* Retain only least significant bit. */
2074 prev->r.record.p.brmask ^= mask;
2075 prev->r.record.p.next = cur;
2076 }
2077 }
2078
2079 static unw_rec_list *
output_spill_base(unsigned int offset)2080 output_spill_base (unsigned int offset)
2081 {
2082 unw_rec_list *ptr = alloc_record (spill_base);
2083 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2084 return ptr;
2085 }
2086
2087 static unw_rec_list *
output_unat_when(void)2088 output_unat_when (void)
2089 {
2090 unw_rec_list *ptr = alloc_record (unat_when);
2091 return ptr;
2092 }
2093
2094 static unw_rec_list *
output_unat_gr(unsigned int gr)2095 output_unat_gr (unsigned int gr)
2096 {
2097 unw_rec_list *ptr = alloc_record (unat_gr);
2098 ptr->r.record.p.r.gr = gr;
2099 return ptr;
2100 }
2101
2102 static unw_rec_list *
output_unat_psprel(unsigned int offset)2103 output_unat_psprel (unsigned int offset)
2104 {
2105 unw_rec_list *ptr = alloc_record (unat_psprel);
2106 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2107 return ptr;
2108 }
2109
2110 static unw_rec_list *
output_unat_sprel(unsigned int offset)2111 output_unat_sprel (unsigned int offset)
2112 {
2113 unw_rec_list *ptr = alloc_record (unat_sprel);
2114 ptr->r.record.p.off.sp = offset / 4;
2115 return ptr;
2116 }
2117
2118 static unw_rec_list *
output_lc_when(void)2119 output_lc_when (void)
2120 {
2121 unw_rec_list *ptr = alloc_record (lc_when);
2122 return ptr;
2123 }
2124
2125 static unw_rec_list *
output_lc_gr(unsigned int gr)2126 output_lc_gr (unsigned int gr)
2127 {
2128 unw_rec_list *ptr = alloc_record (lc_gr);
2129 ptr->r.record.p.r.gr = gr;
2130 return ptr;
2131 }
2132
2133 static unw_rec_list *
output_lc_psprel(unsigned int offset)2134 output_lc_psprel (unsigned int offset)
2135 {
2136 unw_rec_list *ptr = alloc_record (lc_psprel);
2137 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2138 return ptr;
2139 }
2140
2141 static unw_rec_list *
output_lc_sprel(unsigned int offset)2142 output_lc_sprel (unsigned int offset)
2143 {
2144 unw_rec_list *ptr = alloc_record (lc_sprel);
2145 ptr->r.record.p.off.sp = offset / 4;
2146 return ptr;
2147 }
2148
2149 static unw_rec_list *
output_fpsr_when(void)2150 output_fpsr_when (void)
2151 {
2152 unw_rec_list *ptr = alloc_record (fpsr_when);
2153 return ptr;
2154 }
2155
2156 static unw_rec_list *
output_fpsr_gr(unsigned int gr)2157 output_fpsr_gr (unsigned int gr)
2158 {
2159 unw_rec_list *ptr = alloc_record (fpsr_gr);
2160 ptr->r.record.p.r.gr = gr;
2161 return ptr;
2162 }
2163
2164 static unw_rec_list *
output_fpsr_psprel(unsigned int offset)2165 output_fpsr_psprel (unsigned int offset)
2166 {
2167 unw_rec_list *ptr = alloc_record (fpsr_psprel);
2168 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2169 return ptr;
2170 }
2171
2172 static unw_rec_list *
output_fpsr_sprel(unsigned int offset)2173 output_fpsr_sprel (unsigned int offset)
2174 {
2175 unw_rec_list *ptr = alloc_record (fpsr_sprel);
2176 ptr->r.record.p.off.sp = offset / 4;
2177 return ptr;
2178 }
2179
2180 static unw_rec_list *
output_priunat_when_gr(void)2181 output_priunat_when_gr (void)
2182 {
2183 unw_rec_list *ptr = alloc_record (priunat_when_gr);
2184 return ptr;
2185 }
2186
2187 static unw_rec_list *
output_priunat_when_mem(void)2188 output_priunat_when_mem (void)
2189 {
2190 unw_rec_list *ptr = alloc_record (priunat_when_mem);
2191 return ptr;
2192 }
2193
2194 static unw_rec_list *
output_priunat_gr(unsigned int gr)2195 output_priunat_gr (unsigned int gr)
2196 {
2197 unw_rec_list *ptr = alloc_record (priunat_gr);
2198 ptr->r.record.p.r.gr = gr;
2199 return ptr;
2200 }
2201
2202 static unw_rec_list *
output_priunat_psprel(unsigned int offset)2203 output_priunat_psprel (unsigned int offset)
2204 {
2205 unw_rec_list *ptr = alloc_record (priunat_psprel);
2206 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2207 return ptr;
2208 }
2209
2210 static unw_rec_list *
output_priunat_sprel(unsigned int offset)2211 output_priunat_sprel (unsigned int offset)
2212 {
2213 unw_rec_list *ptr = alloc_record (priunat_sprel);
2214 ptr->r.record.p.off.sp = offset / 4;
2215 return ptr;
2216 }
2217
2218 static unw_rec_list *
output_bsp_when(void)2219 output_bsp_when (void)
2220 {
2221 unw_rec_list *ptr = alloc_record (bsp_when);
2222 return ptr;
2223 }
2224
2225 static unw_rec_list *
output_bsp_gr(unsigned int gr)2226 output_bsp_gr (unsigned int gr)
2227 {
2228 unw_rec_list *ptr = alloc_record (bsp_gr);
2229 ptr->r.record.p.r.gr = gr;
2230 return ptr;
2231 }
2232
2233 static unw_rec_list *
output_bsp_psprel(unsigned int offset)2234 output_bsp_psprel (unsigned int offset)
2235 {
2236 unw_rec_list *ptr = alloc_record (bsp_psprel);
2237 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2238 return ptr;
2239 }
2240
2241 static unw_rec_list *
output_bsp_sprel(unsigned int offset)2242 output_bsp_sprel (unsigned int offset)
2243 {
2244 unw_rec_list *ptr = alloc_record (bsp_sprel);
2245 ptr->r.record.p.off.sp = offset / 4;
2246 return ptr;
2247 }
2248
2249 static unw_rec_list *
output_bspstore_when(void)2250 output_bspstore_when (void)
2251 {
2252 unw_rec_list *ptr = alloc_record (bspstore_when);
2253 return ptr;
2254 }
2255
2256 static unw_rec_list *
output_bspstore_gr(unsigned int gr)2257 output_bspstore_gr (unsigned int gr)
2258 {
2259 unw_rec_list *ptr = alloc_record (bspstore_gr);
2260 ptr->r.record.p.r.gr = gr;
2261 return ptr;
2262 }
2263
2264 static unw_rec_list *
output_bspstore_psprel(unsigned int offset)2265 output_bspstore_psprel (unsigned int offset)
2266 {
2267 unw_rec_list *ptr = alloc_record (bspstore_psprel);
2268 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2269 return ptr;
2270 }
2271
2272 static unw_rec_list *
output_bspstore_sprel(unsigned int offset)2273 output_bspstore_sprel (unsigned int offset)
2274 {
2275 unw_rec_list *ptr = alloc_record (bspstore_sprel);
2276 ptr->r.record.p.off.sp = offset / 4;
2277 return ptr;
2278 }
2279
2280 static unw_rec_list *
output_rnat_when(void)2281 output_rnat_when (void)
2282 {
2283 unw_rec_list *ptr = alloc_record (rnat_when);
2284 return ptr;
2285 }
2286
2287 static unw_rec_list *
output_rnat_gr(unsigned int gr)2288 output_rnat_gr (unsigned int gr)
2289 {
2290 unw_rec_list *ptr = alloc_record (rnat_gr);
2291 ptr->r.record.p.r.gr = gr;
2292 return ptr;
2293 }
2294
2295 static unw_rec_list *
output_rnat_psprel(unsigned int offset)2296 output_rnat_psprel (unsigned int offset)
2297 {
2298 unw_rec_list *ptr = alloc_record (rnat_psprel);
2299 ptr->r.record.p.off.psp = ENCODED_PSP_OFFSET (offset);
2300 return ptr;
2301 }
2302
2303 static unw_rec_list *
output_rnat_sprel(unsigned int offset)2304 output_rnat_sprel (unsigned int offset)
2305 {
2306 unw_rec_list *ptr = alloc_record (rnat_sprel);
2307 ptr->r.record.p.off.sp = offset / 4;
2308 return ptr;
2309 }
2310
2311 static unw_rec_list *
output_unwabi(unsigned long abi,unsigned long context)2312 output_unwabi (unsigned long abi, unsigned long context)
2313 {
2314 unw_rec_list *ptr = alloc_record (unwabi);
2315 ptr->r.record.p.abi = abi;
2316 ptr->r.record.p.context = context;
2317 return ptr;
2318 }
2319
2320 static unw_rec_list *
output_epilogue(unsigned long ecount)2321 output_epilogue (unsigned long ecount)
2322 {
2323 unw_rec_list *ptr = alloc_record (epilogue);
2324 ptr->r.record.b.ecount = ecount;
2325 return ptr;
2326 }
2327
2328 static unw_rec_list *
output_label_state(unsigned long label)2329 output_label_state (unsigned long label)
2330 {
2331 unw_rec_list *ptr = alloc_record (label_state);
2332 ptr->r.record.b.label = label;
2333 return ptr;
2334 }
2335
2336 static unw_rec_list *
output_copy_state(unsigned long label)2337 output_copy_state (unsigned long label)
2338 {
2339 unw_rec_list *ptr = alloc_record (copy_state);
2340 ptr->r.record.b.label = label;
2341 return ptr;
2342 }
2343
2344 static unw_rec_list *
output_spill_psprel(unsigned int ab,unsigned int reg,unsigned int offset,unsigned int predicate)2345 output_spill_psprel (unsigned int ab,
2346 unsigned int reg,
2347 unsigned int offset,
2348 unsigned int predicate)
2349 {
2350 unw_rec_list *ptr = alloc_record (predicate ? spill_psprel_p : spill_psprel);
2351 ptr->r.record.x.ab = ab;
2352 ptr->r.record.x.reg = reg;
2353 ptr->r.record.x.where.pspoff = ENCODED_PSP_OFFSET (offset);
2354 ptr->r.record.x.qp = predicate;
2355 return ptr;
2356 }
2357
2358 static unw_rec_list *
output_spill_sprel(unsigned int ab,unsigned int reg,unsigned int offset,unsigned int predicate)2359 output_spill_sprel (unsigned int ab,
2360 unsigned int reg,
2361 unsigned int offset,
2362 unsigned int predicate)
2363 {
2364 unw_rec_list *ptr = alloc_record (predicate ? spill_sprel_p : spill_sprel);
2365 ptr->r.record.x.ab = ab;
2366 ptr->r.record.x.reg = reg;
2367 ptr->r.record.x.where.spoff = offset / 4;
2368 ptr->r.record.x.qp = predicate;
2369 return ptr;
2370 }
2371
2372 static unw_rec_list *
output_spill_reg(unsigned int ab,unsigned int reg,unsigned int targ_reg,unsigned int xy,unsigned int predicate)2373 output_spill_reg (unsigned int ab,
2374 unsigned int reg,
2375 unsigned int targ_reg,
2376 unsigned int xy,
2377 unsigned int predicate)
2378 {
2379 unw_rec_list *ptr = alloc_record (predicate ? spill_reg_p : spill_reg);
2380 ptr->r.record.x.ab = ab;
2381 ptr->r.record.x.reg = reg;
2382 ptr->r.record.x.where.reg = targ_reg;
2383 ptr->r.record.x.xy = xy;
2384 ptr->r.record.x.qp = predicate;
2385 return ptr;
2386 }
2387
2388 /* Given a unw_rec_list process the correct format with the
2389 specified function. */
2390
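/* F is either count_output (when sizing the image via calc_record_size)
   or output_vbyte_mem (when emitting it from ia64_convert_frag), so the
   same switch drives both the size computation and the final output.  */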
static void
process_one_record (unw_rec_list *ptr, vbyte_func f)
{
  unsigned int fr_mask, gr_mask;

  switch (ptr->r.type)
    {
      /* This is a dummy record that takes up no space in the output.  */
    case endp:
      break;

    case gr_mem:
    case fr_mem:
    case br_mem:
    case frgr_mem:
      /* These are taken care of by prologue/prologue_gr.  */
      break;

    case prologue_gr:
    case prologue:
      if (ptr->r.type == prologue_gr)
        output_R2_format (f, ptr->r.record.r.grmask,
                          ptr->r.record.r.grsave, ptr->r.record.r.rlen);
      else
        output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);

      /* Output descriptor(s) for union of register spills (if any).  */
      gr_mask = ptr->r.record.r.mask.gr_mem;
      fr_mask = ptr->r.record.r.mask.fr_mem;
      if (fr_mask)
        {
          if ((fr_mask & ~0xfUL) == 0)
            output_P6_format (f, fr_mem, fr_mask);
          else
            {
              output_P5_format (f, gr_mask, fr_mask);
              gr_mask = 0;
            }
        }
      if (gr_mask)
        output_P6_format (f, gr_mem, gr_mask);
      if (ptr->r.record.r.mask.br_mem)
        output_P1_format (f, ptr->r.record.r.mask.br_mem);

      /* output imask descriptor if necessary:  */
      if (ptr->r.record.r.mask.i)
        output_P4_format (f, ptr->r.record.r.mask.i,
                          ptr->r.record.r.imask_size);
      break;

    case body:
      output_R1_format (f, ptr->r.type, ptr->r.record.r.rlen);
      break;
    case mem_stack_f:
    case mem_stack_v:
      output_P7_format (f, ptr->r.type, ptr->r.record.p.t,
                        ptr->r.record.p.size);
      break;
    case psp_gr:
    case rp_gr:
    case pfs_gr:
    case preds_gr:
    case unat_gr:
    case lc_gr:
    case fpsr_gr:
    case priunat_gr:
    case bsp_gr:
    case bspstore_gr:
    case rnat_gr:
      output_P3_format (f, ptr->r.type, ptr->r.record.p.r.gr);
      break;
    case rp_br:
      output_P3_format (f, rp_br, ptr->r.record.p.r.br);
      break;
    case psp_sprel:
      output_P7_format (f, psp_sprel, ptr->r.record.p.off.sp, 0);
      break;
    case rp_when:
    case pfs_when:
    case preds_when:
    case unat_when:
    case lc_when:
    case fpsr_when:
      output_P7_format (f, ptr->r.type, ptr->r.record.p.t, 0);
      break;
    case rp_psprel:
    case pfs_psprel:
    case preds_psprel:
    case unat_psprel:
    case lc_psprel:
    case fpsr_psprel:
    case spill_base:
      output_P7_format (f, ptr->r.type, ptr->r.record.p.off.psp, 0);
      break;
    case rp_sprel:
    case pfs_sprel:
    case preds_sprel:
    case unat_sprel:
    case lc_sprel:
    case fpsr_sprel:
    case priunat_sprel:
    case bsp_sprel:
    case bspstore_sprel:
    case rnat_sprel:
      output_P8_format (f, ptr->r.type, ptr->r.record.p.off.sp);
      break;
    case gr_gr:
      if (ptr->r.record.p.r.gr < REG_NUM)
        {
          const unw_rec_list *cur = ptr;

          gr_mask = cur->r.record.p.grmask;
          while ((cur = cur->r.record.p.next) != NULL)
            gr_mask |= cur->r.record.p.grmask;
          output_P9_format (f, gr_mask, ptr->r.record.p.r.gr);
        }
      break;
    case br_gr:
      if (ptr->r.record.p.r.gr < REG_NUM)
        {
          const unw_rec_list *cur = ptr;

          gr_mask = cur->r.record.p.brmask;
          while ((cur = cur->r.record.p.next) != NULL)
            gr_mask |= cur->r.record.p.brmask;
          output_P2_format (f, gr_mask, ptr->r.record.p.r.gr);
        }
      break;
    case spill_mask:
      as_bad (_("spill_mask record unimplemented."));
      break;
    case priunat_when_gr:
    case priunat_when_mem:
    case bsp_when:
    case bspstore_when:
    case rnat_when:
      output_P8_format (f, ptr->r.type, ptr->r.record.p.t);
      break;
    case priunat_psprel:
    case bsp_psprel:
    case bspstore_psprel:
    case rnat_psprel:
      output_P8_format (f, ptr->r.type, ptr->r.record.p.off.psp);
      break;
    case unwabi:
      output_P10_format (f, ptr->r.record.p.abi, ptr->r.record.p.context);
      break;
    case epilogue:
      output_B3_format (f, ptr->r.record.b.ecount, ptr->r.record.b.t);
      break;
    case label_state:
    case copy_state:
      output_B4_format (f, ptr->r.type, ptr->r.record.b.label);
      break;
    case spill_psprel:
      output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
                        ptr->r.record.x.reg, ptr->r.record.x.t,
                        ptr->r.record.x.where.pspoff);
      break;
    case spill_sprel:
      output_X1_format (f, ptr->r.type, ptr->r.record.x.ab,
                        ptr->r.record.x.reg, ptr->r.record.x.t,
                        ptr->r.record.x.where.spoff);
      break;
    case spill_reg:
      output_X2_format (f, ptr->r.record.x.ab, ptr->r.record.x.reg,
                        ptr->r.record.x.xy >> 1, ptr->r.record.x.xy,
                        ptr->r.record.x.where.reg, ptr->r.record.x.t);
      break;
    case spill_psprel_p:
      output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
                        ptr->r.record.x.ab, ptr->r.record.x.reg,
                        ptr->r.record.x.t, ptr->r.record.x.where.pspoff);
      break;
    case spill_sprel_p:
      output_X3_format (f, ptr->r.type, ptr->r.record.x.qp,
                        ptr->r.record.x.ab, ptr->r.record.x.reg,
                        ptr->r.record.x.t, ptr->r.record.x.where.spoff);
      break;
    case spill_reg_p:
      output_X4_format (f, ptr->r.record.x.qp, ptr->r.record.x.ab,
                        ptr->r.record.x.reg, ptr->r.record.x.xy >> 1,
                        ptr->r.record.x.xy, ptr->r.record.x.where.reg,
                        ptr->r.record.x.t);
      break;
    default:
      as_bad (_("record_type_not_valid"));
      break;
    }
}

/* Given a unw_rec_list list, process all the records with
   the specified function.  */
static void
process_unw_records (unw_rec_list *list, vbyte_func f)
{
  unw_rec_list *ptr;
  for (ptr = list; ptr; ptr = ptr->next)
    process_one_record (ptr, f);
}

/* Determine the size of a record list in bytes.  */
static int
calc_record_size (unw_rec_list *list)
{
  vbyte_count = 0;
  process_unw_records (list, count_output);
  return vbyte_count;
}

/* Return the number of bits set in the input value.
   Perhaps this has a better place...  */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define popcount __builtin_popcount
#else
static int
popcount (unsigned x)
{
  static const unsigned char popcnt[16] =
    {
      0, 1, 1, 2,
      1, 2, 2, 3,
      1, 2, 2, 3,
      2, 3, 3, 4
    };

  if (x < NELEMS (popcnt))
    return popcnt[x];
  return popcnt[x % NELEMS (popcnt)] + popcount (x / NELEMS (popcnt));
}
#endif
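
/* The table-driven fallback above consumes four bits per step, e.g.
   popcount (0x36) == popcnt[6] + popcount (3) == 2 + 2 == 4.  */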

/* Update IMASK bitmask to reflect the fact that one or more registers
   of type TYPE are saved starting at instruction with index T.  If N
   bits are set in REGMASK, it is assumed that instructions T through
   T+N-1 save these registers.

   TYPE values:
	0: no save
	1: instruction saves next fp reg
	2: instruction saves next general reg
	3: instruction saves next branch reg  */
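/* Each slot occupies a 2-bit field in the IMASK byte array, four slots per
   byte with the earliest slot in the most significant pair.  For example,
   with T == 5 and two bits set in REGMASK, slots 5 and 6 are marked, i.e.
   bit pairs 5:4 and 3:2 of imask[2] receive TYPE.  */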
static void
set_imask (unw_rec_list *region,
           unsigned long regmask,
           unsigned long t,
           unsigned int type)
{
  unsigned char *imask;
  unsigned long imask_size;
  unsigned int i;
  int pos;

  imask = region->r.record.r.mask.i;
  imask_size = region->r.record.r.imask_size;
  if (!imask)
    {
      imask_size = (region->r.record.r.rlen * 2 + 7) / 8 + 1;
      imask = XCNEWVEC (unsigned char, imask_size);

      region->r.record.r.imask_size = imask_size;
      region->r.record.r.mask.i = imask;
    }

  i = (t / 4) + 1;
  pos = 2 * (3 - t % 4);
  while (regmask)
    {
      if (i >= imask_size)
        {
          as_bad (_("Ignoring attempt to spill beyond end of region"));
          return;
        }

      imask[i] |= (type & 0x3) << pos;

      regmask &= (regmask - 1);
      pos -= 2;
      if (pos < 0)
        {
          pos = 6;
          ++i;
        }
    }
}

/* Return the number of instruction slots from FIRST_ADDR to SLOT_ADDR.
   SLOT_FRAG is the frag containing SLOT_ADDR, and FIRST_FRAG is the frag
   containing FIRST_ADDR.  If BEFORE_RELAX, then we use worst-case estimates
   for frag sizes.  */

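/* IA-64 bundles are 16 bytes and hold three instruction slots, so byte
   distances are converted to slot counts as 3 * (bytes >> 4); the low two
   bits of a slot address carry the slot number within its bundle, which is
   what the "& 0x3" terms below extract.  */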
static unsigned long
slot_index (unsigned long slot_addr,
            fragS *slot_frag,
            unsigned long first_addr,
            fragS *first_frag,
            int before_relax)
{
  unsigned long s_index = 0;

  /* First time we are called, the initial address and frag are invalid.  */
  if (first_addr == 0)
    return 0;

  /* If the two addresses are in different frags, then we need to add in
     the remaining size of this frag, and then the entire size of intermediate
     frags.  */
  while (slot_frag != first_frag)
    {
      unsigned long start_addr = (unsigned long) &first_frag->fr_literal;

      if (! before_relax)
        {
          /* We can get the final addresses only during and after
             relaxation.  */
          if (first_frag->fr_next && first_frag->fr_next->fr_address)
            s_index += 3 * ((first_frag->fr_next->fr_address
                             - first_frag->fr_address
                             - first_frag->fr_fix) >> 4);
        }
      else
        /* We don't know what the final addresses will be.  We try our
           best to estimate.  */
        switch (first_frag->fr_type)
          {
          default:
            break;

          case rs_space:
            as_fatal (_("Only constant space allocation is supported"));
            break;

          case rs_align:
          case rs_align_code:
          case rs_align_test:
            /* Take alignment into account.  Assume the worst case
               before relaxation.  */
            s_index += 3 * ((1 << first_frag->fr_offset) >> 4);
            break;

          case rs_org:
            if (first_frag->fr_symbol)
              {
                as_fatal (_("Only constant offsets are supported"));
                break;
              }
            /* Fall through.  */
          case rs_fill:
            s_index += 3 * (first_frag->fr_offset >> 4);
            break;
          }

      /* Add in the full size of the frag converted to instruction slots.  */
      s_index += 3 * (first_frag->fr_fix >> 4);
      /* Subtract away the initial part before first_addr.  */
      s_index -= (3 * ((first_addr >> 4) - (start_addr >> 4))
                  + ((first_addr & 0x3) - (start_addr & 0x3)));

      /* Move to the beginning of the next frag.  */
      first_frag = first_frag->fr_next;
      first_addr = (unsigned long) &first_frag->fr_literal;

      /* This can happen if there is section switching in the middle of a
         function, causing the frag chain for the function to be broken.
         It is too difficult to recover safely from this problem, so we just
         exit with an error.  */
      if (first_frag == NULL)
        as_fatal (_("Section switching in code is not supported."));
    }

  /* Add in the used part of the last frag.  */
  s_index += (3 * ((slot_addr >> 4) - (first_addr >> 4))
              + ((slot_addr & 0x3) - (first_addr & 0x3)));
  return s_index;
}

/* Optimize unwind record directives.  */

static unw_rec_list *
optimize_unw_records (unw_rec_list *list)
{
  if (!list)
    return NULL;

  /* If the only unwind record is ".prologue" or ".prologue" followed
     by ".body", then we can optimize the unwind directives away.  */
  if (list->r.type == prologue
      && (list->next->r.type == endp
          || (list->next->r.type == body && list->next->next->r.type == endp)))
    return NULL;

  return list;
}
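
/* Returning NULL here means generate_unwind_image sees a zero record size
   and emits no unwind image at all, unless something (e.g. .handlerdata or
   .unwentry) has set unwind.force_unwind_entry.  */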

/* Given a complete record list, process any records which have
   unresolved fields (i.e., length counts for a prologue).  After
   this has been run, all necessary information should be available
   within each record to generate an image.  */

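/* This runs once with BEFORE_RELAX set (from generate_unwind_image, using
   worst-case frag size estimates) and again with it clear once frag
   addresses are known (from ia64_estimate_size_before_relax and
   ia64_convert_frag).  */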
static void
fixup_unw_records (unw_rec_list *list, int before_relax)
{
  unw_rec_list *ptr, *region = 0;
  unsigned long first_addr = 0, rlen = 0, t;
  fragS *first_frag = 0;

  for (ptr = list; ptr; ptr = ptr->next)
    {
      if (ptr->slot_number == SLOT_NUM_NOT_SET)
        as_bad (_("Insn slot not set in unwind record."));
      t = slot_index (ptr->slot_number, ptr->slot_frag,
                      first_addr, first_frag, before_relax);
      switch (ptr->r.type)
        {
        case prologue:
        case prologue_gr:
        case body:
          {
            unw_rec_list *last;
            int size;
            unsigned long last_addr = 0;
            fragS *last_frag = NULL;

            first_addr = ptr->slot_number;
            first_frag = ptr->slot_frag;
            /* Find either the next body/prologue start, or the end of
               the function, and determine the size of the region.  */
            for (last = ptr->next; last != NULL; last = last->next)
              if (last->r.type == prologue || last->r.type == prologue_gr
                  || last->r.type == body || last->r.type == endp)
                {
                  last_addr = last->slot_number;
                  last_frag = last->slot_frag;
                  break;
                }
            size = slot_index (last_addr, last_frag, first_addr, first_frag,
                               before_relax);
            rlen = ptr->r.record.r.rlen = size;
            if (ptr->r.type == body)
              /* End of region.  */
              region = 0;
            else
              region = ptr;
            break;
          }
        case epilogue:
          if (t < rlen)
            ptr->r.record.b.t = rlen - 1 - t;
          else
            /* This happens when a memory-stack-less procedure uses a
               ".restore sp" directive at the end of a region to pop
               the frame state.  */
            ptr->r.record.b.t = 0;
          break;

        case mem_stack_f:
        case mem_stack_v:
        case rp_when:
        case pfs_when:
        case preds_when:
        case unat_when:
        case lc_when:
        case fpsr_when:
        case priunat_when_gr:
        case priunat_when_mem:
        case bsp_when:
        case bspstore_when:
        case rnat_when:
          ptr->r.record.p.t = t;
          break;

        case spill_reg:
        case spill_sprel:
        case spill_psprel:
        case spill_reg_p:
        case spill_sprel_p:
        case spill_psprel_p:
          ptr->r.record.x.t = t;
          break;

        case frgr_mem:
          if (!region)
            {
              as_bad (_("frgr_mem record before region record!"));
              return;
            }
          region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
          region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
          set_imask (region, ptr->r.record.p.frmask, t, 1);
          set_imask (region, ptr->r.record.p.grmask, t, 2);
          break;
        case fr_mem:
          if (!region)
            {
              as_bad (_("fr_mem record before region record!"));
              return;
            }
          region->r.record.r.mask.fr_mem |= ptr->r.record.p.frmask;
          set_imask (region, ptr->r.record.p.frmask, t, 1);
          break;
        case gr_mem:
          if (!region)
            {
              as_bad (_("gr_mem record before region record!"));
              return;
            }
          region->r.record.r.mask.gr_mem |= ptr->r.record.p.grmask;
          set_imask (region, ptr->r.record.p.grmask, t, 2);
          break;
        case br_mem:
          if (!region)
            {
              as_bad (_("br_mem record before region record!"));
              return;
            }
          region->r.record.r.mask.br_mem |= ptr->r.record.p.brmask;
          set_imask (region, ptr->r.record.p.brmask, t, 3);
          break;

        case gr_gr:
          if (!region)
            {
              as_bad (_("gr_gr record before region record!"));
              return;
            }
          set_imask (region, ptr->r.record.p.grmask, t, 2);
          break;
        case br_gr:
          if (!region)
            {
              as_bad (_("br_gr record before region record!"));
              return;
            }
          set_imask (region, ptr->r.record.p.brmask, t, 3);
          break;

        default:
          break;
        }
    }
}

/* Estimate the size of a frag before relaxing.  We only have one type of frag
   to handle here, which is the unwind info frag.  */

int
ia64_estimate_size_before_relax (fragS *frag,
                                 asection *segtype ATTRIBUTE_UNUSED)
{
  unw_rec_list *list;
  int len, size, pad;

  /* ??? This code is identical to the first part of ia64_convert_frag.  */
  list = (unw_rec_list *) frag->fr_opcode;
  fixup_unw_records (list, 0);

  len = calc_record_size (list);
  /* pad to pointer-size boundary.  */
  pad = len % md.pointer_size;
  if (pad != 0)
    len += md.pointer_size - pad;
  /* Add 8 for the header.  */
  size = len + 8;
  /* Add a pointer for the personality offset.  */
  if (frag->fr_offset)
    size += md.pointer_size;

  /* fr_var carries the max_chars that we created the fragment with.
     We must, of course, have allocated enough memory earlier.  */
  gas_assert (frag->fr_var >= size);

  return frag->fr_fix + size;
}

/* This function converts a rs_machine_dependent variant frag into a
   normal fill frag with the unwind image from the record list.  */
void
ia64_convert_frag (fragS *frag)
{
  unw_rec_list *list;
  int len, size, pad;
  valueT flag_value;

  /* ??? This code is identical to ia64_estimate_size_before_relax.  */
  list = (unw_rec_list *) frag->fr_opcode;
  fixup_unw_records (list, 0);

  len = calc_record_size (list);
  /* pad to pointer-size boundary.  */
  pad = len % md.pointer_size;
  if (pad != 0)
    len += md.pointer_size - pad;
  /* Add 8 for the header.  */
  size = len + 8;
  /* Add a pointer for the personality offset.  */
  if (frag->fr_offset)
    size += md.pointer_size;

  /* fr_var carries the max_chars that we created the fragment with.
     We must, of course, have allocated enough memory earlier.  */
  gas_assert (frag->fr_var >= size);

  /* Initialize the header area.  fr_offset is initialized with
     unwind.personality_routine.  */
  if (frag->fr_offset)
    {
      if (md.flags & EF_IA_64_ABI64)
        flag_value = (bfd_vma) 3 << 32;
      else
        /* 32-bit unwind info block.  */
        flag_value = (bfd_vma) 0x1003 << 32;
    }
  else
    flag_value = 0;

  md_number_to_chars (frag->fr_literal,
                      (((bfd_vma) 1 << 48)     /* Version.  */
                       | flag_value            /* U & E handler flags.  */
                       | (len / md.pointer_size)), /* Length.  */
                      8);

  /* Skip the header.  */
  vbyte_mem_ptr = frag->fr_literal + 8;
  process_unw_records (list, output_vbyte_mem);

  /* Fill the padding bytes with zeros.  */
  if (pad != 0)
    md_number_to_chars (frag->fr_literal + len + 8 - md.pointer_size + pad, 0,
                        md.pointer_size - pad);
  /* Fill the unwind personality with zeros.  */
  if (frag->fr_offset)
    md_number_to_chars (frag->fr_literal + size - md.pointer_size, 0,
                        md.pointer_size);

  frag->fr_fix += size;
  frag->fr_type = rs_fill;
  frag->fr_var = 0;
  frag->fr_offset = 0;
}
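
/* The 8-byte header written above packs, into a single 64-bit word, the
   length of the descriptor area in pointer-size words (low 32 bits), the
   handler/ABI flags (bits 47:32), and format version 1 (bits 63:48).  */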

static int
parse_predicate_and_operand (expressionS *e, unsigned *qp, const char *po)
{
  int sep = parse_operand_and_eval (e, ',');

  *qp = e->X_add_number - REG_P;
  if (e->X_op != O_register || *qp > 63)
    {
      as_bad (_("First operand to .%s must be a predicate"), po);
      *qp = 0;
    }
  else if (*qp == 0)
    as_warn (_("Pointless use of p0 as first operand to .%s"), po);
  if (sep == ',')
    sep = parse_operand_and_eval (e, ',');
  else
    e->X_op = O_absent;
  return sep;
}

static void
convert_expr_to_ab_reg (const expressionS *e,
                        unsigned int *ab,
                        unsigned int *regp,
                        const char *po,
                        int n)
{
  unsigned int reg = e->X_add_number;

  *ab = *regp = 0; /* Anything valid is good here.  */

  if (e->X_op != O_register)
    reg = REG_GR; /* Anything invalid is good here.  */

  if (reg >= (REG_GR + 4) && reg <= (REG_GR + 7))
    {
      *ab = 0;
      *regp = reg - REG_GR;
    }
  else if ((reg >= (REG_FR + 2) && reg <= (REG_FR + 5))
           || (reg >= (REG_FR + 16) && reg <= (REG_FR + 31)))
    {
      *ab = 1;
      *regp = reg - REG_FR;
    }
  else if (reg >= (REG_BR + 1) && reg <= (REG_BR + 5))
    {
      *ab = 2;
      *regp = reg - REG_BR;
    }
  else
    {
      *ab = 3;
      switch (reg)
        {
        case REG_PR:               *regp =  0; break;
        case REG_PSP:              *regp =  1; break;
        case REG_PRIUNAT:          *regp =  2; break;
        case REG_BR + 0:           *regp =  3; break;
        case REG_AR + AR_BSP:      *regp =  4; break;
        case REG_AR + AR_BSPSTORE: *regp =  5; break;
        case REG_AR + AR_RNAT:     *regp =  6; break;
        case REG_AR + AR_UNAT:     *regp =  7; break;
        case REG_AR + AR_FPSR:     *regp =  8; break;
        case REG_AR + AR_PFS:      *regp =  9; break;
        case REG_AR + AR_LC:       *regp = 10; break;

        default:
          as_bad (_("Operand %d to .%s must be a preserved register"), n, po);
          break;
        }
    }
}
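
/* The AB/REG pair produced above selects the preserved register named by an
   X-format spill record: AB 0 is r4-r7, AB 1 is f2-f5 and f16-f31, AB 2 is
   b1-b5, and AB 3 covers the remaining preserved resources (pr, psp,
   priunat, rp and the preserved application registers) numbered as in the
   switch above.  */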

static void
convert_expr_to_xy_reg (const expressionS *e,
                        unsigned int *xy,
                        unsigned int *regp,
                        const char *po,
                        int n)
{
  unsigned int reg = e->X_add_number;

  *xy = *regp = 0; /* Anything valid is good here.  */

  if (e->X_op != O_register)
    reg = REG_GR; /* Anything invalid is good here.  */

  if (reg >= (REG_GR + 1) && reg <= (REG_GR + 127))
    {
      *xy = 0;
      *regp = reg - REG_GR;
    }
  else if (reg >= (REG_FR + 2) && reg <= (REG_FR + 127))
    {
      *xy = 1;
      *regp = reg - REG_FR;
    }
  else if (reg >= REG_BR && reg <= (REG_BR + 7))
    {
      *xy = 2;
      *regp = reg - REG_BR;
    }
  else
    as_bad (_("Operand %d to .%s must be a writable register"), n, po);
}

static void
dot_align (int arg)
{
  /* The current frag is an alignment frag.  */
  align_frag = frag_now;
  s_align_bytes (arg);
}

static void
dot_radix (int dummy ATTRIBUTE_UNUSED)
{
  char *radix;
  int ch;

  SKIP_WHITESPACE ();

  if (is_it_end_of_statement ())
    return;
  ch = get_symbol_name (&radix);
  ia64_canonicalize_symbol_name (radix);
  if (strcasecmp (radix, "C"))
    as_bad (_("Radix `%s' unsupported or invalid"), radix);
  (void) restore_line_pointer (ch);
  demand_empty_rest_of_line ();
}

/* Helper function for .loc directives.  If the assembler is not generating
   line number info, then we need to remember which instructions have a .loc
   directive, and only call dwarf2_gen_line_info for those instructions.  */

static void
dot_loc (int x)
{
  CURR_SLOT.loc_directive_seen = 1;
  dwarf2_directive_loc (x);
}

/* .sbss, .bss etc. are macros that expand into ".section SECNAME".  */
static void
dot_special_section (int which)
{
  set_section ((char *) special_section_name[which]);
}

/* Return -1 for warning and 0 for error.  */

static int
unwind_diagnostic (const char * region, const char *directive)
{
  if (md.unwind_check == unwind_check_warning)
    {
      as_warn (_(".%s outside of %s"), directive, region);
      return -1;
    }
  else
    {
      as_bad (_(".%s outside of %s"), directive, region);
      ignore_rest_of_line ();
      return 0;
    }
}

/* Return 1 if a directive is in a procedure, -1 if a directive isn't in
   a procedure but the unwind directive check is set to warning, 0 if
   a directive isn't in a procedure and the unwind directive check is set
   to error.  */

static int
in_procedure (const char *directive)
{
  if (unwind.proc_pending.sym
      && (!unwind.saved_text_seg || strcmp (directive, "endp") == 0))
    return 1;
  return unwind_diagnostic ("procedure", directive);
}

/* Return 1 if a directive is in a prologue, -1 if a directive isn't in
   a prologue but the unwind directive check is set to warning, 0 if
   a directive isn't in a prologue and the unwind directive check is set
   to error.  */

static int
in_prologue (const char *directive)
{
  int in = in_procedure (directive);

  if (in > 0 && !unwind.prologue)
    in = unwind_diagnostic ("prologue", directive);
  check_pending_save ();
  return in;
}

/* Return 1 if a directive is in a body, -1 if a directive isn't in
   a body but the unwind directive check is set to warning, 0 if
   a directive isn't in a body and the unwind directive check is set
   to error.  */

static int
in_body (const char *directive)
{
  int in = in_procedure (directive);

  if (in > 0 && !unwind.body)
    in = unwind_diagnostic ("body region", directive);
  return in;
}

static void
add_unwind_entry (unw_rec_list *ptr, int sep)
{
  if (ptr)
    {
      if (unwind.tail)
        unwind.tail->next = ptr;
      else
        unwind.list = ptr;
      unwind.tail = ptr;

      /* The current entry can in fact be a chain of unwind entries.  */
      if (unwind.current_entry == NULL)
        unwind.current_entry = ptr;
    }

  /* The current entry can in fact be a chain of unwind entries.  */
  if (unwind.current_entry == NULL)
    unwind.current_entry = ptr;

  if (sep == ',')
    {
      char *name;
      /* Parse a tag permitted for the current directive.  */
      int ch;

      SKIP_WHITESPACE ();
      ch = get_symbol_name (&name);
      /* FIXME: For now, just issue a warning that this isn't implemented.  */
      {
        static int warned;

        if (!warned)
          {
            warned = 1;
            as_warn (_("Tags on unwind pseudo-ops aren't supported, yet"));
          }
      }
      (void) restore_line_pointer (ch);
    }
  if (sep != NOT_A_CHAR)
    demand_empty_rest_of_line ();
}

static void
dot_fframe (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;
  int sep;

  if (!in_prologue ("fframe"))
    return;

  sep = parse_operand_and_eval (&e, ',');

  if (e.X_op != O_constant)
    {
      as_bad (_("First operand to .fframe must be a constant"));
      e.X_add_number = 0;
    }
  add_unwind_entry (output_mem_stack_f (e.X_add_number), sep);
}

static void
dot_vframe (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;
  unsigned reg;
  int sep;

  if (!in_prologue ("vframe"))
    return;

  sep = parse_operand_and_eval (&e, ',');
  reg = e.X_add_number - REG_GR;
  if (e.X_op != O_register || reg > 127)
    {
      as_bad (_("First operand to .vframe must be a general register"));
      reg = 0;
    }
  add_unwind_entry (output_mem_stack_v (), sep);
  if (! (unwind.prologue_mask & 2))
    add_unwind_entry (output_psp_gr (reg), NOT_A_CHAR);
  else if (reg != unwind.prologue_gr
           + (unsigned) popcount (unwind.prologue_mask & -(2 << 1)))
    as_warn (_("Operand of .vframe contradicts .prologue"));
}

static void
dot_vframesp (int psp)
{
  expressionS e;
  int sep;

  if (psp)
    as_warn (_(".vframepsp is meaningless, assuming .vframesp was meant"));

  if (!in_prologue ("vframesp"))
    return;

  sep = parse_operand_and_eval (&e, ',');
  if (e.X_op != O_constant)
    {
      as_bad (_("Operand to .vframesp must be a constant (sp-relative offset)"));
      e.X_add_number = 0;
    }
  add_unwind_entry (output_mem_stack_v (), sep);
  add_unwind_entry (output_psp_sprel (e.X_add_number), NOT_A_CHAR);
}

static void
dot_save (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e1, e2;
  unsigned reg1, reg2;
  int sep;

  if (!in_prologue ("save"))
    return;

  sep = parse_operand_and_eval (&e1, ',');
  if (sep == ',')
    sep = parse_operand_and_eval (&e2, ',');
  else
    e2.X_op = O_absent;

  reg1 = e1.X_add_number;
  /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'.  */
  if (e1.X_op != O_register)
    {
      as_bad (_("First operand to .save not a register"));
      reg1 = REG_PR; /* Anything valid is good here.  */
    }
  reg2 = e2.X_add_number - REG_GR;
  if (e2.X_op != O_register || reg2 > 127)
    {
      as_bad (_("Second operand to .save not a valid register"));
      reg2 = 0;
    }
  switch (reg1)
    {
    case REG_AR + AR_BSP:
      add_unwind_entry (output_bsp_when (), sep);
      add_unwind_entry (output_bsp_gr (reg2), NOT_A_CHAR);
      break;
    case REG_AR + AR_BSPSTORE:
      add_unwind_entry (output_bspstore_when (), sep);
      add_unwind_entry (output_bspstore_gr (reg2), NOT_A_CHAR);
      break;
    case REG_AR + AR_RNAT:
      add_unwind_entry (output_rnat_when (), sep);
      add_unwind_entry (output_rnat_gr (reg2), NOT_A_CHAR);
      break;
    case REG_AR + AR_UNAT:
      add_unwind_entry (output_unat_when (), sep);
      add_unwind_entry (output_unat_gr (reg2), NOT_A_CHAR);
      break;
    case REG_AR + AR_FPSR:
      add_unwind_entry (output_fpsr_when (), sep);
      add_unwind_entry (output_fpsr_gr (reg2), NOT_A_CHAR);
      break;
    case REG_AR + AR_PFS:
      add_unwind_entry (output_pfs_when (), sep);
      if (! (unwind.prologue_mask & 4))
        add_unwind_entry (output_pfs_gr (reg2), NOT_A_CHAR);
      else if (reg2 != unwind.prologue_gr
               + (unsigned) popcount (unwind.prologue_mask & -(4 << 1)))
        as_warn (_("Second operand of .save contradicts .prologue"));
      break;
    case REG_AR + AR_LC:
      add_unwind_entry (output_lc_when (), sep);
      add_unwind_entry (output_lc_gr (reg2), NOT_A_CHAR);
      break;
    case REG_BR:
      add_unwind_entry (output_rp_when (), sep);
      if (! (unwind.prologue_mask & 8))
        add_unwind_entry (output_rp_gr (reg2), NOT_A_CHAR);
      else if (reg2 != unwind.prologue_gr)
        as_warn (_("Second operand of .save contradicts .prologue"));
      break;
    case REG_PR:
      add_unwind_entry (output_preds_when (), sep);
      if (! (unwind.prologue_mask & 1))
        add_unwind_entry (output_preds_gr (reg2), NOT_A_CHAR);
      else if (reg2 != unwind.prologue_gr
               + (unsigned) popcount (unwind.prologue_mask & -(1 << 1)))
        as_warn (_("Second operand of .save contradicts .prologue"));
      break;
    case REG_PRIUNAT:
      add_unwind_entry (output_priunat_when_gr (), sep);
      add_unwind_entry (output_priunat_gr (reg2), NOT_A_CHAR);
      break;
    default:
      as_bad (_("First operand to .save not a valid register"));
      add_unwind_entry (NULL, sep);
      break;
    }
}
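
/* For example, ".save ar.pfs, r33" emits a pfs_when record (whose time
   stamp is filled in later by fixup_unw_records) plus a pfs_gr record
   naming r33; if a preceding ".prologue" mask already claimed ar.pfs, only
   the consistency of the GR number is checked.  */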

static void
dot_restore (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e1;
  unsigned long ecount;	/* # of _additional_ regions to pop */
  int sep;

  if (!in_body ("restore"))
    return;

  sep = parse_operand_and_eval (&e1, ',');
  if (e1.X_op != O_register || e1.X_add_number != REG_GR + 12)
    as_bad (_("First operand to .restore must be stack pointer (sp)"));

  if (sep == ',')
    {
      expressionS e2;

      sep = parse_operand_and_eval (&e2, ',');
      if (e2.X_op != O_constant || e2.X_add_number < 0)
        {
          as_bad (_("Second operand to .restore must be a constant >= 0"));
          e2.X_add_number = 0;
        }
      ecount = e2.X_add_number;
    }
  else
    ecount = unwind.prologue_count - 1;

  if (ecount >= unwind.prologue_count)
    {
      as_bad (_("Epilogue count of %lu exceeds number of nested prologues (%u)"),
              ecount + 1, unwind.prologue_count);
      ecount = 0;
    }

  add_unwind_entry (output_epilogue (ecount), sep);

  if (ecount < unwind.prologue_count)
    unwind.prologue_count -= ecount + 1;
  else
    unwind.prologue_count = 0;
}

static void
dot_restorereg (int pred)
{
  unsigned int qp, ab, reg;
  expressionS e;
  int sep;
  const char * const po = pred ? "restorereg.p" : "restorereg";

  if (!in_procedure (po))
    return;

  if (pred)
    sep = parse_predicate_and_operand (&e, &qp, po);
  else
    {
      sep = parse_operand_and_eval (&e, ',');
      qp = 0;
    }
  convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);

  add_unwind_entry (output_spill_reg (ab, reg, 0, 0, qp), sep);
}

static const char *special_linkonce_name[] =
  {
    ".gnu.linkonce.ia64unw.", ".gnu.linkonce.ia64unwi."
  };

static void
start_unwind_section (const segT text_seg, int sec_index)
{
  /*
    Use a slightly ugly scheme to derive the unwind section names from
    the text section name:

	text sect.  unwind table sect.
	name:       name:                      comments:
	----------  -----------------          --------------------------------
	.text       .IA_64.unwind
	.text.foo   .IA_64.unwind.text.foo
	.foo        .IA_64.unwind.foo
	.gnu.linkonce.t.foo
		    .gnu.linkonce.ia64unw.foo
	_info       .IA_64.unwind_info         gas issues error message (ditto)
	_infoFOO    .IA_64.unwind_infoFOO      gas issues error message (ditto)

    This mapping is done so that:

	(a) An object file with unwind info only in .text will use
	    unwind section names .IA_64.unwind and .IA_64.unwind_info.
	    This follows the letter of the ABI and also ensures backwards
	    compatibility with older toolchains.

	(b) An object file with unwind info in multiple text sections
	    will use separate unwind sections for each text section.
	    This allows us to properly set the "sh_info" and "sh_link"
	    fields in SHT_IA_64_UNWIND as required by the ABI and also
	    lets GNU ld support programs with multiple segments
	    containing unwind info (as might be the case for certain
	    embedded applications).

	(c) An error is issued if there would be a name clash.
  */

  const char *text_name, *sec_text_name;
  char *sec_name;
  const char *prefix = special_section_name [sec_index];
  const char *suffix;

  sec_text_name = segment_name (text_seg);
  text_name = sec_text_name;
  if (startswith (text_name, "_info"))
    {
      as_bad (_("Illegal section name `%s' (causes unwind section name clash)"),
              text_name);
      ignore_rest_of_line ();
      return;
    }
  if (strcmp (text_name, ".text") == 0)
    text_name = "";

  /* Build the unwind section name by appending the (possibly stripped)
     text section name to the unwind prefix.  */
  suffix = text_name;
  if (startswith (text_name, ".gnu.linkonce.t."))
    {
      prefix = special_linkonce_name [sec_index - SPECIAL_SECTION_UNWIND];
      suffix += sizeof (".gnu.linkonce.t.") - 1;
    }

  sec_name = concat (prefix, suffix, NULL);

  /* Handle COMDAT group.  */
  if ((text_seg->flags & SEC_LINK_ONCE) != 0
      && (elf_section_flags (text_seg) & SHF_GROUP) != 0)
    {
      char *section;
      const char *group_name = elf_group_name (text_seg);

      if (group_name == NULL)
        {
          as_bad (_("Group section `%s' has no group signature"),
                  sec_text_name);
          ignore_rest_of_line ();
          free (sec_name);
          return;
        }

      /* We have to construct a fake section directive.  */
      section = concat (sec_name, ",\"aG\",@progbits,", group_name, ",comdat", NULL);
      set_section (section);
      free (section);
    }
  else
    {
      set_section (sec_name);
      bfd_set_section_flags (now_seg, SEC_LOAD | SEC_ALLOC | SEC_READONLY);
    }

  elf_linked_to_section (now_seg) = text_seg;
  free (sec_name);
}

static void
generate_unwind_image (const segT text_seg)
{
  int size, pad;
  unw_rec_list *list;

  /* Mark the end of the unwind info, so that we can compute the size of the
     last unwind region.  */
  add_unwind_entry (output_endp (), NOT_A_CHAR);

  /* Force out pending instructions, to make sure all unwind records have
     a valid slot_number field.  */
  ia64_flush_insns ();

  /* Generate the unwind record.  */
  list = optimize_unw_records (unwind.list);
  fixup_unw_records (list, 1);
  size = calc_record_size (list);

  if (size > 0 || unwind.force_unwind_entry)
    {
      unwind.force_unwind_entry = 0;
      /* pad to pointer-size boundary.  */
      pad = size % md.pointer_size;
      if (pad != 0)
        size += md.pointer_size - pad;
      /* Add 8 for the header.  */
      size += 8;
      /* Add a pointer for the personality offset.  */
      if (unwind.personality_routine)
        size += md.pointer_size;
    }

  /* If there are unwind records, switch sections, and output the info.  */
  if (size != 0)
    {
      expressionS exp;
      bfd_reloc_code_real_type reloc;

      start_unwind_section (text_seg, SPECIAL_SECTION_UNWIND_INFO);

      /* Make sure the section has 4 byte alignment for ILP32 and
         8 byte alignment for LP64.  */
      frag_align (md.pointer_size_shift, 0, 0);
      record_alignment (now_seg, md.pointer_size_shift);

      /* Set expression which points to start of unwind descriptor area.  */
      unwind.info = expr_build_dot ();

      frag_var (rs_machine_dependent, size, size, 0, 0,
                (offsetT) (long) unwind.personality_routine,
                (char *) list);

      /* Add the personality address to the image.  */
      if (unwind.personality_routine != 0)
        {
          exp.X_op = O_symbol;
          exp.X_add_symbol = unwind.personality_routine;
          exp.X_add_number = 0;

          if (md.flags & EF_IA_64_BE)
            {
              if (md.flags & EF_IA_64_ABI64)
                reloc = BFD_RELOC_IA64_LTOFF_FPTR64MSB;
              else
                reloc = BFD_RELOC_IA64_LTOFF_FPTR32MSB;
            }
          else
            {
              if (md.flags & EF_IA_64_ABI64)
                reloc = BFD_RELOC_IA64_LTOFF_FPTR64LSB;
              else
                reloc = BFD_RELOC_IA64_LTOFF_FPTR32LSB;
            }

          fix_new_exp (frag_now, frag_now_fix () - md.pointer_size,
                       md.pointer_size, &exp, 0, reloc);
          unwind.personality_routine = 0;
        }
    }

  free_saved_prologue_counts ();
  unwind.list = unwind.tail = unwind.current_entry = NULL;
}

static void
dot_handlerdata (int dummy ATTRIBUTE_UNUSED)
{
  if (!in_procedure ("handlerdata"))
    return;
  unwind.force_unwind_entry = 1;

  /* Remember which segment we're in so we can switch back after .endp  */
  unwind.saved_text_seg = now_seg;
  unwind.saved_text_subseg = now_subseg;

  /* Generate unwind info into unwind-info section and then leave that
     section as the currently active one so dataXX directives go into
     the language specific data area of the unwind info block.  */
  generate_unwind_image (now_seg);
  demand_empty_rest_of_line ();
}

static void
dot_unwentry (int dummy ATTRIBUTE_UNUSED)
{
  if (!in_procedure ("unwentry"))
    return;
  unwind.force_unwind_entry = 1;
  demand_empty_rest_of_line ();
}

static void
dot_altrp (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;
  unsigned reg;

  if (!in_prologue ("altrp"))
    return;

  parse_operand_and_eval (&e, 0);
  reg = e.X_add_number - REG_BR;
  if (e.X_op != O_register || reg > 7)
    {
      as_bad (_("First operand to .altrp not a valid branch register"));
      reg = 0;
    }
  add_unwind_entry (output_rp_br (reg), 0);
}

static void
dot_savemem (int psprel)
{
  expressionS e1, e2;
  int sep;
  int reg1, val;
  const char * const po = psprel ? "savepsp" : "savesp";

  if (!in_prologue (po))
    return;

  sep = parse_operand_and_eval (&e1, ',');
  if (sep == ',')
    sep = parse_operand_and_eval (&e2, ',');
  else
    e2.X_op = O_absent;

  reg1 = e1.X_add_number;
  val = e2.X_add_number;

  /* Make sure it's a valid ar.xxx reg, OR it's br0, aka 'rp'.  */
  if (e1.X_op != O_register)
    {
      as_bad (_("First operand to .%s not a register"), po);
      reg1 = REG_PR; /* Anything valid is good here.  */
    }
  if (e2.X_op != O_constant)
    {
      as_bad (_("Second operand to .%s not a constant"), po);
      val = 0;
    }

  switch (reg1)
    {
    case REG_AR + AR_BSP:
      add_unwind_entry (output_bsp_when (), sep);
      add_unwind_entry ((psprel
                         ? output_bsp_psprel
                         : output_bsp_sprel) (val), NOT_A_CHAR);
      break;
    case REG_AR + AR_BSPSTORE:
      add_unwind_entry (output_bspstore_when (), sep);
      add_unwind_entry ((psprel
                         ? output_bspstore_psprel
                         : output_bspstore_sprel) (val), NOT_A_CHAR);
      break;
    case REG_AR + AR_RNAT:
      add_unwind_entry (output_rnat_when (), sep);
      add_unwind_entry ((psprel
                         ? output_rnat_psprel
                         : output_rnat_sprel) (val), NOT_A_CHAR);
      break;
    case REG_AR + AR_UNAT:
      add_unwind_entry (output_unat_when (), sep);
      add_unwind_entry ((psprel
                         ? output_unat_psprel
                         : output_unat_sprel) (val), NOT_A_CHAR);
      break;
    case REG_AR + AR_FPSR:
      add_unwind_entry (output_fpsr_when (), sep);
      add_unwind_entry ((psprel
                         ? output_fpsr_psprel
                         : output_fpsr_sprel) (val), NOT_A_CHAR);
      break;
    case REG_AR + AR_PFS:
      add_unwind_entry (output_pfs_when (), sep);
      add_unwind_entry ((psprel
                         ? output_pfs_psprel
                         : output_pfs_sprel) (val), NOT_A_CHAR);
      break;
    case REG_AR + AR_LC:
      add_unwind_entry (output_lc_when (), sep);
      add_unwind_entry ((psprel
                         ? output_lc_psprel
                         : output_lc_sprel) (val), NOT_A_CHAR);
      break;
    case REG_BR:
      add_unwind_entry (output_rp_when (), sep);
      add_unwind_entry ((psprel
                         ? output_rp_psprel
                         : output_rp_sprel) (val), NOT_A_CHAR);
      break;
    case REG_PR:
      add_unwind_entry (output_preds_when (), sep);
      add_unwind_entry ((psprel
                         ? output_preds_psprel
                         : output_preds_sprel) (val), NOT_A_CHAR);
      break;
    case REG_PRIUNAT:
      add_unwind_entry (output_priunat_when_mem (), sep);
      add_unwind_entry ((psprel
                         ? output_priunat_psprel
                         : output_priunat_sprel) (val), NOT_A_CHAR);
      break;
    default:
      as_bad (_("First operand to .%s not a valid register"), po);
      add_unwind_entry (NULL, sep);
      break;
    }
}

static void
dot_saveg (int dummy ATTRIBUTE_UNUSED)
{
  expressionS e;
  unsigned grmask;
  int sep;

  if (!in_prologue ("save.g"))
    return;

  sep = parse_operand_and_eval (&e, ',');

  grmask = e.X_add_number;
  if (e.X_op != O_constant
      || e.X_add_number <= 0
      || e.X_add_number > 0xf)
    {
      as_bad (_("First operand to .save.g must be a positive 4-bit constant"));
      grmask = 0;
    }

  if (sep == ',')
    {
      unsigned reg;
      int n = popcount (grmask);

      parse_operand_and_eval (&e, 0);
      reg = e.X_add_number - REG_GR;
      if (e.X_op != O_register || reg > 127)
        {
          as_bad (_("Second operand to .save.g must be a general register"));
          reg = 0;
        }
      else if (reg > 128U - n)
        {
          as_bad (_("Second operand to .save.g must be the first of %d general registers"), n);
          reg = 0;
        }
      add_unwind_entry (output_gr_gr (grmask, reg), 0);
    }
  else
    add_unwind_entry (output_gr_mem (grmask), 0);
}
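
/* So ".save.g 0x3" records that the preserved registers r4 and r5 are
   spilled to memory (gr_mem), while ".save.g 0x3, r35" records that they
   are copied to the consecutive registers r35 and r36 instead (a gr_gr
   chain).  */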
3886
3887 static void
dot_savef(int dummy ATTRIBUTE_UNUSED)3888 dot_savef (int dummy ATTRIBUTE_UNUSED)
3889 {
3890 expressionS e;
3891
3892 if (!in_prologue ("save.f"))
3893 return;
3894
3895 parse_operand_and_eval (&e, 0);
3896
3897 if (e.X_op != O_constant
3898 || e.X_add_number <= 0
3899 || e.X_add_number > 0xfffff)
3900 {
3901 as_bad (_("Operand to .save.f must be a positive 20-bit constant"));
3902 e.X_add_number = 0;
3903 }
3904 add_unwind_entry (output_fr_mem (e.X_add_number), 0);
3905 }
3906
3907 static void
dot_saveb(int dummy ATTRIBUTE_UNUSED)3908 dot_saveb (int dummy ATTRIBUTE_UNUSED)
3909 {
3910 expressionS e;
3911 unsigned brmask;
3912 int sep;
3913
3914 if (!in_prologue ("save.b"))
3915 return;
3916
3917 sep = parse_operand_and_eval (&e, ',');
3918
3919 brmask = e.X_add_number;
3920 if (e.X_op != O_constant
3921 || e.X_add_number <= 0
3922 || e.X_add_number > 0x1f)
3923 {
3924 as_bad (_("First operand to .save.b must be a positive 5-bit constant"));
3925 brmask = 0;
3926 }
3927
3928 if (sep == ',')
3929 {
3930 unsigned reg;
3931 int n = popcount (brmask);
3932
3933 parse_operand_and_eval (&e, 0);
3934 reg = e.X_add_number - REG_GR;
3935 if (e.X_op != O_register || reg > 127)
3936 {
3937 as_bad (_("Second operand to .save.b must be a general register"));
3938 reg = 0;
3939 }
3940 else if (reg > 128U - n)
3941 {
3942 as_bad (_("Second operand to .save.b must be the first of %d general registers"), n);
3943 reg = 0;
3944 }
3945 add_unwind_entry (output_br_gr (brmask, reg), 0);
3946 }
3947 else
3948 add_unwind_entry (output_br_mem (brmask), 0);
3949 }
3950
3951 static void
dot_savegf(int dummy ATTRIBUTE_UNUSED)3952 dot_savegf (int dummy ATTRIBUTE_UNUSED)
3953 {
3954 expressionS e1, e2;
3955
3956 if (!in_prologue ("save.gf"))
3957 return;
3958
3959 if (parse_operand_and_eval (&e1, ',') == ',')
3960 parse_operand_and_eval (&e2, 0);
3961 else
3962 e2.X_op = O_absent;
3963
3964 if (e1.X_op != O_constant
3965 || e1.X_add_number < 0
3966 || e1.X_add_number > 0xf)
3967 {
3968 as_bad (_("First operand to .save.gf must be a non-negative 4-bit constant"));
3969 e1.X_op = O_absent;
3970 e1.X_add_number = 0;
3971 }
3972 if (e2.X_op != O_constant
3973 || e2.X_add_number < 0
3974 || e2.X_add_number > 0xfffff)
3975 {
3976 as_bad (_("Second operand to .save.gf must be a non-negative 20-bit constant"));
3977 e2.X_op = O_absent;
3978 e2.X_add_number = 0;
3979 }
3980 if (e1.X_op == O_constant
3981 && e2.X_op == O_constant
3982 && e1.X_add_number == 0
3983 && e2.X_add_number == 0)
3984 as_bad (_("Operands to .save.gf may not be both zero"));
3985
3986 add_unwind_entry (output_frgr_mem (e1.X_add_number, e2.X_add_number), 0);
3987 }
3988
3989 static void
dot_spill(int dummy ATTRIBUTE_UNUSED)3990 dot_spill (int dummy ATTRIBUTE_UNUSED)
3991 {
3992 expressionS e;
3993
3994 if (!in_prologue ("spill"))
3995 return;
3996
3997 parse_operand_and_eval (&e, 0);
3998
3999 if (e.X_op != O_constant)
4000 {
4001 as_bad (_("Operand to .spill must be a constant"));
4002 e.X_add_number = 0;
4003 }
4004 add_unwind_entry (output_spill_base (e.X_add_number), 0);
4005 }
4006
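/* .spillreg   reg, treg
   .spillreg.p qp, reg, treg
   Emit a spill_reg unwind record for register REG with target register
   TREG; the .p form predicates the record on qualifying predicate QP.  */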
4007 static void
4008 dot_spillreg (int pred)
4009 {
4010 int sep;
4011 unsigned int qp, ab, xy, reg, treg;
4012 expressionS e;
4013 const char * const po = pred ? "spillreg.p" : "spillreg";
4014
4015 if (!in_procedure (po))
4016 return;
4017
4018 if (pred)
4019 sep = parse_predicate_and_operand (&e, &qp, po);
4020 else
4021 {
4022 sep = parse_operand_and_eval (&e, ',');
4023 qp = 0;
4024 }
4025 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4026
4027 if (sep == ',')
4028 sep = parse_operand_and_eval (&e, ',');
4029 else
4030 e.X_op = O_absent;
4031 convert_expr_to_xy_reg (&e, &xy, &treg, po, 2 + pred);
4032
4033 add_unwind_entry (output_spill_reg (ab, reg, treg, xy, qp), sep);
4034 }
4035
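/* .spillsp    reg, <constant offset>
   .spillpsp   reg, <constant offset>
   .spillsp.p  qp, reg, <constant offset>
   .spillpsp.p qp, reg, <constant offset>
   Emit a spill_sprel (sp-relative) or spill_psprel (psp-relative)
   unwind record for register REG at the given offset; the .p forms
   predicate the record on QP.  */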
4036 static void
4037 dot_spillmem (int psprel)
4038 {
4039 expressionS e;
4040 int pred = (psprel < 0), sep;
4041 unsigned int qp, ab, reg;
4042 const char * po;
4043
4044 if (pred)
4045 {
4046 psprel = ~psprel;
4047 po = psprel ? "spillpsp.p" : "spillsp.p";
4048 }
4049 else
4050 po = psprel ? "spillpsp" : "spillsp";
4051
4052 if (!in_procedure (po))
4053 return;
4054
4055 if (pred)
4056 sep = parse_predicate_and_operand (&e, &qp, po);
4057 else
4058 {
4059 sep = parse_operand_and_eval (&e, ',');
4060 qp = 0;
4061 }
4062 convert_expr_to_ab_reg (&e, &ab, &reg, po, 1 + pred);
4063
4064 if (sep == ',')
4065 sep = parse_operand_and_eval (&e, ',');
4066 else
4067 e.X_op = O_absent;
4068 if (e.X_op != O_constant)
4069 {
4070 as_bad (_("Operand %d to .%s must be a constant"), 2 + pred, po);
4071 e.X_add_number = 0;
4072 }
4073
4074 if (psprel)
4075 add_unwind_entry (output_spill_psprel (ab, reg, e.X_add_number, qp), sep);
4076 else
4077 add_unwind_entry (output_spill_sprel (ab, reg, e.X_add_number, qp), sep);
4078 }
4079
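/* Return the prologue count that was recorded for .label_state label
   LBL; complain and return 1 if no such label has been recorded.  */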
4080 static unsigned int
4081 get_saved_prologue_count (unsigned long lbl)
4082 {
4083 label_prologue_count *lpc = unwind.saved_prologue_counts;
4084
4085 while (lpc != NULL && lpc->label_number != lbl)
4086 lpc = lpc->next;
4087
4088 if (lpc != NULL)
4089 return lpc->prologue_count;
4090
4091 as_bad (_("Missing .label_state %ld"), lbl);
4092 return 1;
4093 }
4094
4095 static void
4096 save_prologue_count (unsigned long lbl, unsigned int count)
4097 {
4098 label_prologue_count *lpc = unwind.saved_prologue_counts;
4099
4100 while (lpc != NULL && lpc->label_number != lbl)
4101 lpc = lpc->next;
4102
4103 if (lpc != NULL)
4104 lpc->prologue_count = count;
4105 else
4106 {
4107 label_prologue_count *new_lpc = XNEW (label_prologue_count);
4108
4109 new_lpc->next = unwind.saved_prologue_counts;
4110 new_lpc->label_number = lbl;
4111 new_lpc->prologue_count = count;
4112 unwind.saved_prologue_counts = new_lpc;
4113 }
4114 }
4115
4116 static void
4117 free_saved_prologue_counts (void)
4118 {
4119 label_prologue_count *lpc = unwind.saved_prologue_counts;
4120 label_prologue_count *next;
4121
4122 while (lpc != NULL)
4123 {
4124 next = lpc->next;
4125 free (lpc);
4126 lpc = next;
4127 }
4128
4129 unwind.saved_prologue_counts = NULL;
4130 }
4131
4132 static void
4133 dot_label_state (int dummy ATTRIBUTE_UNUSED)
4134 {
4135 expressionS e;
4136
4137 if (!in_body ("label_state"))
4138 return;
4139
4140 parse_operand_and_eval (&e, 0);
4141 if (e.X_op == O_constant)
4142 save_prologue_count (e.X_add_number, unwind.prologue_count);
4143 else
4144 {
4145 as_bad (_("Operand to .label_state must be a constant"));
4146 e.X_add_number = 0;
4147 }
4148 add_unwind_entry (output_label_state (e.X_add_number), 0);
4149 }
4150
4151 static void
4152 dot_copy_state (int dummy ATTRIBUTE_UNUSED)
4153 {
4154 expressionS e;
4155
4156 if (!in_body ("copy_state"))
4157 return;
4158
4159 parse_operand_and_eval (&e, 0);
4160 if (e.X_op == O_constant)
4161 unwind.prologue_count = get_saved_prologue_count (e.X_add_number);
4162 else
4163 {
4164 as_bad (_("Operand to .copy_state must be a constant"));
4165 e.X_add_number = 0;
4166 }
4167 add_unwind_entry (output_copy_state (e.X_add_number), 0);
4168 }
4169
4170 static void
4171 dot_unwabi (int dummy ATTRIBUTE_UNUSED)
4172 {
4173 expressionS e1, e2;
4174 unsigned char sep;
4175
4176 if (!in_prologue ("unwabi"))
4177 return;
4178
4179 sep = parse_operand_and_eval (&e1, ',');
4180 if (sep == ',')
4181 parse_operand_and_eval (&e2, 0);
4182 else
4183 e2.X_op = O_absent;
4184
4185 if (e1.X_op != O_constant)
4186 {
4187 as_bad (_("First operand to .unwabi must be a constant"));
4188 e1.X_add_number = 0;
4189 }
4190
4191 if (e2.X_op != O_constant)
4192 {
4193 as_bad (_("Second operand to .unwabi must be a constant"));
4194 e2.X_add_number = 0;
4195 }
4196
4197 add_unwind_entry (output_unwabi (e1.X_add_number, e2.X_add_number), 0);
4198 }
4199
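/* .personality routine
   Record ROUTINE as the personality routine of the current procedure
   and force emission of its unwind entry.  */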
4200 static void
4201 dot_personality (int dummy ATTRIBUTE_UNUSED)
4202 {
4203 char *name, *p, c;
4204
4205 if (!in_procedure ("personality"))
4206 return;
4207 SKIP_WHITESPACE ();
4208 c = get_symbol_name (&name);
4209 p = input_line_pointer;
4210 unwind.personality_routine = symbol_find_or_make (name);
4211 unwind.force_unwind_entry = 1;
4212 *p = c;
4213 SKIP_WHITESPACE_AFTER_NAME ();
4214 demand_empty_rest_of_line ();
4215 }
4216
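/* .proc name [, name ...]
   Start a new procedure, marking each listed name as a function symbol
   and resetting the per-procedure unwind state.  */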
4217 static void
4218 dot_proc (int dummy ATTRIBUTE_UNUSED)
4219 {
4220 char *name, *p, c;
4221 symbolS *sym;
4222 proc_pending *pending, *last_pending;
4223
4224 if (unwind.proc_pending.sym)
4225 {
4226 (md.unwind_check == unwind_check_warning
4227 ? as_warn
4228 : as_bad) (_("Missing .endp after previous .proc"));
4229 while (unwind.proc_pending.next)
4230 {
4231 pending = unwind.proc_pending.next;
4232 unwind.proc_pending.next = pending->next;
4233 free (pending);
4234 }
4235 }
4236 last_pending = NULL;
4237
4238 /* Parse names of main and alternate entry points and mark them as
4239 function symbols: */
4240 while (1)
4241 {
4242 SKIP_WHITESPACE ();
4243 c = get_symbol_name (&name);
4244 p = input_line_pointer;
4245 if (!*name)
4246 as_bad (_("Empty argument of .proc"));
4247 else
4248 {
4249 sym = symbol_find_or_make (name);
4250 if (S_IS_DEFINED (sym))
4251 as_bad (_("`%s' was already defined"), name);
4252 else if (!last_pending)
4253 {
4254 unwind.proc_pending.sym = sym;
4255 last_pending = &unwind.proc_pending;
4256 }
4257 else
4258 {
4259 pending = XNEW (proc_pending);
4260 pending->sym = sym;
4261 last_pending = last_pending->next = pending;
4262 }
4263 symbol_get_bfdsym (sym)->flags |= BSF_FUNCTION;
4264 }
4265 *p = c;
4266 SKIP_WHITESPACE_AFTER_NAME ();
4267 if (*input_line_pointer != ',')
4268 break;
4269 ++input_line_pointer;
4270 }
4271 if (!last_pending)
4272 {
4273 unwind.proc_pending.sym = expr_build_dot ();
4274 last_pending = &unwind.proc_pending;
4275 }
4276 last_pending->next = NULL;
4277 demand_empty_rest_of_line ();
4278 do_align (4, NULL, 0, 0);
4279
4280 unwind.prologue = 0;
4281 unwind.prologue_count = 0;
4282 unwind.body = 0;
4283 unwind.insn = 0;
4284 unwind.list = unwind.tail = unwind.current_entry = NULL;
4285 unwind.personality_routine = 0;
4286 }
4287
4288 static void
4289 dot_body (int dummy ATTRIBUTE_UNUSED)
4290 {
4291 if (!in_procedure ("body"))
4292 return;
4293 if (!unwind.prologue && !unwind.body && unwind.insn)
4294 as_warn (_("Initial .body should precede any instructions"));
4295 check_pending_save ();
4296
4297 unwind.prologue = 0;
4298 unwind.prologue_mask = 0;
4299 unwind.body = 1;
4300
4301 add_unwind_entry (output_body (), 0);
4302 }
4303
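/* .prologue [mask [, grsave]]
   Start a prologue region.  The optional MASK is a 4-bit constant and
   GRSAVE the first of popcount (MASK) consecutive general registers
   used for the corresponding saves (prologue_gr record); without
   operands a plain prologue record is emitted.  */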
4304 static void
4305 dot_prologue (int dummy ATTRIBUTE_UNUSED)
4306 {
4307 unsigned mask = 0, grsave = 0;
4308
4309 if (!in_procedure ("prologue"))
4310 return;
4311 if (unwind.prologue)
4312 {
4313 as_bad (_(".prologue within prologue"));
4314 ignore_rest_of_line ();
4315 return;
4316 }
4317 if (!unwind.body && unwind.insn)
4318 as_warn (_("Initial .prologue should precede any instructions"));
4319
4320 if (!is_it_end_of_statement ())
4321 {
4322 expressionS e;
4323 int n, sep = parse_operand_and_eval (&e, ',');
4324
4325 if (e.X_op != O_constant
4326 || e.X_add_number < 0
4327 || e.X_add_number > 0xf)
4328 as_bad (_("First operand to .prologue must be a positive 4-bit constant"));
4329 else if (e.X_add_number == 0)
4330 as_warn (_("Pointless use of zero first operand to .prologue"));
4331 else
4332 mask = e.X_add_number;
4333
4334 n = popcount (mask);
4335
4336 if (sep == ',')
4337 parse_operand_and_eval (&e, 0);
4338 else
4339 e.X_op = O_absent;
4340
4341 if (e.X_op == O_constant
4342 && e.X_add_number >= 0
4343 && e.X_add_number < 128)
4344 {
4345 if (md.unwind_check == unwind_check_error)
4346 as_warn (_("Using a constant as second operand to .prologue is deprecated"));
4347 grsave = e.X_add_number;
4348 }
4349 else if (e.X_op != O_register
4350 || (grsave = e.X_add_number - REG_GR) > 127)
4351 {
4352 as_bad (_("Second operand to .prologue must be a general register"));
4353 grsave = 0;
4354 }
4355 else if (grsave > 128U - n)
4356 {
4357 as_bad (_("Second operand to .prologue must be the first of %d general registers"), n);
4358 grsave = 0;
4359 }
4360 }
4361
4362 if (mask)
4363 add_unwind_entry (output_prologue_gr (mask, grsave), 0);
4364 else
4365 add_unwind_entry (output_prologue (), 0);
4366
4367 unwind.prologue = 1;
4368 unwind.prologue_mask = mask;
4369 unwind.prologue_gr = grsave;
4370 unwind.body = 0;
4371 ++unwind.prologue_count;
4372 }
4373
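/* .endp [name [, name ...]]
   End the current procedure: emit the unwind image and unwind table
   entry if needed, set the sizes of the entry point symbols, and check
   the listed names against those given to the matching .proc.  */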
4374 static void
4375 dot_endp (int dummy ATTRIBUTE_UNUSED)
4376 {
4377 expressionS e;
4378 int bytes_per_address;
4379 long where;
4380 segT saved_seg;
4381 subsegT saved_subseg;
4382 proc_pending *pending;
4383 int unwind_check = md.unwind_check;
4384
4385 md.unwind_check = unwind_check_error;
4386 if (!in_procedure ("endp"))
4387 return;
4388 md.unwind_check = unwind_check;
4389
4390 if (unwind.saved_text_seg)
4391 {
4392 saved_seg = unwind.saved_text_seg;
4393 saved_subseg = unwind.saved_text_subseg;
4394 unwind.saved_text_seg = NULL;
4395 }
4396 else
4397 {
4398 saved_seg = now_seg;
4399 saved_subseg = now_subseg;
4400 }
4401
4402 insn_group_break (1, 0, 0);
4403
4404 /* If there wasn't a .handlerdata, we haven't generated an image yet. */
4405 if (!unwind.info)
4406 generate_unwind_image (saved_seg);
4407
4408 if (unwind.info || unwind.force_unwind_entry)
4409 {
4410 symbolS *proc_end;
4411
4412 subseg_set (md.last_text_seg, md.last_text_subseg);
4413 proc_end = expr_build_dot ();
4414
4415 start_unwind_section (saved_seg, SPECIAL_SECTION_UNWIND);
4416
4417 /* Make sure that section has 4 byte alignment for ILP32 and
4418 8 byte alignment for LP64. */
4419 record_alignment (now_seg, md.pointer_size_shift);
4420
4421 /* Need space for 3 pointers for procedure start, procedure end,
4422 and unwind info. */
4423 memset (frag_more (3 * md.pointer_size), 0, 3 * md.pointer_size);
4424 where = frag_now_fix () - (3 * md.pointer_size);
4425 bytes_per_address = bfd_arch_bits_per_address (stdoutput) / 8;
4426
4427 /* Issue the values of a) Proc Begin, b) Proc End, c) Unwind Record. */
4428 e.X_op = O_pseudo_fixup;
4429 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4430 e.X_add_number = 0;
4431 if (!S_IS_LOCAL (unwind.proc_pending.sym)
4432 && S_IS_DEFINED (unwind.proc_pending.sym))
4433 e.X_add_symbol
4434 = symbol_temp_new (S_GET_SEGMENT (unwind.proc_pending.sym),
4435 symbol_get_frag (unwind.proc_pending.sym),
4436 S_GET_VALUE (unwind.proc_pending.sym));
4437 else
4438 e.X_add_symbol = unwind.proc_pending.sym;
4439 ia64_cons_fix_new (frag_now, where, bytes_per_address, &e,
4440 BFD_RELOC_NONE);
4441
4442 e.X_op = O_pseudo_fixup;
4443 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4444 e.X_add_number = 0;
4445 e.X_add_symbol = proc_end;
4446 ia64_cons_fix_new (frag_now, where + bytes_per_address,
4447 bytes_per_address, &e, BFD_RELOC_NONE);
4448
4449 if (unwind.info)
4450 {
4451 e.X_op = O_pseudo_fixup;
4452 e.X_op_symbol = pseudo_func[FUNC_SEG_RELATIVE].u.sym;
4453 e.X_add_number = 0;
4454 e.X_add_symbol = unwind.info;
4455 ia64_cons_fix_new (frag_now, where + (bytes_per_address * 2),
4456 bytes_per_address, &e, BFD_RELOC_NONE);
4457 }
4458 }
4459 subseg_set (saved_seg, saved_subseg);
4460
4461 /* Set symbol sizes. */
4462 pending = &unwind.proc_pending;
4463 if (S_GET_NAME (pending->sym))
4464 {
4465 do
4466 {
4467 symbolS *sym = pending->sym;
4468
4469 if (!S_IS_DEFINED (sym))
4470 as_bad (_("`%s' was not defined within procedure"), S_GET_NAME (sym));
4471 else if (S_GET_SIZE (sym) == 0
4472 && symbol_get_obj (sym)->size == NULL)
4473 {
4474 fragS *frag = symbol_get_frag (sym);
4475
4476 if (frag)
4477 {
4478 if (frag == frag_now && SEG_NORMAL (now_seg))
4479 S_SET_SIZE (sym, frag_now_fix () - S_GET_VALUE (sym));
4480 else
4481 {
4482 symbol_get_obj (sym)->size = XNEW (expressionS);
4483 symbol_get_obj (sym)->size->X_op = O_subtract;
4484 symbol_get_obj (sym)->size->X_add_symbol
4485 = symbol_new (FAKE_LABEL_NAME, now_seg,
4486 frag_now, frag_now_fix ());
4487 symbol_get_obj (sym)->size->X_op_symbol = sym;
4488 symbol_get_obj (sym)->size->X_add_number = 0;
4489 }
4490 }
4491 }
4492 } while ((pending = pending->next) != NULL);
4493 }
4494
4495 /* Parse names of main and alternate entry points. */
4496 while (1)
4497 {
4498 char *name, *p, c;
4499
4500 SKIP_WHITESPACE ();
4501 c = get_symbol_name (&name);
4502 p = input_line_pointer;
4503 if (!*name)
4504 (md.unwind_check == unwind_check_warning
4505 ? as_warn
4506 : as_bad) (_("Empty argument of .endp"));
4507 else
4508 {
4509 symbolS *sym = symbol_find (name);
4510
4511 for (pending = &unwind.proc_pending; pending; pending = pending->next)
4512 {
4513 if (sym == pending->sym)
4514 {
4515 pending->sym = NULL;
4516 break;
4517 }
4518 }
4519 if (!sym || !pending)
4520 as_warn (_("`%s' was not specified with previous .proc"), name);
4521 }
4522 *p = c;
4523 SKIP_WHITESPACE_AFTER_NAME ();
4524 if (*input_line_pointer != ',')
4525 break;
4526 ++input_line_pointer;
4527 }
4528 demand_empty_rest_of_line ();
4529
4530 /* Deliberately only checking for the main entry point here; the
4531 language spec even says all arguments to .endp are ignored. */
4532 if (unwind.proc_pending.sym
4533 && S_GET_NAME (unwind.proc_pending.sym)
4534 && strcmp (S_GET_NAME (unwind.proc_pending.sym), FAKE_LABEL_NAME))
4535 as_warn (_("`%s' should be an operand to this .endp"),
4536 S_GET_NAME (unwind.proc_pending.sym));
4537 while (unwind.proc_pending.next)
4538 {
4539 pending = unwind.proc_pending.next;
4540 unwind.proc_pending.next = pending->next;
4541 free (pending);
4542 }
4543 unwind.proc_pending.sym = unwind.info = NULL;
4544 }
4545
4546 static void
4547 dot_template (int template_val)
4548 {
4549 CURR_SLOT.user_template = template_val;
4550 }
4551
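/* .regstk [ins, locs, outs, rots]
   Describe the current register stack frame, e.g. ".regstk 2, 8, 3, 0"
   for two inputs, eight locals, three outputs and no rotating
   registers; with no operands all four counts are reset to zero.  */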
4552 static void
4553 dot_regstk (int dummy ATTRIBUTE_UNUSED)
4554 {
4555 int ins, locs, outs, rots;
4556
4557 if (is_it_end_of_statement ())
4558 ins = locs = outs = rots = 0;
4559 else
4560 {
4561 ins = get_absolute_expression ();
4562 if (*input_line_pointer++ != ',')
4563 goto err;
4564 locs = get_absolute_expression ();
4565 if (*input_line_pointer++ != ',')
4566 goto err;
4567 outs = get_absolute_expression ();
4568 if (*input_line_pointer++ != ',')
4569 goto err;
4570 rots = get_absolute_expression ();
4571 }
4572 set_regstack (ins, locs, outs, rots);
4573 return;
4574
4575 err:
4576 as_bad (_("Comma expected"));
4577 ignore_rest_of_line ();
4578 }
4579
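/* .rotr name[n] [, ...]   (general registers,        base r32)
   .rotf name[n] [, ...]   (floating-point registers, base f32)
   .rotp name[n] [, ...]   (predicate registers,      base p16)
   Define names for groups of rotating registers, e.g. ".rotr buf[4]"
   defines buf[0]..buf[3] starting at r32.  Names previously defined
   for the same class are removed first.  */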
4580 static void
4581 dot_rot (int type)
4582 {
4583 offsetT num_regs;
4584 valueT num_alloced = 0;
4585 struct dynreg **drpp, *dr;
4586 int ch, base_reg = 0;
4587 char *name, *start;
4588 size_t len;
4589
4590 switch (type)
4591 {
4592 case DYNREG_GR: base_reg = REG_GR + 32; break;
4593 case DYNREG_FR: base_reg = REG_FR + 32; break;
4594 case DYNREG_PR: base_reg = REG_P + 16; break;
4595 default: break;
4596 }
4597
4598 /* First, remove existing names from hash table. */
4599 for (dr = md.dynreg[type]; dr && dr->num_regs; dr = dr->next)
4600 {
4601 str_hash_delete (md.dynreg_hash, dr->name);
4602 /* FIXME: Free dr->name. */
4603 dr->num_regs = 0;
4604 }
4605
4606 drpp = &md.dynreg[type];
4607 while (1)
4608 {
4609 ch = get_symbol_name (&start);
4610 len = strlen (ia64_canonicalize_symbol_name (start));
4611 *input_line_pointer = ch;
4612
4613 SKIP_WHITESPACE_AFTER_NAME ();
4614 if (*input_line_pointer != '[')
4615 {
4616 as_bad (_("Expected '['"));
4617 goto err;
4618 }
4619 ++input_line_pointer; /* skip '[' */
4620
4621 num_regs = get_absolute_expression ();
4622
4623 if (*input_line_pointer++ != ']')
4624 {
4625 as_bad (_("Expected ']'"));
4626 goto err;
4627 }
4628 if (num_regs <= 0)
4629 {
4630 as_bad (_("Number of elements must be positive"));
4631 goto err;
4632 }
4633 SKIP_WHITESPACE ();
4634
4635 num_alloced += num_regs;
4636 switch (type)
4637 {
4638 case DYNREG_GR:
4639 if (num_alloced > md.rot.num_regs)
4640 {
4641 as_bad (_("Used more than the declared %d rotating registers"),
4642 md.rot.num_regs);
4643 goto err;
4644 }
4645 break;
4646 case DYNREG_FR:
4647 if (num_alloced > 96)
4648 {
4649 as_bad (_("Used more than the available 96 rotating registers"));
4650 goto err;
4651 }
4652 break;
4653 case DYNREG_PR:
4654 if (num_alloced > 48)
4655 {
4656 as_bad (_("Used more than the available 48 rotating registers"));
4657 goto err;
4658 }
4659 break;
4660
4661 default:
4662 break;
4663 }
4664
4665 if (!*drpp)
4666 {
4667 *drpp = XOBNEW (&notes, struct dynreg);
4668 memset (*drpp, 0, sizeof (*dr));
4669 }
4670
4671 name = XOBNEWVEC (&notes, char, len + 1);
4672 memcpy (name, start, len);
4673 name[len] = '\0';
4674
4675 dr = *drpp;
4676 dr->name = name;
4677 dr->num_regs = num_regs;
4678 dr->base = base_reg;
4679 drpp = &dr->next;
4680 base_reg += num_regs;
4681
4682 if (str_hash_insert (md.dynreg_hash, name, dr, 0) != NULL)
4683 {
4684 as_bad (_("Attempt to redefine register set `%s'"), name);
4685 obstack_free (&notes, name);
4686 goto err;
4687 }
4688
4689 if (*input_line_pointer != ',')
4690 break;
4691 ++input_line_pointer; /* skip comma */
4692 SKIP_WHITESPACE ();
4693 }
4694 demand_empty_rest_of_line ();
4695 return;
4696
4697 err:
4698 ignore_rest_of_line ();
4699 }
4700
4701 static void
4702 dot_byteorder (int byteorder)
4703 {
4704 segment_info_type *seginfo = seg_info (now_seg);
4705
4706 if (byteorder == -1)
4707 {
4708 if (seginfo->tc_segment_info_data.endian == 0)
4709 seginfo->tc_segment_info_data.endian = default_big_endian ? 1 : 2;
4710 byteorder = seginfo->tc_segment_info_data.endian == 1;
4711 }
4712 else
4713 seginfo->tc_segment_info_data.endian = byteorder ? 1 : 2;
4714
4715 if (target_big_endian != byteorder)
4716 {
4717 target_big_endian = byteorder;
4718 if (target_big_endian)
4719 {
4720 ia64_number_to_chars = number_to_chars_bigendian;
4721 ia64_float_to_chars = ia64_float_to_chars_bigendian;
4722 }
4723 else
4724 {
4725 ia64_number_to_chars = number_to_chars_littleendian;
4726 ia64_float_to_chars = ia64_float_to_chars_littleendian;
4727 }
4728 }
4729 }
4730
4731 static void
4732 dot_psr (int dummy ATTRIBUTE_UNUSED)
4733 {
4734 char *option;
4735 int ch;
4736
4737 while (1)
4738 {
4739 ch = get_symbol_name (&option);
4740 if (strcmp (option, "lsb") == 0)
4741 md.flags &= ~EF_IA_64_BE;
4742 else if (strcmp (option, "msb") == 0)
4743 md.flags |= EF_IA_64_BE;
4744 else if (strcmp (option, "abi32") == 0)
4745 md.flags &= ~EF_IA_64_ABI64;
4746 else if (strcmp (option, "abi64") == 0)
4747 md.flags |= EF_IA_64_ABI64;
4748 else
4749 as_bad (_("Unknown psr option `%s'"), option);
4750 *input_line_pointer = ch;
4751
4752 SKIP_WHITESPACE_AFTER_NAME ();
4753 if (*input_line_pointer != ',')
4754 break;
4755
4756 ++input_line_pointer;
4757 SKIP_WHITESPACE ();
4758 }
4759 demand_empty_rest_of_line ();
4760 }
4761
4762 static void
4763 dot_ln (int dummy ATTRIBUTE_UNUSED)
4764 {
4765 new_logical_line (0, get_absolute_expression ());
4766 demand_empty_rest_of_line ();
4767 }
4768
4769 static void
4770 cross_section (int ref, void (*builder) (int), int ua)
4771 {
4772 char *start, *end;
4773 int saved_auto_align;
4774 unsigned int section_count;
4775 const char *name;
4776
4777 start = input_line_pointer;
4778 name = obj_elf_section_name ();
4779 if (name == NULL)
4780 return;
4781 end = input_line_pointer;
4782 if (*input_line_pointer != ',')
4783 {
4784 as_bad (_("Comma expected after section name"));
4785 ignore_rest_of_line ();
4786 return;
4787 }
4788 *end = '\0';
4789 end = input_line_pointer + 1; /* skip comma */
4790 input_line_pointer = start;
4791 md.keep_pending_output = 1;
4792 section_count = bfd_count_sections (stdoutput);
4793 obj_elf_section (0);
4794 if (section_count != bfd_count_sections (stdoutput))
4795 as_warn (_("Creating sections with .xdataN/.xrealN/.xstringZ is deprecated."));
4796 input_line_pointer = end;
4797 saved_auto_align = md.auto_align;
4798 if (ua)
4799 md.auto_align = 0;
4800 (*builder) (ref);
4801 if (ua)
4802 md.auto_align = saved_auto_align;
4803 obj_elf_previous (0);
4804 md.keep_pending_output = 0;
4805 }
4806
4807 static void
4808 dot_xdata (int size)
4809 {
4810 cross_section (size, cons, 0);
4811 }
4812
4813 /* Why doesn't float_cons() call md_cons_align() the way cons() does? */
4814
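/* Like float_cons, but align first according to the size implied by
   KIND: 'f' -> 4 bytes, 'd' -> 8 bytes, 'x'/'X' -> 16 bytes (anything
   else is treated like 'f').  */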
4815 static void
4816 stmt_float_cons (int kind)
4817 {
4818 size_t alignment;
4819
4820 switch (kind)
4821 {
4822 case 'd':
4823 alignment = 3;
4824 break;
4825
4826 case 'x':
4827 case 'X':
4828 alignment = 4;
4829 break;
4830
4831 case 'f':
4832 default:
4833 alignment = 2;
4834 break;
4835 }
4836 do_align (alignment, NULL, 0, 0);
4837 float_cons (kind);
4838 }
4839
4840 static void
4841 stmt_cons_ua (int size)
4842 {
4843 int saved_auto_align = md.auto_align;
4844
4845 md.auto_align = 0;
4846 cons (size);
4847 md.auto_align = saved_auto_align;
4848 }
4849
4850 static void
4851 dot_xfloat_cons (int kind)
4852 {
4853 cross_section (kind, stmt_float_cons, 0);
4854 }
4855
4856 static void
4857 dot_xstringer (int zero)
4858 {
4859 cross_section (zero, stringer, 0);
4860 }
4861
4862 static void
4863 dot_xdata_ua (int size)
4864 {
4865 cross_section (size, cons, 1);
4866 }
4867
4868 static void
4869 dot_xfloat_cons_ua (int kind)
4870 {
4871 cross_section (kind, float_cons, 1);
4872 }
4873
4874 /* .reg.val <regname>,value */
4875
4876 static void
4877 dot_reg_val (int dummy ATTRIBUTE_UNUSED)
4878 {
4879 expressionS reg;
4880
4881 expression_and_evaluate (&reg);
4882 if (reg.X_op != O_register)
4883 {
4884 as_bad (_("Register name expected"));
4885 ignore_rest_of_line ();
4886 }
4887 else if (*input_line_pointer++ != ',')
4888 {
4889 as_bad (_("Comma expected"));
4890 ignore_rest_of_line ();
4891 }
4892 else
4893 {
4894 valueT value = get_absolute_expression ();
4895 int regno = reg.X_add_number;
4896 if (regno <= REG_GR || regno > REG_GR + 127)
4897 as_warn (_("Register value annotation ignored"));
4898 else
4899 {
4900 gr_values[regno - REG_GR].known = 1;
4901 gr_values[regno - REG_GR].value = value;
4902 gr_values[regno - REG_GR].path = md.path;
4903 }
4904 }
4905 demand_empty_rest_of_line ();
4906 }
4907
4908 /*
4909 .serialize.data
4910 .serialize.instruction
4911 */
4912 static void
4913 dot_serialize (int type)
4914 {
4915 insn_group_break (0, 0, 0);
4916 if (type)
4917 instruction_serialization ();
4918 else
4919 data_serialization ();
4920 insn_group_break (0, 0, 0);
4921 demand_empty_rest_of_line ();
4922 }
4923
4924 /* select dv checking mode
4925 .auto
4926 .explicit
4927 .default
4928
4929 A stop is inserted when changing modes
4930 */
4931
4932 static void
4933 dot_dv_mode (int type)
4934 {
4935 if (md.manual_bundling)
4936 as_warn (_("Directive invalid within a bundle"));
4937
4938 if (type == 'E' || type == 'A')
4939 md.mode_explicitly_set = 0;
4940 else
4941 md.mode_explicitly_set = 1;
4942
4943 md.detect_dv = 1;
4944 switch (type)
4945 {
4946 case 'A':
4947 case 'a':
4948 if (md.explicit_mode)
4949 insn_group_break (1, 0, 0);
4950 md.explicit_mode = 0;
4951 break;
4952 case 'E':
4953 case 'e':
4954 if (!md.explicit_mode)
4955 insn_group_break (1, 0, 0);
4956 md.explicit_mode = 1;
4957 break;
4958 default:
4959 case 'd':
4960 if (md.explicit_mode != md.default_explicit_mode)
4961 insn_group_break (1, 0, 0);
4962 md.explicit_mode = md.default_explicit_mode;
4963 md.mode_explicitly_set = 0;
4964 break;
4965 }
4966 }
4967
4968 static void
4969 print_prmask (valueT mask)
4970 {
4971 int regno;
4972 const char *comma = "";
4973 for (regno = 0; regno < 64; regno++)
4974 {
4975 if (mask & ((valueT) 1 << regno))
4976 {
4977 fprintf (stderr, "%s p%d", comma, regno);
4978 comma = ",";
4979 }
4980 }
4981 }
4982
4983 /*
4984 .pred.rel.clear [p1 [,p2 [,...]]] (also .pred.rel "clear" or @clear)
4985 .pred.rel.imply p1, p2 (also .pred.rel "imply" or @imply)
4986 .pred.rel.mutex p1, p2 [,...] (also .pred.rel "mutex" or @mutex)
4987 .pred.safe_across_calls p1 [, p2 [,...]]
4988 */
4989
4990 static void
4991 dot_pred_rel (int type)
4992 {
4993 valueT mask = 0;
4994 int count = 0;
4995 int p1 = -1, p2 = -1;
4996
4997 if (type == 0)
4998 {
4999 if (*input_line_pointer == '"')
5000 {
5001 int len;
5002 char *form = demand_copy_C_string (&len);
5003
5004 if (strcmp (form, "mutex") == 0)
5005 type = 'm';
5006 else if (strcmp (form, "clear") == 0)
5007 type = 'c';
5008 else if (strcmp (form, "imply") == 0)
5009 type = 'i';
5010 obstack_free (&notes, form);
5011 }
5012 else if (*input_line_pointer == '@')
5013 {
5014 char *form;
5015 char c;
5016
5017 ++input_line_pointer;
5018 c = get_symbol_name (&form);
5019
5020 if (strcmp (form, "mutex") == 0)
5021 type = 'm';
5022 else if (strcmp (form, "clear") == 0)
5023 type = 'c';
5024 else if (strcmp (form, "imply") == 0)
5025 type = 'i';
5026 (void) restore_line_pointer (c);
5027 }
5028 else
5029 {
5030 as_bad (_("Missing predicate relation type"));
5031 ignore_rest_of_line ();
5032 return;
5033 }
5034 if (type == 0)
5035 {
5036 as_bad (_("Unrecognized predicate relation type"));
5037 ignore_rest_of_line ();
5038 return;
5039 }
5040 if (*input_line_pointer == ',')
5041 ++input_line_pointer;
5042 SKIP_WHITESPACE ();
5043 }
5044
5045 while (1)
5046 {
5047 valueT bits = 1;
5048 int sep, regno;
5049 expressionS pr, *pr1, *pr2;
5050
5051 sep = parse_operand_and_eval (&pr, ',');
5052 if (pr.X_op == O_register
5053 && pr.X_add_number >= REG_P
5054 && pr.X_add_number <= REG_P + 63)
5055 {
5056 regno = pr.X_add_number - REG_P;
5057 bits <<= regno;
5058 count++;
5059 if (p1 == -1)
5060 p1 = regno;
5061 else if (p2 == -1)
5062 p2 = regno;
5063 }
5064 else if (type != 'i'
5065 && pr.X_op == O_subtract
5066 && (pr1 = symbol_get_value_expression (pr.X_add_symbol))
5067 && pr1->X_op == O_register
5068 && pr1->X_add_number >= REG_P
5069 && pr1->X_add_number <= REG_P + 63
5070 && (pr2 = symbol_get_value_expression (pr.X_op_symbol))
5071 && pr2->X_op == O_register
5072 && pr2->X_add_number >= REG_P
5073 && pr2->X_add_number <= REG_P + 63)
5074 {
5075 /* It's a range. */
5076 int stop;
5077
5078 regno = pr1->X_add_number - REG_P;
5079 stop = pr2->X_add_number - REG_P;
5080 if (regno >= stop)
5081 {
5082 as_bad (_("Bad register range"));
5083 ignore_rest_of_line ();
5084 return;
5085 }
5086 bits = ((bits << stop) << 1) - (bits << regno);
5087 count += stop - regno + 1;
5088 }
5089 else
5090 {
5091 as_bad (_("Predicate register expected"));
5092 ignore_rest_of_line ();
5093 return;
5094 }
5095 if (mask & bits)
5096 as_warn (_("Duplicate predicate register ignored"));
5097 mask |= bits;
5098 if (sep != ',')
5099 break;
5100 }
5101
5102 switch (type)
5103 {
5104 case 'c':
5105 if (count == 0)
5106 mask = ~(valueT) 0;
5107 clear_qp_mutex (mask);
5108 clear_qp_implies (mask, (valueT) 0);
5109 break;
5110 case 'i':
5111 if (count != 2 || p1 == -1 || p2 == -1)
5112 as_bad (_("Predicate source and target required"));
5113 else if (p1 == 0 || p2 == 0)
5114 as_bad (_("Use of p0 is not valid in this context"));
5115 else
5116 add_qp_imply (p1, p2);
5117 break;
5118 case 'm':
5119 if (count < 2)
5120 {
5121 as_bad (_("At least two PR arguments expected"));
5122 break;
5123 }
5124 else if (mask & 1)
5125 {
5126 as_bad (_("Use of p0 is not valid in this context"));
5127 break;
5128 }
5129 add_qp_mutex (mask);
5130 break;
5131 case 's':
5132 /* note that we don't override any existing relations */
5133 if (count == 0)
5134 {
5135 as_bad (_("At least one PR argument expected"));
5136 break;
5137 }
5138 if (md.debug_dv)
5139 {
5140 fprintf (stderr, "Safe across calls: ");
5141 print_prmask (mask);
5142 fprintf (stderr, "\n");
5143 }
5144 qp_safe_across_calls = mask;
5145 break;
5146 }
5147 demand_empty_rest_of_line ();
5148 }
5149
5150 /* .entry label [, label [, ...]]
5151 Hint to DV code that the given labels are to be considered entry points.
5152 Otherwise, only global labels are considered entry points. */
5153
5154 static void
5155 dot_entry (int dummy ATTRIBUTE_UNUSED)
5156 {
5157 char *name;
5158 int c;
5159 symbolS *symbolP;
5160
5161 do
5162 {
5163 c = get_symbol_name (&name);
5164 symbolP = symbol_find_or_make (name);
5165
5166 if (str_hash_insert (md.entry_hash, S_GET_NAME (symbolP), symbolP, 0))
5167 as_bad (_("duplicate entry hint %s"), name);
5168
5169 *input_line_pointer = c;
5170 SKIP_WHITESPACE_AFTER_NAME ();
5171 c = *input_line_pointer;
5172 if (c == ',')
5173 {
5174 input_line_pointer++;
5175 SKIP_WHITESPACE ();
5176 if (*input_line_pointer == '\n')
5177 c = '\n';
5178 }
5179 }
5180 while (c == ',');
5181
5182 demand_empty_rest_of_line ();
5183 }
5184
5185 /* .mem.offset offset, base
5186 "base" is used to distinguish between offsets from a different base. */
5187
5188 static void
5189 dot_mem_offset (int dummy ATTRIBUTE_UNUSED)
5190 {
5191 md.mem_offset.hint = 1;
5192 md.mem_offset.offset = get_absolute_expression ();
5193 if (*input_line_pointer != ',')
5194 {
5195 as_bad (_("Comma expected"));
5196 ignore_rest_of_line ();
5197 return;
5198 }
5199 ++input_line_pointer;
5200 md.mem_offset.base = get_absolute_expression ();
5201 demand_empty_rest_of_line ();
5202 }
5203
5204 /* ia64-specific pseudo-ops: */
5205 const pseudo_typeS md_pseudo_table[] =
5206 {
5207 { "radix", dot_radix, 0 },
5208 { "lcomm", s_lcomm_bytes, 1 },
5209 { "loc", dot_loc, 0 },
5210 { "bss", dot_special_section, SPECIAL_SECTION_BSS },
5211 { "sbss", dot_special_section, SPECIAL_SECTION_SBSS },
5212 { "sdata", dot_special_section, SPECIAL_SECTION_SDATA },
5213 { "rodata", dot_special_section, SPECIAL_SECTION_RODATA },
5214 { "comment", dot_special_section, SPECIAL_SECTION_COMMENT },
5215 { "ia_64.unwind", dot_special_section, SPECIAL_SECTION_UNWIND },
5216 { "ia_64.unwind_info", dot_special_section, SPECIAL_SECTION_UNWIND_INFO },
5217 { "init_array", dot_special_section, SPECIAL_SECTION_INIT_ARRAY },
5218 { "fini_array", dot_special_section, SPECIAL_SECTION_FINI_ARRAY },
5219 { "proc", dot_proc, 0 },
5220 { "body", dot_body, 0 },
5221 { "prologue", dot_prologue, 0 },
5222 { "endp", dot_endp, 0 },
5223
5224 { "fframe", dot_fframe, 0 },
5225 { "vframe", dot_vframe, 0 },
5226 { "vframesp", dot_vframesp, 0 },
5227 { "vframepsp", dot_vframesp, 1 },
5228 { "save", dot_save, 0 },
5229 { "restore", dot_restore, 0 },
5230 { "restorereg", dot_restorereg, 0 },
5231 { "restorereg.p", dot_restorereg, 1 },
5232 { "handlerdata", dot_handlerdata, 0 },
5233 { "unwentry", dot_unwentry, 0 },
5234 { "altrp", dot_altrp, 0 },
5235 { "savesp", dot_savemem, 0 },
5236 { "savepsp", dot_savemem, 1 },
5237 { "save.g", dot_saveg, 0 },
5238 { "save.f", dot_savef, 0 },
5239 { "save.b", dot_saveb, 0 },
5240 { "save.gf", dot_savegf, 0 },
5241 { "spill", dot_spill, 0 },
5242 { "spillreg", dot_spillreg, 0 },
5243 { "spillsp", dot_spillmem, 0 },
5244 { "spillpsp", dot_spillmem, 1 },
5245 { "spillreg.p", dot_spillreg, 1 },
5246 { "spillsp.p", dot_spillmem, ~0 },
5247 { "spillpsp.p", dot_spillmem, ~1 },
5248 { "label_state", dot_label_state, 0 },
5249 { "copy_state", dot_copy_state, 0 },
5250 { "unwabi", dot_unwabi, 0 },
5251 { "personality", dot_personality, 0 },
5252 { "mii", dot_template, 0x0 },
5253 { "mli", dot_template, 0x2 }, /* old format, for compatibility */
5254 { "mlx", dot_template, 0x2 },
5255 { "mmi", dot_template, 0x4 },
5256 { "mfi", dot_template, 0x6 },
5257 { "mmf", dot_template, 0x7 },
5258 { "mib", dot_template, 0x8 },
5259 { "mbb", dot_template, 0x9 },
5260 { "bbb", dot_template, 0xb },
5261 { "mmb", dot_template, 0xc },
5262 { "mfb", dot_template, 0xe },
5263 { "align", dot_align, 0 },
5264 { "regstk", dot_regstk, 0 },
5265 { "rotr", dot_rot, DYNREG_GR },
5266 { "rotf", dot_rot, DYNREG_FR },
5267 { "rotp", dot_rot, DYNREG_PR },
5268 { "lsb", dot_byteorder, 0 },
5269 { "msb", dot_byteorder, 1 },
5270 { "psr", dot_psr, 0 },
5271 { "alias", dot_alias, 0 },
5272 { "secalias", dot_alias, 1 },
5273 { "ln", dot_ln, 0 }, /* source line info (for debugging) */
5274
5275 { "xdata1", dot_xdata, 1 },
5276 { "xdata2", dot_xdata, 2 },
5277 { "xdata4", dot_xdata, 4 },
5278 { "xdata8", dot_xdata, 8 },
5279 { "xdata16", dot_xdata, 16 },
5280 { "xreal4", dot_xfloat_cons, 'f' },
5281 { "xreal8", dot_xfloat_cons, 'd' },
5282 { "xreal10", dot_xfloat_cons, 'x' },
5283 { "xreal16", dot_xfloat_cons, 'X' },
5284 { "xstring", dot_xstringer, 8 + 0 },
5285 { "xstringz", dot_xstringer, 8 + 1 },
5286
5287 /* unaligned versions: */
5288 { "xdata2.ua", dot_xdata_ua, 2 },
5289 { "xdata4.ua", dot_xdata_ua, 4 },
5290 { "xdata8.ua", dot_xdata_ua, 8 },
5291 { "xdata16.ua", dot_xdata_ua, 16 },
5292 { "xreal4.ua", dot_xfloat_cons_ua, 'f' },
5293 { "xreal8.ua", dot_xfloat_cons_ua, 'd' },
5294 { "xreal10.ua", dot_xfloat_cons_ua, 'x' },
5295 { "xreal16.ua", dot_xfloat_cons_ua, 'X' },
5296
5297 /* annotations/DV checking support */
5298 { "entry", dot_entry, 0 },
5299 { "mem.offset", dot_mem_offset, 0 },
5300 { "pred.rel", dot_pred_rel, 0 },
5301 { "pred.rel.clear", dot_pred_rel, 'c' },
5302 { "pred.rel.imply", dot_pred_rel, 'i' },
5303 { "pred.rel.mutex", dot_pred_rel, 'm' },
5304 { "pred.safe_across_calls", dot_pred_rel, 's' },
5305 { "reg.val", dot_reg_val, 0 },
5306 { "serialize.data", dot_serialize, 0 },
5307 { "serialize.instruction", dot_serialize, 1 },
5308 { "auto", dot_dv_mode, 'a' },
5309 { "explicit", dot_dv_mode, 'e' },
5310 { "default", dot_dv_mode, 'd' },
5311
5312 /* ??? These are needed to make gas/testsuite/gas/elf/ehopt.s work.
5313 IA-64 aligns data allocation pseudo-ops by default, so we have to
5314 tell it that these ones are supposed to be unaligned. Long term,
5315 should rewrite so that only IA-64 specific data allocation pseudo-ops
5316 are aligned by default. */
5317 {"2byte", stmt_cons_ua, 2},
5318 {"4byte", stmt_cons_ua, 4},
5319 {"8byte", stmt_cons_ua, 8},
5320
5321 #ifdef TE_VMS
5322 {"vms_common", obj_elf_vms_common, 0},
5323 #endif
5324
5325 { NULL, 0, 0 }
5326 };
5327
5328 static const struct pseudo_opcode
5329 {
5330 const char *name;
5331 void (*handler) (int);
5332 int arg;
5333 }
5334 pseudo_opcode[] =
5335 {
5336 /* these are more like pseudo-ops, but don't start with a dot */
5337 { "data1", cons, 1 },
5338 { "data2", cons, 2 },
5339 { "data4", cons, 4 },
5340 { "data8", cons, 8 },
5341 { "data16", cons, 16 },
5342 { "real4", stmt_float_cons, 'f' },
5343 { "real8", stmt_float_cons, 'd' },
5344 { "real10", stmt_float_cons, 'x' },
5345 { "real16", stmt_float_cons, 'X' },
5346 { "string", stringer, 8 + 0 },
5347 { "stringz", stringer, 8 + 1 },
5348
5349 /* unaligned versions: */
5350 { "data2.ua", stmt_cons_ua, 2 },
5351 { "data4.ua", stmt_cons_ua, 4 },
5352 { "data8.ua", stmt_cons_ua, 8 },
5353 { "data16.ua", stmt_cons_ua, 16 },
5354 { "real4.ua", float_cons, 'f' },
5355 { "real8.ua", float_cons, 'd' },
5356 { "real10.ua", float_cons, 'x' },
5357 { "real16.ua", float_cons, 'X' },
5358 };
5359
5360 /* Declare a register by creating a symbol for it and entering it in
5361 the symbol table. */
5362
5363 static symbolS *
5364 declare_register (const char *name, unsigned int regnum)
5365 {
5366 symbolS *sym;
5367
5368 sym = symbol_create (name, reg_section, &zero_address_frag, regnum);
5369
5370 if (str_hash_insert (md.reg_hash, S_GET_NAME (sym), sym, 0) != NULL)
5371 as_fatal (_("duplicate %s"), name);
5372
5373 return sym;
5374 }
5375
5376 static void
5377 declare_register_set (const char *prefix,
5378 unsigned int num_regs,
5379 unsigned int base_regnum)
5380 {
5381 char name[8];
5382 unsigned int i;
5383
5384 for (i = 0; i < num_regs; ++i)
5385 {
5386 snprintf (name, sizeof (name), "%s%u", prefix, i);
5387 declare_register (name, base_regnum + i);
5388 }
5389 }
5390
5391 static unsigned int
5392 operand_width (enum ia64_opnd opnd)
5393 {
5394 const struct ia64_operand *odesc = &elf64_ia64_operands[opnd];
5395 unsigned int bits = 0;
5396 int i;
5397
5398 bits = 0;
5399 for (i = 0; i < NELEMS (odesc->field) && odesc->field[i].bits; ++i)
5400 bits += odesc->field[i].bits;
5401
5402 return bits;
5403 }
5404
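/* Check whether expression E is acceptable as operand RES_INDEX of
   IDESC.  Returns OPERAND_MATCH on success, OPERAND_OUT_OF_RANGE if
   the operand is of the right kind but its value is out of range, and
   OPERAND_MISMATCH otherwise.  Matching a relocatable operand may add
   a fixup to the current slot.  */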
5405 static enum operand_match_result
5406 operand_match (const struct ia64_opcode *idesc, int res_index, expressionS *e)
5407 {
5408 enum ia64_opnd opnd = idesc->operands[res_index];
5409 int bits, relocatable = 0;
5410 struct insn_fix *fix;
5411 bfd_signed_vma val;
5412
5413 switch (opnd)
5414 {
5415 /* constants: */
5416
5417 case IA64_OPND_AR_CCV:
5418 if (e->X_op == O_register && e->X_add_number == REG_AR + 32)
5419 return OPERAND_MATCH;
5420 break;
5421
5422 case IA64_OPND_AR_CSD:
5423 if (e->X_op == O_register && e->X_add_number == REG_AR + 25)
5424 return OPERAND_MATCH;
5425 break;
5426
5427 case IA64_OPND_AR_PFS:
5428 if (e->X_op == O_register && e->X_add_number == REG_AR + 64)
5429 return OPERAND_MATCH;
5430 break;
5431
5432 case IA64_OPND_GR0:
5433 if (e->X_op == O_register && e->X_add_number == REG_GR + 0)
5434 return OPERAND_MATCH;
5435 break;
5436
5437 case IA64_OPND_IP:
5438 if (e->X_op == O_register && e->X_add_number == REG_IP)
5439 return OPERAND_MATCH;
5440 break;
5441
5442 case IA64_OPND_PR:
5443 if (e->X_op == O_register && e->X_add_number == REG_PR)
5444 return OPERAND_MATCH;
5445 break;
5446
5447 case IA64_OPND_PR_ROT:
5448 if (e->X_op == O_register && e->X_add_number == REG_PR_ROT)
5449 return OPERAND_MATCH;
5450 break;
5451
5452 case IA64_OPND_PSR:
5453 if (e->X_op == O_register && e->X_add_number == REG_PSR)
5454 return OPERAND_MATCH;
5455 break;
5456
5457 case IA64_OPND_PSR_L:
5458 if (e->X_op == O_register && e->X_add_number == REG_PSR_L)
5459 return OPERAND_MATCH;
5460 break;
5461
5462 case IA64_OPND_PSR_UM:
5463 if (e->X_op == O_register && e->X_add_number == REG_PSR_UM)
5464 return OPERAND_MATCH;
5465 break;
5466
5467 case IA64_OPND_C1:
5468 if (e->X_op == O_constant)
5469 {
5470 if (e->X_add_number == 1)
5471 return OPERAND_MATCH;
5472 else
5473 return OPERAND_OUT_OF_RANGE;
5474 }
5475 break;
5476
5477 case IA64_OPND_C8:
5478 if (e->X_op == O_constant)
5479 {
5480 if (e->X_add_number == 8)
5481 return OPERAND_MATCH;
5482 else
5483 return OPERAND_OUT_OF_RANGE;
5484 }
5485 break;
5486
5487 case IA64_OPND_C16:
5488 if (e->X_op == O_constant)
5489 {
5490 if (e->X_add_number == 16)
5491 return OPERAND_MATCH;
5492 else
5493 return OPERAND_OUT_OF_RANGE;
5494 }
5495 break;
5496
5497 /* register operands: */
5498
5499 case IA64_OPND_AR3:
5500 if (e->X_op == O_register && e->X_add_number >= REG_AR
5501 && e->X_add_number < REG_AR + 128)
5502 return OPERAND_MATCH;
5503 break;
5504
5505 case IA64_OPND_B1:
5506 case IA64_OPND_B2:
5507 if (e->X_op == O_register && e->X_add_number >= REG_BR
5508 && e->X_add_number < REG_BR + 8)
5509 return OPERAND_MATCH;
5510 break;
5511
5512 case IA64_OPND_CR3:
5513 if (e->X_op == O_register && e->X_add_number >= REG_CR
5514 && e->X_add_number < REG_CR + 128)
5515 return OPERAND_MATCH;
5516 break;
5517
5518 case IA64_OPND_DAHR3:
5519 if (e->X_op == O_register && e->X_add_number >= REG_DAHR
5520 && e->X_add_number < REG_DAHR + 8)
5521 return OPERAND_MATCH;
5522 break;
5523
5524 case IA64_OPND_F1:
5525 case IA64_OPND_F2:
5526 case IA64_OPND_F3:
5527 case IA64_OPND_F4:
5528 if (e->X_op == O_register && e->X_add_number >= REG_FR
5529 && e->X_add_number < REG_FR + 128)
5530 return OPERAND_MATCH;
5531 break;
5532
5533 case IA64_OPND_P1:
5534 case IA64_OPND_P2:
5535 if (e->X_op == O_register && e->X_add_number >= REG_P
5536 && e->X_add_number < REG_P + 64)
5537 return OPERAND_MATCH;
5538 break;
5539
5540 case IA64_OPND_R1:
5541 case IA64_OPND_R2:
5542 case IA64_OPND_R3:
5543 if (e->X_op == O_register && e->X_add_number >= REG_GR
5544 && e->X_add_number < REG_GR + 128)
5545 return OPERAND_MATCH;
5546 break;
5547
5548 case IA64_OPND_R3_2:
5549 if (e->X_op == O_register && e->X_add_number >= REG_GR)
5550 {
5551 if (e->X_add_number < REG_GR + 4)
5552 return OPERAND_MATCH;
5553 else if (e->X_add_number < REG_GR + 128)
5554 return OPERAND_OUT_OF_RANGE;
5555 }
5556 break;
5557
5558 /* indirect operands: */
5559 case IA64_OPND_CPUID_R3:
5560 case IA64_OPND_DBR_R3:
5561 case IA64_OPND_DTR_R3:
5562 case IA64_OPND_ITR_R3:
5563 case IA64_OPND_IBR_R3:
5564 case IA64_OPND_MSR_R3:
5565 case IA64_OPND_PKR_R3:
5566 case IA64_OPND_PMC_R3:
5567 case IA64_OPND_PMD_R3:
5568 case IA64_OPND_DAHR_R3:
5569 case IA64_OPND_RR_R3:
5570 if (e->X_op == O_index && e->X_op_symbol
5571 && (S_GET_VALUE (e->X_op_symbol) - IND_CPUID
5572 == opnd - IA64_OPND_CPUID_R3))
5573 return OPERAND_MATCH;
5574 break;
5575
5576 case IA64_OPND_MR3:
5577 if (e->X_op == O_index && !e->X_op_symbol)
5578 return OPERAND_MATCH;
5579 break;
5580
5581 /* immediate operands: */
5582 case IA64_OPND_CNT2a:
5583 case IA64_OPND_LEN4:
5584 case IA64_OPND_LEN6:
5585 bits = operand_width (idesc->operands[res_index]);
5586 if (e->X_op == O_constant)
5587 {
5588 if ((bfd_vma) (e->X_add_number - 1) < ((bfd_vma) 1 << bits))
5589 return OPERAND_MATCH;
5590 else
5591 return OPERAND_OUT_OF_RANGE;
5592 }
5593 break;
5594
5595 case IA64_OPND_CNT2b:
5596 if (e->X_op == O_constant)
5597 {
5598 if ((bfd_vma) (e->X_add_number - 1) < 3)
5599 return OPERAND_MATCH;
5600 else
5601 return OPERAND_OUT_OF_RANGE;
5602 }
5603 break;
5604
5605 case IA64_OPND_CNT2c:
5606 val = e->X_add_number;
5607 if (e->X_op == O_constant)
5608 {
5609 if ((val == 0 || val == 7 || val == 15 || val == 16))
5610 return OPERAND_MATCH;
5611 else
5612 return OPERAND_OUT_OF_RANGE;
5613 }
5614 break;
5615
5616 case IA64_OPND_SOR:
5617 /* SOR must be an integer multiple of 8 */
5618 if (e->X_op == O_constant && e->X_add_number & 0x7)
5619 return OPERAND_OUT_OF_RANGE;
5620 /* Fall through. */
5621 case IA64_OPND_SOF:
5622 case IA64_OPND_SOL:
5623 if (e->X_op == O_constant)
5624 {
5625 if ((bfd_vma) e->X_add_number <= 96)
5626 return OPERAND_MATCH;
5627 else
5628 return OPERAND_OUT_OF_RANGE;
5629 }
5630 break;
5631
5632 case IA64_OPND_IMMU62:
5633 if (e->X_op == O_constant)
5634 {
5635 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << 62))
5636 return OPERAND_MATCH;
5637 else
5638 return OPERAND_OUT_OF_RANGE;
5639 }
5640 else
5641 {
5642 /* FIXME -- need 62-bit relocation type */
5643 as_bad (_("62-bit relocation not yet implemented"));
5644 }
5645 break;
5646
5647 case IA64_OPND_IMMU64:
5648 if (e->X_op == O_symbol || e->X_op == O_pseudo_fixup
5649 || e->X_op == O_subtract)
5650 {
5651 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5652 fix->code = BFD_RELOC_IA64_IMM64;
5653 if (e->X_op != O_subtract)
5654 {
5655 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5656 if (e->X_op == O_pseudo_fixup)
5657 e->X_op = O_symbol;
5658 }
5659
5660 fix->opnd = idesc->operands[res_index];
5661 fix->expr = *e;
5662 fix->is_pcrel = 0;
5663 ++CURR_SLOT.num_fixups;
5664 return OPERAND_MATCH;
5665 }
5666 else if (e->X_op == O_constant)
5667 return OPERAND_MATCH;
5668 break;
5669
5670 case IA64_OPND_IMMU5b:
5671 if (e->X_op == O_constant)
5672 {
5673 val = e->X_add_number;
5674 if (val >= 32 && val <= 63)
5675 return OPERAND_MATCH;
5676 else
5677 return OPERAND_OUT_OF_RANGE;
5678 }
5679 break;
5680
5681 case IA64_OPND_CCNT5:
5682 case IA64_OPND_CNT5:
5683 case IA64_OPND_CNT6:
5684 case IA64_OPND_CPOS6a:
5685 case IA64_OPND_CPOS6b:
5686 case IA64_OPND_CPOS6c:
5687 case IA64_OPND_IMMU2:
5688 case IA64_OPND_IMMU7a:
5689 case IA64_OPND_IMMU7b:
5690 case IA64_OPND_IMMU16:
5691 case IA64_OPND_IMMU19:
5692 case IA64_OPND_IMMU21:
5693 case IA64_OPND_IMMU24:
5694 case IA64_OPND_MBTYPE4:
5695 case IA64_OPND_MHTYPE8:
5696 case IA64_OPND_POS6:
5697 bits = operand_width (idesc->operands[res_index]);
5698 if (e->X_op == O_constant)
5699 {
5700 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5701 return OPERAND_MATCH;
5702 else
5703 return OPERAND_OUT_OF_RANGE;
5704 }
5705 break;
5706
5707 case IA64_OPND_IMMU9:
5708 bits = operand_width (idesc->operands[res_index]);
5709 if (e->X_op == O_constant)
5710 {
5711 if ((bfd_vma) e->X_add_number < ((bfd_vma) 1 << bits))
5712 {
5713 int lobits = e->X_add_number & 0x3;
5714 if (((bfd_vma) e->X_add_number & 0x3C) != 0 && lobits == 0)
5715 e->X_add_number |= (bfd_vma) 0x3;
5716 return OPERAND_MATCH;
5717 }
5718 else
5719 return OPERAND_OUT_OF_RANGE;
5720 }
5721 break;
5722
5723 case IA64_OPND_IMM44:
5724 /* The least significant 16 bits must be zero.  */
5725 if ((e->X_add_number & 0xffff) != 0)
5726 /* XXX technically, this is wrong: we should not be issuing warning
5727 messages until we're sure this instruction pattern is going to
5728 be used! */
5729 as_warn (_("lower 16 bits of mask ignored"));
5730
5731 if (e->X_op == O_constant)
5732 {
5733 if (((e->X_add_number >= 0
5734 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 44))
5735 || (e->X_add_number < 0
5736 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 44))))
5737 {
5738 /* sign-extend */
5739 if (e->X_add_number >= 0
5740 && (e->X_add_number & ((bfd_vma) 1 << 43)) != 0)
5741 {
5742 e->X_add_number |= ~(((bfd_vma) 1 << 44) - 1);
5743 }
5744 return OPERAND_MATCH;
5745 }
5746 else
5747 return OPERAND_OUT_OF_RANGE;
5748 }
5749 break;
5750
5751 case IA64_OPND_IMM17:
5752 /* bit 0 is a don't care (pr0 is hardwired to 1) */
5753 if (e->X_op == O_constant)
5754 {
5755 if (((e->X_add_number >= 0
5756 && (bfd_vma) e->X_add_number < ((bfd_vma) 1 << 17))
5757 || (e->X_add_number < 0
5758 && (bfd_vma) -e->X_add_number <= ((bfd_vma) 1 << 17))))
5759 {
5760 /* sign-extend */
5761 if (e->X_add_number >= 0
5762 && (e->X_add_number & ((bfd_vma) 1 << 16)) != 0)
5763 {
5764 e->X_add_number |= ~(((bfd_vma) 1 << 17) - 1);
5765 }
5766 return OPERAND_MATCH;
5767 }
5768 else
5769 return OPERAND_OUT_OF_RANGE;
5770 }
5771 break;
5772
5773 case IA64_OPND_IMM14:
5774 case IA64_OPND_IMM22:
5775 relocatable = 1;
5776 /* Fall through. */
5777 case IA64_OPND_IMM1:
5778 case IA64_OPND_IMM8:
5779 case IA64_OPND_IMM8U4:
5780 case IA64_OPND_IMM8M1:
5781 case IA64_OPND_IMM8M1U4:
5782 case IA64_OPND_IMM8M1U8:
5783 case IA64_OPND_IMM9a:
5784 case IA64_OPND_IMM9b:
5785 bits = operand_width (idesc->operands[res_index]);
5786 if (relocatable && (e->X_op == O_symbol
5787 || e->X_op == O_subtract
5788 || e->X_op == O_pseudo_fixup))
5789 {
5790 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5791
5792 if (idesc->operands[res_index] == IA64_OPND_IMM14)
5793 fix->code = BFD_RELOC_IA64_IMM14;
5794 else
5795 fix->code = BFD_RELOC_IA64_IMM22;
5796
5797 if (e->X_op != O_subtract)
5798 {
5799 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5800 if (e->X_op == O_pseudo_fixup)
5801 e->X_op = O_symbol;
5802 }
5803
5804 fix->opnd = idesc->operands[res_index];
5805 fix->expr = *e;
5806 fix->is_pcrel = 0;
5807 ++CURR_SLOT.num_fixups;
5808 return OPERAND_MATCH;
5809 }
5810 else if (e->X_op != O_constant
5811 && ! (e->X_op == O_big && opnd == IA64_OPND_IMM8M1U8))
5812 return OPERAND_MISMATCH;
5813
5814 if (opnd == IA64_OPND_IMM8M1U4)
5815 {
5816 /* Zero is not valid for unsigned compares that take an adjusted
5817 constant immediate range. */
5818 if (e->X_add_number == 0)
5819 return OPERAND_OUT_OF_RANGE;
5820
5821 /* Sign-extend 32-bit unsigned numbers, so that the following range
5822 checks will work. */
5823 val = e->X_add_number;
5824 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5825 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5826
5827 /* Check for 0x100000000. This is valid because
5828 0x100000000-1 is the same as ((uint32_t) -1). */
5829 if (val == ((bfd_signed_vma) 1 << 32))
5830 return OPERAND_MATCH;
5831
5832 val = val - 1;
5833 }
5834 else if (opnd == IA64_OPND_IMM8M1U8)
5835 {
5836 /* Zero is not valid for unsigned compares that take an adjusted
5837 constant immediate range. */
5838 if (e->X_add_number == 0)
5839 return OPERAND_OUT_OF_RANGE;
5840
5841 /* Check for 0x10000000000000000. */
5842 if (e->X_op == O_big)
5843 {
5844 if (generic_bignum[0] == 0
5845 && generic_bignum[1] == 0
5846 && generic_bignum[2] == 0
5847 && generic_bignum[3] == 0
5848 && generic_bignum[4] == 1)
5849 return OPERAND_MATCH;
5850 else
5851 return OPERAND_OUT_OF_RANGE;
5852 }
5853 else
5854 val = e->X_add_number - 1;
5855 }
5856 else if (opnd == IA64_OPND_IMM8M1)
5857 val = e->X_add_number - 1;
5858 else if (opnd == IA64_OPND_IMM8U4)
5859 {
5860 /* Sign-extend 32-bit unsigned numbers, so that the following range
5861 checks will work. */
5862 val = e->X_add_number;
5863 if ((val & (~(bfd_vma) 0 << 32)) == 0)
5864 val = (val ^ ((bfd_vma) 1 << 31)) - ((bfd_vma) 1 << 31);
5865 }
5866 else
5867 val = e->X_add_number;
5868
5869 if ((val >= 0 && (bfd_vma) val < ((bfd_vma) 1 << (bits - 1)))
5870 || (val < 0 && (bfd_vma) -val <= ((bfd_vma) 1 << (bits - 1))))
5871 return OPERAND_MATCH;
5872 else
5873 return OPERAND_OUT_OF_RANGE;
5874
5875 case IA64_OPND_INC3:
5876 /* +/- 1, 4, 8, 16 */
5877 val = e->X_add_number;
5878 if (val < 0)
5879 val = -val;
5880 if (e->X_op == O_constant)
5881 {
5882 if ((val == 1 || val == 4 || val == 8 || val == 16))
5883 return OPERAND_MATCH;
5884 else
5885 return OPERAND_OUT_OF_RANGE;
5886 }
5887 break;
5888
5889 case IA64_OPND_TGT25:
5890 case IA64_OPND_TGT25b:
5891 case IA64_OPND_TGT25c:
5892 case IA64_OPND_TGT64:
5893 if (e->X_op == O_symbol)
5894 {
5895 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5896 if (opnd == IA64_OPND_TGT25)
5897 fix->code = BFD_RELOC_IA64_PCREL21F;
5898 else if (opnd == IA64_OPND_TGT25b)
5899 fix->code = BFD_RELOC_IA64_PCREL21M;
5900 else if (opnd == IA64_OPND_TGT25c)
5901 fix->code = BFD_RELOC_IA64_PCREL21B;
5902 else if (opnd == IA64_OPND_TGT64)
5903 fix->code = BFD_RELOC_IA64_PCREL60B;
5904 else
5905 abort ();
5906
5907 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5908 fix->opnd = idesc->operands[res_index];
5909 fix->expr = *e;
5910 fix->is_pcrel = 1;
5911 ++CURR_SLOT.num_fixups;
5912 return OPERAND_MATCH;
5913 }
5914 /* Fall through. */
5915 case IA64_OPND_TAG13:
5916 case IA64_OPND_TAG13b:
5917 switch (e->X_op)
5918 {
5919 case O_constant:
5920 return OPERAND_MATCH;
5921
5922 case O_symbol:
5923 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5924 /* There are no external relocs for TAG13/TAG13b fields, so we
5925 create a dummy reloc. This will not live past md_apply_fix. */
5926 fix->code = BFD_RELOC_UNUSED;
5927 fix->code = ia64_gen_real_reloc_type (e->X_op_symbol, fix->code);
5928 fix->opnd = idesc->operands[res_index];
5929 fix->expr = *e;
5930 fix->is_pcrel = 1;
5931 ++CURR_SLOT.num_fixups;
5932 return OPERAND_MATCH;
5933
5934 default:
5935 break;
5936 }
5937 break;
5938
5939 case IA64_OPND_LDXMOV:
5940 fix = CURR_SLOT.fixup + CURR_SLOT.num_fixups;
5941 fix->code = BFD_RELOC_IA64_LDXMOV;
5942 fix->opnd = idesc->operands[res_index];
5943 fix->expr = *e;
5944 fix->is_pcrel = 0;
5945 ++CURR_SLOT.num_fixups;
5946 return OPERAND_MATCH;
5947
5948 case IA64_OPND_STRD5b:
5949 if (e->X_op == O_constant)
5950 {
5951 /* 5-bit signed scaled by 64 */
5952 if ((e->X_add_number <= ( 0xf << 6 ))
5953 && (e->X_add_number >= -( 0x10 << 6 )))
5954 {
5955
5956 /* Must be a multiple of 64 */
5957 if ((e->X_add_number & 0x3f) != 0)
5958 as_warn (_("stride must be a multiple of 64; lower 6 bits ignored"));
5959
5960 e->X_add_number &= ~ 0x3f;
5961 return OPERAND_MATCH;
5962 }
5963 else
5964 return OPERAND_OUT_OF_RANGE;
5965 }
5966 break;
5967 case IA64_OPND_CNT6a:
5968 if (e->X_op == O_constant)
5969 {
5970 /* 6-bit unsigned biased by 1 -- count 0 is meaningless */
5971 if ((e->X_add_number <= 64)
5972 && (e->X_add_number > 0) )
5973 {
5974 return OPERAND_MATCH;
5975 }
5976 else
5977 return OPERAND_OUT_OF_RANGE;
5978 }
5979 break;
5980
5981 default:
5982 break;
5983 }
5984 return OPERAND_MISMATCH;
5985 }
5986
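/* Parse the next operand into E and return the character found after
   it; if MORE is non-zero and that character is ',' or MORE, it is
   also consumed.  */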
5987 static int
5988 parse_operand (expressionS *e, int more)
5989 {
5990 int sep = '\0';
5991
5992 memset (e, 0, sizeof (*e));
5993 e->X_op = O_absent;
5994 SKIP_WHITESPACE ();
5995 expression (e);
5996 sep = *input_line_pointer;
5997 if (more && (sep == ',' || sep == more))
5998 ++input_line_pointer;
5999 return sep;
6000 }
6001
6002 static int
6003 parse_operand_and_eval (expressionS *e, int more)
6004 {
6005 int sep = parse_operand (e, more);
6006 resolve_expression (e);
6007 return sep;
6008 }
6009
6010 static int
6011 parse_operand_maybe_eval (expressionS *e, int more, enum ia64_opnd op)
6012 {
6013 int sep = parse_operand (e, more);
6014 switch (op)
6015 {
6016 case IA64_OPND_IMM14:
6017 case IA64_OPND_IMM22:
6018 case IA64_OPND_IMMU64:
6019 case IA64_OPND_TGT25:
6020 case IA64_OPND_TGT25b:
6021 case IA64_OPND_TGT25c:
6022 case IA64_OPND_TGT64:
6023 case IA64_OPND_TAG13:
6024 case IA64_OPND_TAG13b:
6025 case IA64_OPND_LDXMOV:
6026 break;
6027 default:
6028 resolve_expression (e);
6029 break;
6030 }
6031 return sep;
6032 }
6033
6034 /* Returns the next entry in the opcode table that matches the one in
6035 IDESC, and frees the entry in IDESC. If no matching entry is
6036 found, NULL is returned instead. */
6037
6038 static struct ia64_opcode *
6039 get_next_opcode (struct ia64_opcode *idesc)
6040 {
6041 struct ia64_opcode *next = ia64_find_next_opcode (idesc);
6042 ia64_free_opcode (idesc);
6043 return next;
6044 }
6045
6046 /* Parse the operands for the opcode and find the opcode variant that
6047 matches the specified operands, or NULL if no match is possible. */
6048
6049 static struct ia64_opcode *
6050 parse_operands (struct ia64_opcode *idesc)
6051 {
6052 int i = 0, highest_unmatched_operand, num_operands = 0, num_outputs = 0;
6053 int error_pos, out_of_range_pos, curr_out_of_range_pos, sep = 0;
6054 int reg1, reg2;
6055 char reg_class;
6056 enum ia64_opnd expected_operand = IA64_OPND_NIL;
6057 enum operand_match_result result;
6058 char mnemonic[129];
6059 char *first_arg = 0, *end, *saved_input_pointer;
6060 unsigned int sof;
6061
6062 gas_assert (strlen (idesc->name) <= 128);
6063
6064 strcpy (mnemonic, idesc->name);
6065 if (idesc->operands[2] == IA64_OPND_SOF
6066 || idesc->operands[1] == IA64_OPND_SOF)
6067 {
6068 /* To make the common idiom "alloc loc?=ar.pfs,0,1,0,0" work, we
6069 can't parse the first operand until we have parsed the
6070 remaining operands of the "alloc" instruction. */
6071 SKIP_WHITESPACE ();
6072 first_arg = input_line_pointer;
6073 end = strchr (input_line_pointer, '=');
6074 if (!end)
6075 {
6076 as_bad (_("Expected separator `='"));
6077 return 0;
6078 }
6079 input_line_pointer = end + 1;
6080 ++i;
6081 ++num_outputs;
6082 }
6083
6084 for (; ; ++i)
6085 {
6086 if (i < NELEMS (CURR_SLOT.opnd))
6087 {
6088 enum ia64_opnd op = IA64_OPND_NIL;
6089 if (i < NELEMS (idesc->operands))
6090 op = idesc->operands[i];
6091 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + i, '=', op);
6092 if (CURR_SLOT.opnd[i].X_op == O_absent)
6093 break;
6094 }
6095 else
6096 {
6097 expressionS dummy;
6098
6099 sep = parse_operand (&dummy, '=');
6100 if (dummy.X_op == O_absent)
6101 break;
6102 }
6103
6104 ++num_operands;
6105
6106 if (sep != '=' && sep != ',')
6107 break;
6108
6109 if (sep == '=')
6110 {
6111 if (num_outputs > 0)
6112 as_bad (_("Duplicate equal sign (=) in instruction"));
6113 else
6114 num_outputs = i + 1;
6115 }
6116 }
6117 if (sep != '\0')
6118 {
6119 as_bad (_("Illegal operand separator `%c'"), sep);
6120 return 0;
6121 }
6122
6123 if (idesc->operands[2] == IA64_OPND_SOF
6124 || idesc->operands[1] == IA64_OPND_SOF)
6125 {
6126 /* Map alloc r1=ar.pfs,i,l,o,r to alloc r1=ar.pfs,(i+l+o),(i+l),r.
6127 Note, however, that due to that mapping operand numbers in error
6128 messages for any of the constant operands will not be correct. */
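      /* For example, "alloc r1=ar.pfs,2,3,4,0" (i=2, l=3, o=4, r=0) is
	 rewritten with sof = 2+3+4 = 9 and sol = 2+3 = 5.  */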
6129 know (strcmp (idesc->name, "alloc") == 0);
6130 /* The first operand hasn't been parsed/initialized, yet (but
6131 num_operands intentionally doesn't account for that). */
6132 i = num_operands > 4 ? 2 : 1;
6133 #define FORCE_CONST(n) (CURR_SLOT.opnd[n].X_op == O_constant \
6134 ? CURR_SLOT.opnd[n].X_add_number \
6135 : 0)
6136 sof = set_regstack (FORCE_CONST(i),
6137 FORCE_CONST(i + 1),
6138 FORCE_CONST(i + 2),
6139 FORCE_CONST(i + 3));
6140 #undef FORCE_CONST
6141
6142 /* now we can parse the first arg: */
6143 saved_input_pointer = input_line_pointer;
6144 input_line_pointer = first_arg;
6145 sep = parse_operand_maybe_eval (CURR_SLOT.opnd + 0, '=',
6146 idesc->operands[0]);
6147 if (sep != '=')
6148 --num_outputs; /* force error */
6149 input_line_pointer = saved_input_pointer;
6150
6151 CURR_SLOT.opnd[i].X_add_number = sof;
6152 if (CURR_SLOT.opnd[i + 1].X_op == O_constant
6153 && CURR_SLOT.opnd[i + 2].X_op == O_constant)
6154 CURR_SLOT.opnd[i + 1].X_add_number
6155 = sof - CURR_SLOT.opnd[i + 2].X_add_number;
6156 else
6157 CURR_SLOT.opnd[i + 1].X_op = O_illegal;
6158 CURR_SLOT.opnd[i + 2] = CURR_SLOT.opnd[i + 3];
6159 }
6160
6161 highest_unmatched_operand = -4;
6162 curr_out_of_range_pos = -1;
6163 error_pos = 0;
6164 for (; idesc; idesc = get_next_opcode (idesc))
6165 {
6166 if (num_outputs != idesc->num_outputs)
6167 continue; /* mismatch in # of outputs */
6168 if (highest_unmatched_operand < 0)
6169 highest_unmatched_operand |= 1;
6170 if (num_operands > NELEMS (idesc->operands)
6171 || (num_operands < NELEMS (idesc->operands)
6172 && idesc->operands[num_operands])
6173 || (num_operands > 0 && !idesc->operands[num_operands - 1]))
6174 continue; /* mismatch in number of arguments */
6175 if (highest_unmatched_operand < 0)
6176 highest_unmatched_operand |= 2;
6177
6178 CURR_SLOT.num_fixups = 0;
6179
6180 /* Try to match all operands. If we see an out-of-range operand,
6181 then continue trying to match the rest of the operands, since if
6182 the rest match, then this idesc will give the best error message. */
6183
6184 out_of_range_pos = -1;
6185 for (i = 0; i < num_operands && idesc->operands[i]; ++i)
6186 {
6187 result = operand_match (idesc, i, CURR_SLOT.opnd + i);
6188 if (result != OPERAND_MATCH)
6189 {
6190 if (result != OPERAND_OUT_OF_RANGE)
6191 break;
6192 if (out_of_range_pos < 0)
6193 /* remember position of the first out-of-range operand: */
6194 out_of_range_pos = i;
6195 }
6196 }
6197
6198 /* If we did not match all operands, or if at least one operand was
6199 out-of-range, then this idesc does not match. Keep track of which
6200 idesc matched the most operands before failing. If we have two
6201 idescs that failed at the same position, and one had an out-of-range
6202 operand, then prefer the out-of-range operand. Thus if we have
6203 "add r0=0x1000000,r1" we get an error saying the constant is out
6204 of range instead of an error saying that the constant should have been
6205 a register. */
6206
6207 if (i != num_operands || out_of_range_pos >= 0)
6208 {
6209 if (i > highest_unmatched_operand
6210 || (i == highest_unmatched_operand
6211 && out_of_range_pos > curr_out_of_range_pos))
6212 {
6213 highest_unmatched_operand = i;
6214 if (out_of_range_pos >= 0)
6215 {
6216 expected_operand = idesc->operands[out_of_range_pos];
6217 error_pos = out_of_range_pos;
6218 }
6219 else
6220 {
6221 expected_operand = idesc->operands[i];
6222 error_pos = i;
6223 }
6224 curr_out_of_range_pos = out_of_range_pos;
6225 }
6226 continue;
6227 }
6228
6229 break;
6230 }
6231 if (!idesc)
6232 {
6233 if (expected_operand)
6234 as_bad (_("Operand %u of `%s' should be %s"),
6235 error_pos + 1, mnemonic,
6236 elf64_ia64_operands[expected_operand].desc);
6237 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 1))
6238 as_bad (_("Wrong number of output operands"));
6239 else if (highest_unmatched_operand < 0 && !(highest_unmatched_operand & 2))
6240 as_bad (_("Wrong number of input operands"));
6241 else
6242 as_bad (_("Operand mismatch"));
6243 return 0;
6244 }
6245
6246 /* Check that the instruction doesn't use
6247 - r0, f0, or f1 as output operands
6248 - the same predicate twice as output operands
6249 - r0 as address of a base update load or store
6250 - the same GR as output and address of a base update load
6251 - two even- or two odd-numbered FRs as output operands of a floating
6252 point parallel load.
6253 At most two (conflicting) output (or output-like) operands can exist,
6254 (floating point parallel loads have three outputs, but the base register,
6255 if updated, cannot conflict with the actual outputs). */
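  /* E.g., "ld8 r4 = [r4], 8" uses r4 both as the loaded value and as the
     base-update address, and is diagnosed by the duplicate-use check
     below.  */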
6256 reg2 = reg1 = -1;
6257 for (i = 0; i < num_operands; ++i)
6258 {
6259 int regno = 0;
6260
6261 reg_class = 0;
6262 switch (idesc->operands[i])
6263 {
6264 case IA64_OPND_R1:
6265 case IA64_OPND_R2:
6266 case IA64_OPND_R3:
6267 if (i < num_outputs)
6268 {
6269 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6270 reg_class = 'r';
6271 else if (reg1 < 0)
6272 reg1 = CURR_SLOT.opnd[i].X_add_number;
6273 else if (reg2 < 0)
6274 reg2 = CURR_SLOT.opnd[i].X_add_number;
6275 }
6276 break;
6277 case IA64_OPND_P1:
6278 case IA64_OPND_P2:
6279 if (i < num_outputs)
6280 {
6281 if (reg1 < 0)
6282 reg1 = CURR_SLOT.opnd[i].X_add_number;
6283 else if (reg2 < 0)
6284 reg2 = CURR_SLOT.opnd[i].X_add_number;
6285 }
6286 break;
6287 case IA64_OPND_F1:
6288 case IA64_OPND_F2:
6289 case IA64_OPND_F3:
6290 case IA64_OPND_F4:
6291 if (i < num_outputs)
6292 {
6293 if (CURR_SLOT.opnd[i].X_add_number >= REG_FR
6294 && CURR_SLOT.opnd[i].X_add_number <= REG_FR + 1)
6295 {
6296 reg_class = 'f';
6297 regno = CURR_SLOT.opnd[i].X_add_number - REG_FR;
6298 }
6299 else if (reg1 < 0)
6300 reg1 = CURR_SLOT.opnd[i].X_add_number;
6301 else if (reg2 < 0)
6302 reg2 = CURR_SLOT.opnd[i].X_add_number;
6303 }
6304 break;
6305 case IA64_OPND_MR3:
6306 if (idesc->flags & IA64_OPCODE_POSTINC)
6307 {
6308 if (CURR_SLOT.opnd[i].X_add_number == REG_GR)
6309 reg_class = 'm';
6310 else if (reg1 < 0)
6311 reg1 = CURR_SLOT.opnd[i].X_add_number;
6312 else if (reg2 < 0)
6313 reg2 = CURR_SLOT.opnd[i].X_add_number;
6314 }
6315 break;
6316 default:
6317 break;
6318 }
6319 switch (reg_class)
6320 {
6321 case 0:
6322 break;
6323 default:
6324 as_warn (_("Invalid use of `%c%d' as output operand"), reg_class, regno);
6325 break;
6326 case 'm':
6327 as_warn (_("Invalid use of `r%d' as base update address operand"), regno);
6328 break;
6329 }
6330 }
6331 if (reg1 == reg2)
6332 {
6333 if (reg1 >= REG_GR && reg1 <= REG_GR + 127)
6334 {
6335 reg1 -= REG_GR;
6336 reg_class = 'r';
6337 }
6338 else if (reg1 >= REG_P && reg1 <= REG_P + 63)
6339 {
6340 reg1 -= REG_P;
6341 reg_class = 'p';
6342 }
6343 else if (reg1 >= REG_FR && reg1 <= REG_FR + 127)
6344 {
6345 reg1 -= REG_FR;
6346 reg_class = 'f';
6347 }
6348 else
6349 reg_class = 0;
6350 if (reg_class)
6351 as_warn (_("Invalid duplicate use of `%c%d'"), reg_class, reg1);
6352 }
6353 else if (((reg1 >= REG_FR && reg1 <= REG_FR + 31
6354 && reg2 >= REG_FR && reg2 <= REG_FR + 31)
6355 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6356 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127))
6357 && ! ((reg1 ^ reg2) & 1))
6358 as_warn (_("Invalid simultaneous use of `f%d' and `f%d'"),
6359 reg1 - REG_FR, reg2 - REG_FR);
6360 else if ((reg1 >= REG_FR && reg1 <= REG_FR + 31
6361 && reg2 >= REG_FR + 32 && reg2 <= REG_FR + 127)
6362 || (reg1 >= REG_FR + 32 && reg1 <= REG_FR + 127
6363 && reg2 >= REG_FR && reg2 <= REG_FR + 31))
6364 as_warn (_("Dangerous simultaneous use of `f%d' and `f%d'"),
6365 reg1 - REG_FR, reg2 - REG_FR);
6366 return idesc;
6367 }
6368
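/* Encode the instruction described by SLOT into the word(s) at INSNP.
   Operands carrying a 41-bit extended immediate (e.g. movl, brl) write
   that extra word first via *insnp++, so the instruction proper ends up
   in the following slot.  */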
6369 static void
build_insn (struct slot *slot, bfd_vma *insnp)
6371 {
6372 const struct ia64_operand *odesc, *o2desc;
6373 struct ia64_opcode *idesc = slot->idesc;
6374 bfd_vma insn;
6375 bfd_signed_vma val;
6376 const char *err;
6377 int i;
6378
6379 insn = idesc->opcode | slot->qp_regno;
6380
6381 for (i = 0; i < NELEMS (idesc->operands) && idesc->operands[i]; ++i)
6382 {
6383 if (slot->opnd[i].X_op == O_register
6384 || slot->opnd[i].X_op == O_constant
6385 || slot->opnd[i].X_op == O_index)
6386 val = slot->opnd[i].X_add_number;
6387 else if (slot->opnd[i].X_op == O_big)
6388 {
6389 /* This must be the value 0x10000000000000000. */
6390 gas_assert (idesc->operands[i] == IA64_OPND_IMM8M1U8);
6391 val = 0;
6392 }
6393 else
6394 val = 0;
6395
6396 switch (idesc->operands[i])
6397 {
6398 case IA64_OPND_IMMU64:
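	  /* movl: bits 22..62 of the 64-bit immediate go into the L slot
	     (emitted via *insnp++); the rest is scattered across the
	     imm7b, imm9d, imm5c, ic and i fields of the X-unit insn.  */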
6399 *insnp++ = (val >> 22) & 0x1ffffffffffLL;
6400 insn |= (((val & 0x7f) << 13) | (((val >> 7) & 0x1ff) << 27)
6401 | (((val >> 16) & 0x1f) << 22) | (((val >> 21) & 0x1) << 21)
6402 | (((val >> 63) & 0x1) << 36));
6403 continue;
6404
6405 case IA64_OPND_IMMU62:
6406 val &= 0x3fffffffffffffffULL;
6407 if (val != slot->opnd[i].X_add_number)
6408 as_warn (_("Value truncated to 62 bits"));
6409 *insnp++ = (val >> 21) & 0x1ffffffffffLL;
6410 insn |= (((val & 0xfffff) << 6) | (((val >> 20) & 0x1) << 36));
6411 continue;
6412
6413 case IA64_OPND_TGT64:
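	  /* brl: the IP-relative target is bundle-aligned, so the low
	     4 bits are dropped; 20 bits plus the sign bit stay in this
	     instruction, the middle 39 bits go into the L slot.  */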
6414 val >>= 4;
6415 *insnp++ = ((val >> 20) & 0x7fffffffffLL) << 2;
6416 insn |= ((((val >> 59) & 0x1) << 36)
6417 | (((val >> 0) & 0xfffff) << 13));
6418 continue;
6419
6420 case IA64_OPND_AR3:
6421 val -= REG_AR;
6422 break;
6423
6424 case IA64_OPND_B1:
6425 case IA64_OPND_B2:
6426 val -= REG_BR;
6427 break;
6428
6429 case IA64_OPND_CR3:
6430 val -= REG_CR;
6431 break;
6432
6433 case IA64_OPND_DAHR3:
6434 val -= REG_DAHR;
6435 break;
6436
6437 case IA64_OPND_F1:
6438 case IA64_OPND_F2:
6439 case IA64_OPND_F3:
6440 case IA64_OPND_F4:
6441 val -= REG_FR;
6442 break;
6443
6444 case IA64_OPND_P1:
6445 case IA64_OPND_P2:
6446 val -= REG_P;
6447 break;
6448
6449 case IA64_OPND_R1:
6450 case IA64_OPND_R2:
6451 case IA64_OPND_R3:
6452 case IA64_OPND_R3_2:
6453 case IA64_OPND_CPUID_R3:
6454 case IA64_OPND_DBR_R3:
6455 case IA64_OPND_DTR_R3:
6456 case IA64_OPND_ITR_R3:
6457 case IA64_OPND_IBR_R3:
6458 case IA64_OPND_MR3:
6459 case IA64_OPND_MSR_R3:
6460 case IA64_OPND_PKR_R3:
6461 case IA64_OPND_PMC_R3:
6462 case IA64_OPND_PMD_R3:
6463 case IA64_OPND_DAHR_R3:
6464 case IA64_OPND_RR_R3:
6465 val -= REG_GR;
6466 break;
6467
6468 default:
6469 break;
6470 }
6471
6472 odesc = elf64_ia64_operands + idesc->operands[i];
6473 err = (*odesc->insert) (odesc, val, &insn);
6474 if (err)
6475 as_bad_where (slot->src_file, slot->src_line,
6476 _("Bad operand value: %s"), err);
6477 if (idesc->flags & IA64_OPCODE_PSEUDO)
6478 {
6479 if ((idesc->flags & IA64_OPCODE_F2_EQ_F3)
6480 && odesc == elf64_ia64_operands + IA64_OPND_F3)
6481 {
6482 o2desc = elf64_ia64_operands + IA64_OPND_F2;
6483 (*o2desc->insert) (o2desc, val, &insn);
6484 }
6485 if ((idesc->flags & IA64_OPCODE_LEN_EQ_64MCNT)
6486 && (odesc == elf64_ia64_operands + IA64_OPND_CPOS6a
6487 || odesc == elf64_ia64_operands + IA64_OPND_POS6))
6488 {
6489 o2desc = elf64_ia64_operands + IA64_OPND_LEN6;
6490 (*o2desc->insert) (o2desc, 64 - val, &insn);
6491 }
6492 }
6493 }
6494 *insnp = insn;
6495 }
6496
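/* Pack up to three queued slots into one 16-byte bundle: pick a template
   (user-specified or best match), fill unused slots with NOPs, emit the
   fixups and unwind/debug info for each instruction, and write the two
   64-bit halves of the bundle out little-endian.  */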
6497 static void
emit_one_bundle (void)
6499 {
6500 int manual_bundling_off = 0, manual_bundling = 0;
6501 enum ia64_unit required_unit, insn_unit = 0;
6502 enum ia64_insn_type type[3], insn_type;
6503 unsigned int template_val, orig_template;
6504 bfd_vma insn[3] = { -1, -1, -1 };
6505 struct ia64_opcode *idesc;
6506 int end_of_insn_group = 0, user_template = -1;
6507 int n, i, j, first, curr, last_slot;
6508 bfd_vma t0 = 0, t1 = 0;
6509 struct label_fix *lfix;
6510 bool mark_label;
6511 struct insn_fix *ifix;
6512 char mnemonic[16];
6513 fixS *fix;
6514 char *f;
6515 int addr_mod;
6516
6517 first = (md.curr_slot + NUM_SLOTS - md.num_slots_in_use) % NUM_SLOTS;
6518 know (first >= 0 && first < NUM_SLOTS);
6519 n = MIN (3, md.num_slots_in_use);
6520
  /* Determine template: use user_template if specified, best match
     otherwise:  */
6523
6524 if (md.slot[first].user_template >= 0)
6525 user_template = template_val = md.slot[first].user_template;
6526 else
6527 {
6528 /* Auto select appropriate template. */
6529 memset (type, 0, sizeof (type));
6530 curr = first;
6531 for (i = 0; i < n; ++i)
6532 {
6533 if (md.slot[curr].label_fixups && i != 0)
6534 break;
6535 type[i] = md.slot[curr].idesc->type;
6536 curr = (curr + 1) % NUM_SLOTS;
6537 }
6538 template_val = best_template[type[0]][type[1]][type[2]];
6539 }
6540
6541 /* initialize instructions with appropriate nops: */
6542 for (i = 0; i < 3; ++i)
6543 insn[i] = nop[ia64_templ_desc[template_val].exec_unit[i]];
6544
6545 f = frag_more (16);
6546
6547 /* Check to see if this bundle is at an offset that is a multiple of 16-bytes
6548 from the start of the frag. */
6549 addr_mod = frag_now_fix () & 15;
6550 if (frag_now->has_code && frag_now->insn_addr != addr_mod)
6551 as_bad (_("instruction address is not a multiple of 16"));
6552 frag_now->insn_addr = addr_mod;
6553 frag_now->has_code = 1;
6554
6555 /* now fill in slots with as many insns as possible: */
6556 curr = first;
6557 idesc = md.slot[curr].idesc;
6558 end_of_insn_group = 0;
6559 last_slot = -1;
6560 for (i = 0; i < 3 && md.num_slots_in_use > 0; ++i)
6561 {
6562 /* If we have unwind records, we may need to update some now. */
6563 unw_rec_list *ptr = md.slot[curr].unwind_record;
6564 unw_rec_list *end_ptr = NULL;
6565
6566 if (ptr)
6567 {
6568 /* Find the last prologue/body record in the list for the current
6569 insn, and set the slot number for all records up to that point.
6570 This needs to be done now, because prologue/body records refer to
6571 the current point, not the point after the instruction has been
6572 issued. This matters because there may have been nops emitted
6573 meanwhile. Any non-prologue non-body record followed by a
6574 prologue/body record must also refer to the current point. */
6575 unw_rec_list *last_ptr;
6576
6577 for (j = 1; end_ptr == NULL && j < md.num_slots_in_use; ++j)
6578 end_ptr = md.slot[(curr + j) % NUM_SLOTS].unwind_record;
6579 for (last_ptr = NULL; ptr != end_ptr; ptr = ptr->next)
6580 if (ptr->r.type == prologue || ptr->r.type == prologue_gr
6581 || ptr->r.type == body)
6582 last_ptr = ptr;
6583 if (last_ptr)
6584 {
6585 /* Make last_ptr point one after the last prologue/body
6586 record. */
6587 last_ptr = last_ptr->next;
6588 for (ptr = md.slot[curr].unwind_record; ptr != last_ptr;
6589 ptr = ptr->next)
6590 {
6591 ptr->slot_number = (unsigned long) f + i;
6592 ptr->slot_frag = frag_now;
6593 }
6594 /* Remove the initialized records, so that we won't accidentally
6595 update them again if we insert a nop and continue. */
6596 md.slot[curr].unwind_record = last_ptr;
6597 }
6598 }
6599
6600 manual_bundling_off = md.slot[curr].manual_bundling_off;
6601 if (md.slot[curr].manual_bundling_on)
6602 {
6603 if (curr == first)
6604 manual_bundling = 1;
6605 else
6606 break; /* Need to start a new bundle. */
6607 }
6608
6609 /* If this instruction specifies a template, then it must be the first
6610 instruction of a bundle. */
6611 if (curr != first && md.slot[curr].user_template >= 0)
6612 break;
6613
6614 if (idesc->flags & IA64_OPCODE_SLOT2)
6615 {
6616 if (manual_bundling && !manual_bundling_off)
6617 {
6618 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6619 _("`%s' must be last in bundle"), idesc->name);
6620 if (i < 2)
6621 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6622 }
6623 i = 2;
6624 }
6625 if (idesc->flags & IA64_OPCODE_LAST)
6626 {
6627 int required_slot;
6628 unsigned int required_template;
6629
6630 /* If we need a stop bit after an M slot, our only choice is
6631 template 5 (M;;MI). If we need a stop bit after a B
6632 slot, our only choice is to place it at the end of the
6633 bundle, because the only available templates are MIB,
6634 MBB, BBB, MMB, and MFB. We don't handle anything other
6635 than M and B slots because these are the only kind of
6636 instructions that can have the IA64_OPCODE_LAST bit set. */
6637 required_template = template_val;
6638 switch (idesc->type)
6639 {
6640 case IA64_TYPE_M:
6641 required_slot = 0;
6642 required_template = 5;
6643 break;
6644
6645 case IA64_TYPE_B:
6646 required_slot = 2;
6647 break;
6648
6649 default:
6650 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6651 _("Internal error: don't know how to force %s to end of instruction group"),
6652 idesc->name);
6653 required_slot = i;
6654 break;
6655 }
6656 if (manual_bundling
6657 && (i > required_slot
6658 || (required_slot == 2 && !manual_bundling_off)
6659 || (user_template >= 0
6660 /* Changing from MMI to M;MI is OK. */
6661 && (template_val ^ required_template) > 1)))
6662 {
6663 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6664 _("`%s' must be last in instruction group"),
6665 idesc->name);
6666 if (i < 2 && required_slot == 2 && !manual_bundling_off)
6667 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6668 }
6669 if (required_slot < i)
6670 /* Can't fit this instruction. */
6671 break;
6672
6673 i = required_slot;
6674 if (required_template != template_val)
6675 {
6676 /* If we switch the template, we need to reset the NOPs
6677 after slot i. The slot-types of the instructions ahead
6678 of i never change, so we don't need to worry about
6679 changing NOPs in front of this slot. */
6680 for (j = i; j < 3; ++j)
6681 insn[j] = nop[ia64_templ_desc[required_template].exec_unit[j]];
6682
6683 /* We just picked a template that includes the stop bit in the
6684 middle, so we don't need another one emitted later. */
6685 md.slot[curr].end_of_insn_group = 0;
6686 }
6687 template_val = required_template;
6688 }
6689 if (curr != first && md.slot[curr].label_fixups)
6690 {
6691 if (manual_bundling)
6692 {
6693 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6694 _("Label must be first in a bundle"));
6695 manual_bundling = -1; /* Suppress meaningless post-loop errors. */
6696 }
6697 /* This insn must go into the first slot of a bundle. */
6698 break;
6699 }
6700
6701 if (end_of_insn_group && md.num_slots_in_use >= 1)
6702 {
6703 /* We need an instruction group boundary in the middle of a
	     bundle.  See if we can switch to another template with
6705 an appropriate boundary. */
6706
6707 orig_template = template_val;
6708 if (i == 1 && (user_template == 4
6709 || (user_template < 0
6710 && (ia64_templ_desc[template_val].exec_unit[0]
6711 == IA64_UNIT_M))))
6712 {
6713 template_val = 5;
6714 end_of_insn_group = 0;
6715 }
6716 else if (i == 2 && (user_template == 0
6717 || (user_template < 0
6718 && (ia64_templ_desc[template_val].exec_unit[1]
6719 == IA64_UNIT_I)))
6720 /* This test makes sure we don't switch the template if
6721 the next instruction is one that needs to be first in
6722 an instruction group. Since all those instructions are
6723 in the M group, there is no way such an instruction can
6724 fit in this bundle even if we switch the template. The
6725 reason we have to check for this is that otherwise we
6726 may end up generating "MI;;I M.." which has the deadly
6727 effect that the second M instruction is no longer the
6728 first in the group! --davidm 99/12/16 */
6729 && (idesc->flags & IA64_OPCODE_FIRST) == 0)
6730 {
6731 template_val = 1;
6732 end_of_insn_group = 0;
6733 }
6734 else if (i == 1
6735 && user_template == 0
6736 && !(idesc->flags & IA64_OPCODE_FIRST))
6737 /* Use the next slot. */
6738 continue;
6739 else if (curr != first)
6740 /* can't fit this insn */
6741 break;
6742
6743 if (template_val != orig_template)
6744 /* if we switch the template, we need to reset the NOPs
6745 after slot i. The slot-types of the instructions ahead
6746 of i never change, so we don't need to worry about
6747 changing NOPs in front of this slot. */
6748 for (j = i; j < 3; ++j)
6749 insn[j] = nop[ia64_templ_desc[template_val].exec_unit[j]];
6750 }
6751 required_unit = ia64_templ_desc[template_val].exec_unit[i];
6752
6753 /* resolve dynamic opcodes such as "break", "hint", and "nop": */
6754 if (idesc->type == IA64_TYPE_DYN)
6755 {
6756 enum ia64_opnd opnd1, opnd2;
6757
6758 if ((strcmp (idesc->name, "nop") == 0)
6759 || (strcmp (idesc->name, "break") == 0))
6760 insn_unit = required_unit;
6761 else if (strcmp (idesc->name, "hint") == 0)
6762 {
6763 insn_unit = required_unit;
6764 if (required_unit == IA64_UNIT_B)
6765 {
6766 switch (md.hint_b)
6767 {
6768 case hint_b_ok:
6769 break;
6770 case hint_b_warning:
6771 as_warn (_("hint in B unit may be treated as nop"));
6772 break;
6773 case hint_b_error:
6774 /* When manual bundling is off and there is no
6775 user template, we choose a different unit so
6776 that hint won't go into the current slot. We
6777 will fill the current bundle with nops and
6778 try to put hint into the next bundle. */
6779 if (!manual_bundling && user_template < 0)
6780 insn_unit = IA64_UNIT_I;
6781 else
6782 as_bad (_("hint in B unit can't be used"));
6783 break;
6784 }
6785 }
6786 }
6787 else if (strcmp (idesc->name, "chk.s") == 0
6788 || strcmp (idesc->name, "mov") == 0)
6789 {
6790 insn_unit = IA64_UNIT_M;
6791 if (required_unit == IA64_UNIT_I
6792 || (required_unit == IA64_UNIT_F && template_val == 6))
6793 insn_unit = IA64_UNIT_I;
6794 }
6795 else
6796 as_fatal (_("emit_one_bundle: unexpected dynamic op"));
6797
6798 snprintf (mnemonic, sizeof (mnemonic), "%s.%c",
6799 idesc->name, "?imbfxx"[insn_unit]);
6800 opnd1 = idesc->operands[0];
6801 opnd2 = idesc->operands[1];
6802 ia64_free_opcode (idesc);
6803 idesc = ia64_find_opcode (mnemonic);
6804 /* moves to/from ARs have collisions */
6805 if (opnd1 == IA64_OPND_AR3 || opnd2 == IA64_OPND_AR3)
6806 {
6807 while (idesc != NULL
6808 && (idesc->operands[0] != opnd1
6809 || idesc->operands[1] != opnd2))
6810 idesc = get_next_opcode (idesc);
6811 }
6812 md.slot[curr].idesc = idesc;
6813 }
6814 else
6815 {
6816 insn_type = idesc->type;
6817 insn_unit = IA64_UNIT_NIL;
6818 switch (insn_type)
6819 {
6820 case IA64_TYPE_A:
6821 if (required_unit == IA64_UNIT_I || required_unit == IA64_UNIT_M)
6822 insn_unit = required_unit;
6823 break;
6824 case IA64_TYPE_X: insn_unit = IA64_UNIT_L; break;
6825 case IA64_TYPE_I: insn_unit = IA64_UNIT_I; break;
6826 case IA64_TYPE_M: insn_unit = IA64_UNIT_M; break;
6827 case IA64_TYPE_B: insn_unit = IA64_UNIT_B; break;
6828 case IA64_TYPE_F: insn_unit = IA64_UNIT_F; break;
6829 default: break;
6830 }
6831 }
6832
6833 if (insn_unit != required_unit)
6834 continue; /* Try next slot. */
6835
6836 /* Now is a good time to fix up the labels for this insn. */
6837 mark_label = false;
6838 for (lfix = md.slot[curr].label_fixups; lfix; lfix = lfix->next)
6839 {
6840 S_SET_VALUE (lfix->sym, frag_now_fix () - 16);
6841 symbol_set_frag (lfix->sym, frag_now);
6842 mark_label |= lfix->dw2_mark_labels;
6843 }
6844 for (lfix = md.slot[curr].tag_fixups; lfix; lfix = lfix->next)
6845 {
6846 S_SET_VALUE (lfix->sym, frag_now_fix () - 16 + i);
6847 symbol_set_frag (lfix->sym, frag_now);
6848 }
6849
6850 if (debug_type == DEBUG_DWARF2
6851 || md.slot[curr].loc_directive_seen
6852 || mark_label)
6853 {
6854 bfd_vma addr = frag_now->fr_address + frag_now_fix () - 16 + i;
6855
6856 md.slot[curr].loc_directive_seen = 0;
6857 if (mark_label)
6858 md.slot[curr].debug_line.flags |= DWARF2_FLAG_BASIC_BLOCK;
6859
6860 dwarf2_gen_line_info (addr, &md.slot[curr].debug_line);
6861 }
6862
6863 build_insn (md.slot + curr, insn + i);
6864
6865 ptr = md.slot[curr].unwind_record;
6866 if (ptr)
6867 {
6868 /* Set slot numbers for all remaining unwind records belonging to the
6869 current insn. There can not be any prologue/body unwind records
6870 here. */
6871 for (; ptr != end_ptr; ptr = ptr->next)
6872 {
6873 ptr->slot_number = (unsigned long) f + i;
6874 ptr->slot_frag = frag_now;
6875 }
6876 md.slot[curr].unwind_record = NULL;
6877 }
6878
6879 for (j = 0; j < md.slot[curr].num_fixups; ++j)
6880 {
6881 unsigned long where;
6882
6883 ifix = md.slot[curr].fixup + j;
6884 where = frag_now_fix () - 16 + i;
6885 #ifdef TE_HPUX
6886 /* Relocations for instructions specify the slot in the
6887 bottom two bits of r_offset. The IA64 HP-UX linker
6888 expects PCREL60B relocations to specify slot 2 of an
6889 instruction. gas generates PCREL60B against slot 1. */
6890 if (ifix->code == BFD_RELOC_IA64_PCREL60B)
6891 {
6892 know (i == 1);
6893 ++where;
6894 }
6895 #endif
6896
6897 fix = fix_new_exp (frag_now, where, 8,
6898 &ifix->expr, ifix->is_pcrel, ifix->code);
6899 fix->tc_fix_data.opnd = ifix->opnd;
6900 fix->fx_file = md.slot[curr].src_file;
6901 fix->fx_line = md.slot[curr].src_line;
6902 }
6903
6904 end_of_insn_group = md.slot[curr].end_of_insn_group;
6905
6906 /* This adjustment to "i" must occur after the fix, otherwise the fix
6907 is assigned to the wrong slot, and the VMS linker complains. */
6908 if (required_unit == IA64_UNIT_L)
6909 {
6910 know (i == 1);
6911 /* skip one slot for long/X-unit instructions */
6912 ++i;
6913 }
6914 --md.num_slots_in_use;
6915 last_slot = i;
6916
6917 /* clear slot: */
6918 ia64_free_opcode (md.slot[curr].idesc);
6919 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6920 md.slot[curr].user_template = -1;
6921
6922 if (manual_bundling_off)
6923 {
6924 manual_bundling = 0;
6925 break;
6926 }
6927 curr = (curr + 1) % NUM_SLOTS;
6928 idesc = md.slot[curr].idesc;
6929 }
6930
6931 /* A user template was specified, but the first following instruction did
6932 not fit. This can happen with or without manual bundling. */
6933 if (md.num_slots_in_use > 0 && last_slot < 0)
6934 {
6935 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6936 _("`%s' does not fit into %s template"),
6937 idesc->name, ia64_templ_desc[template_val].name);
6938 /* Drop first insn so we don't livelock. */
6939 --md.num_slots_in_use;
6940 know (curr == first);
6941 ia64_free_opcode (md.slot[curr].idesc);
6942 memset (md.slot + curr, 0, sizeof (md.slot[curr]));
6943 md.slot[curr].user_template = -1;
6944 }
6945 else if (manual_bundling > 0)
6946 {
6947 if (md.num_slots_in_use > 0)
6948 {
6949 if (last_slot >= 2)
6950 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6951 _("`%s' does not fit into bundle"), idesc->name);
6952 else
6953 {
6954 const char *where;
6955
6956 if (template_val == 2)
6957 where = "X slot";
6958 else if (last_slot == 0)
6959 where = "slots 2 or 3";
6960 else
6961 where = "slot 3";
6962 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6963 _("`%s' can't go in %s of %s template"),
6964 idesc->name, where, ia64_templ_desc[template_val].name);
6965 }
6966 }
6967 else
6968 as_bad_where (md.slot[curr].src_file, md.slot[curr].src_line,
6969 _("Missing '}' at end of file"));
6970 }
6971
6972 know (md.num_slots_in_use < NUM_SLOTS);
6973
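  /* Pack the 128-bit bundle: bits 0..4 hold the 5-bit template, whose low
     bit marks a stop at the end of the bundle; bits 5..45 hold slot 0,
     bits 46..86 slot 1 (split across t0 and t1), bits 87..127 slot 2.  */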
6974 t0 = end_of_insn_group | (template_val << 1) | (insn[0] << 5) | (insn[1] << 46);
6975 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
6976
6977 number_to_chars_littleendian (f + 0, t0, 8);
6978 number_to_chars_littleendian (f + 8, t1, 8);
6979 }
6980
6981 int
md_parse_option (int c, const char *arg)
6983 {
6984
6985 switch (c)
6986 {
6987 /* Switches from the Intel assembler. */
6988 case 'm':
6989 if (strcmp (arg, "ilp64") == 0
6990 || strcmp (arg, "lp64") == 0
6991 || strcmp (arg, "p64") == 0)
6992 {
6993 md.flags |= EF_IA_64_ABI64;
6994 }
6995 else if (strcmp (arg, "ilp32") == 0)
6996 {
6997 md.flags &= ~EF_IA_64_ABI64;
6998 }
6999 else if (strcmp (arg, "le") == 0)
7000 {
7001 md.flags &= ~EF_IA_64_BE;
7002 default_big_endian = 0;
7003 }
7004 else if (strcmp (arg, "be") == 0)
7005 {
7006 md.flags |= EF_IA_64_BE;
7007 default_big_endian = 1;
7008 }
7009 else if (startswith (arg, "unwind-check="))
7010 {
7011 arg += 13;
7012 if (strcmp (arg, "warning") == 0)
7013 md.unwind_check = unwind_check_warning;
7014 else if (strcmp (arg, "error") == 0)
7015 md.unwind_check = unwind_check_error;
7016 else
7017 return 0;
7018 }
7019 else if (startswith (arg, "hint.b="))
7020 {
7021 arg += 7;
7022 if (strcmp (arg, "ok") == 0)
7023 md.hint_b = hint_b_ok;
7024 else if (strcmp (arg, "warning") == 0)
7025 md.hint_b = hint_b_warning;
7026 else if (strcmp (arg, "error") == 0)
7027 md.hint_b = hint_b_error;
7028 else
7029 return 0;
7030 }
7031 else if (startswith (arg, "tune="))
7032 {
7033 arg += 5;
7034 if (strcmp (arg, "itanium1") == 0)
7035 md.tune = itanium1;
7036 else if (strcmp (arg, "itanium2") == 0)
7037 md.tune = itanium2;
7038 else
7039 return 0;
7040 }
7041 else
7042 return 0;
7043 break;
7044
7045 case 'N':
7046 if (strcmp (arg, "so") == 0)
7047 {
7048 /* Suppress signon message. */
7049 }
7050 else if (strcmp (arg, "pi") == 0)
7051 {
7052 /* Reject privileged instructions. FIXME */
7053 }
7054 else if (strcmp (arg, "us") == 0)
7055 {
7056 /* Allow union of signed and unsigned range. FIXME */
7057 }
7058 else if (strcmp (arg, "close_fcalls") == 0)
7059 {
7060 /* Do not resolve global function calls. */
7061 }
7062 else
7063 return 0;
7064 break;
7065
7066 case 'C':
7067 /* temp[="prefix"] Insert temporary labels into the object file
7068 symbol table prefixed by "prefix".
7069 Default prefix is ":temp:".
7070 */
7071 break;
7072
7073 case 'a':
7074 /* indirect=<tgt> Assume unannotated indirect branches behavior
7075 according to <tgt> --
7076 exit: branch out from the current context (default)
7077 labels: all labels in context may be branch targets
7078 */
7079 if (!startswith (arg, "indirect="))
7080 return 0;
7081 break;
7082
7083 case 'x':
7084 /* -X conflicts with an ignored option, use -x instead */
7085 md.detect_dv = 1;
7086 if (!arg || strcmp (arg, "explicit") == 0)
7087 {
7088 /* set default mode to explicit */
7089 md.default_explicit_mode = 1;
7090 break;
7091 }
7092 else if (strcmp (arg, "auto") == 0)
7093 {
7094 md.default_explicit_mode = 0;
7095 }
7096 else if (strcmp (arg, "none") == 0)
7097 {
7098 md.detect_dv = 0;
7099 }
7100 else if (strcmp (arg, "debug") == 0)
7101 {
7102 md.debug_dv = 1;
7103 }
7104 else if (strcmp (arg, "debugx") == 0)
7105 {
7106 md.default_explicit_mode = 1;
7107 md.debug_dv = 1;
7108 }
7109 else if (strcmp (arg, "debugn") == 0)
7110 {
7111 md.debug_dv = 1;
7112 md.detect_dv = 0;
7113 }
7114 else
7115 {
7116 as_bad (_("Unrecognized option '-x%s'"), arg);
7117 }
7118 break;
7119
7120 case 'S':
7121 /* nops Print nops statistics. */
7122 break;
7123
7124 /* GNU specific switches for gcc. */
7125 case OPTION_MCONSTANT_GP:
7126 md.flags |= EF_IA_64_CONS_GP;
7127 break;
7128
7129 case OPTION_MAUTO_PIC:
7130 md.flags |= EF_IA_64_NOFUNCDESC_CONS_GP;
7131 break;
7132
7133 default:
7134 return 0;
7135 }
7136
7137 return 1;
7138 }
7139
7140 void
md_show_usage (FILE *stream)
7142 {
7143 fputs (_("\
7144 IA-64 options:\n\
7145 --mconstant-gp mark output file as using the constant-GP model\n\
7146 (sets ELF header flag EF_IA_64_CONS_GP)\n\
7147 --mauto-pic mark output file as using the constant-GP model\n\
7148 without function descriptors (sets ELF header flag\n\
7149 EF_IA_64_NOFUNCDESC_CONS_GP)\n\
7150 -milp32|-milp64|-mlp64|-mp64 select data model (default -mlp64)\n\
7151 -mle | -mbe select little- or big-endian byte order (default -mle)\n\
7152 -mtune=[itanium1|itanium2]\n\
7153 tune for a specific CPU (default -mtune=itanium2)\n\
7154 -munwind-check=[warning|error]\n\
7155 unwind directive check (default -munwind-check=warning)\n\
7156 -mhint.b=[ok|warning|error]\n\
7157 hint.b check (default -mhint.b=error)\n\
7158 -x | -xexplicit turn on dependency violation checking\n"), stream);
7159 /* Note for translators: "automagically" can be translated as "automatically" here. */
7160 fputs (_("\
7161 -xauto automagically remove dependency violations (default)\n\
7162 -xnone turn off dependency violation checking\n\
7163 -xdebug debug dependency violation checker\n\
7164 -xdebugn debug dependency violation checker but turn off\n\
7165 dependency violation checking\n\
7166 -xdebugx debug dependency violation checker and turn on\n\
7167 dependency violation checking\n"),
7168 stream);
7169 }
7170
7171 void
ia64_after_parse_args (void)
7173 {
7174 if (debug_type == DEBUG_STABS)
7175 as_fatal (_("--gstabs is not supported for ia64"));
7176 }
7177
7178 /* Return true if TYPE fits in TEMPL at SLOT. */
7179
7180 static int
match (int templ, int type, int slot)
7182 {
7183 enum ia64_unit unit;
7184 int result;
7185
7186 unit = ia64_templ_desc[templ].exec_unit[slot];
7187 switch (type)
7188 {
7189 case IA64_TYPE_DYN: result = 1; break; /* for nop and break */
7190 case IA64_TYPE_A:
7191 result = (unit == IA64_UNIT_I || unit == IA64_UNIT_M);
7192 break;
7193 case IA64_TYPE_X: result = (unit == IA64_UNIT_L); break;
7194 case IA64_TYPE_I: result = (unit == IA64_UNIT_I); break;
7195 case IA64_TYPE_M: result = (unit == IA64_UNIT_M); break;
7196 case IA64_TYPE_B: result = (unit == IA64_UNIT_B); break;
7197 case IA64_TYPE_F: result = (unit == IA64_UNIT_F); break;
7198 default: result = 0; break;
7199 }
7200 return result;
7201 }
7202
7203 /* For Itanium 1, add a bit of extra goodness if a nop of type F or B would fit
7204 in TEMPL at SLOT. For Itanium 2, add a bit of extra goodness if a nop of
7205 type M or I would fit in TEMPL at SLOT. */
7206
7207 static inline int
extra_goodness (int templ, int slot)
7209 {
7210 switch (md.tune)
7211 {
7212 case itanium1:
7213 if (slot == 1 && match (templ, IA64_TYPE_F, slot))
7214 return 2;
7215 else if (slot == 2 && match (templ, IA64_TYPE_B, slot))
7216 return 1;
7217 else
7218 return 0;
7219 break;
7220 case itanium2:
7221 if (match (templ, IA64_TYPE_M, slot)
7222 || match (templ, IA64_TYPE_I, slot))
	/* Favor M- and I-unit NOPs.  We definitely want to avoid
	   F-unit and B-unit NOPs, which may cause split-issue or
	   less-than-optimal branch-prediction.  */
7226 return 2;
7227 else
7228 return 0;
7229 break;
7230 default:
7231 abort ();
7232 return 0;
7233 }
7234 }
7235
7236 /* This function is called once, at assembler startup time. It sets
7237 up all the tables, etc. that the MD part of the assembler will need
7238 that can be determined before arguments are parsed. */
7239 void
md_begin (void)
7241 {
7242 int i, j, k, t, goodness, best, ok;
7243
7244 md.auto_align = 1;
7245 md.explicit_mode = md.default_explicit_mode;
7246
7247 bfd_set_section_alignment (text_section, 4);
7248
7249 /* Make sure function pointers get initialized. */
7250 target_big_endian = -1;
7251 dot_byteorder (default_big_endian);
7252
7253 alias_hash = str_htab_create ();
7254 alias_name_hash = str_htab_create ();
7255 secalias_hash = str_htab_create ();
7256 secalias_name_hash = str_htab_create ();
7257
7258 pseudo_func[FUNC_DTP_MODULE].u.sym =
7259 symbol_new (".<dtpmod>", undefined_section,
7260 &zero_address_frag, FUNC_DTP_MODULE);
7261
7262 pseudo_func[FUNC_DTP_RELATIVE].u.sym =
7263 symbol_new (".<dtprel>", undefined_section,
7264 &zero_address_frag, FUNC_DTP_RELATIVE);
7265
7266 pseudo_func[FUNC_FPTR_RELATIVE].u.sym =
7267 symbol_new (".<fptr>", undefined_section,
7268 &zero_address_frag, FUNC_FPTR_RELATIVE);
7269
7270 pseudo_func[FUNC_GP_RELATIVE].u.sym =
7271 symbol_new (".<gprel>", undefined_section,
7272 &zero_address_frag, FUNC_GP_RELATIVE);
7273
7274 pseudo_func[FUNC_LT_RELATIVE].u.sym =
7275 symbol_new (".<ltoff>", undefined_section,
7276 &zero_address_frag, FUNC_LT_RELATIVE);
7277
7278 pseudo_func[FUNC_LT_RELATIVE_X].u.sym =
7279 symbol_new (".<ltoffx>", undefined_section,
7280 &zero_address_frag, FUNC_LT_RELATIVE_X);
7281
7282 pseudo_func[FUNC_PC_RELATIVE].u.sym =
7283 symbol_new (".<pcrel>", undefined_section,
7284 &zero_address_frag, FUNC_PC_RELATIVE);
7285
7286 pseudo_func[FUNC_PLT_RELATIVE].u.sym =
7287 symbol_new (".<pltoff>", undefined_section,
7288 &zero_address_frag, FUNC_PLT_RELATIVE);
7289
7290 pseudo_func[FUNC_SEC_RELATIVE].u.sym =
7291 symbol_new (".<secrel>", undefined_section,
7292 &zero_address_frag, FUNC_SEC_RELATIVE);
7293
7294 pseudo_func[FUNC_SEG_RELATIVE].u.sym =
7295 symbol_new (".<segrel>", undefined_section,
7296 &zero_address_frag, FUNC_SEG_RELATIVE);
7297
7298 pseudo_func[FUNC_TP_RELATIVE].u.sym =
7299 symbol_new (".<tprel>", undefined_section,
7300 &zero_address_frag, FUNC_TP_RELATIVE);
7301
7302 pseudo_func[FUNC_LTV_RELATIVE].u.sym =
7303 symbol_new (".<ltv>", undefined_section,
7304 &zero_address_frag, FUNC_LTV_RELATIVE);
7305
7306 pseudo_func[FUNC_LT_FPTR_RELATIVE].u.sym =
7307 symbol_new (".<ltoff.fptr>", undefined_section,
7308 &zero_address_frag, FUNC_LT_FPTR_RELATIVE);
7309
7310 pseudo_func[FUNC_LT_DTP_MODULE].u.sym =
7311 symbol_new (".<ltoff.dtpmod>", undefined_section,
7312 &zero_address_frag, FUNC_LT_DTP_MODULE);
7313
7314 pseudo_func[FUNC_LT_DTP_RELATIVE].u.sym =
7315 symbol_new (".<ltoff.dptrel>", undefined_section,
7316 &zero_address_frag, FUNC_LT_DTP_RELATIVE);
7317
7318 pseudo_func[FUNC_LT_TP_RELATIVE].u.sym =
7319 symbol_new (".<ltoff.tprel>", undefined_section,
7320 &zero_address_frag, FUNC_LT_TP_RELATIVE);
7321
7322 pseudo_func[FUNC_IPLT_RELOC].u.sym =
7323 symbol_new (".<iplt>", undefined_section,
7324 &zero_address_frag, FUNC_IPLT_RELOC);
7325
7326 #ifdef TE_VMS
7327 pseudo_func[FUNC_SLOTCOUNT_RELOC].u.sym =
7328 symbol_new (".<slotcount>", undefined_section,
7329 &zero_address_frag, FUNC_SLOTCOUNT_RELOC);
7330 #endif
7331
7332 if (md.tune != itanium1)
7333 {
      /* Convert MFI NOP bundles into MMI NOP bundles.  */
7335 le_nop[0] = 0x8;
7336 le_nop_stop[0] = 0x9;
7337 }
7338
7339 /* Compute the table of best templates. We compute goodness as a
7340 base 4 value, in which each match counts for 3. Match-failures
7341 result in NOPs and we use extra_goodness() to pick the execution
7342 units that are best suited for issuing the NOP. */
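  /* For instance, a template matching all three slot types scores
     3+3+3; one matching only the first two scores 3+3 plus whatever
     extra_goodness awards for the NOP that fills slot 2.  */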
7343 for (i = 0; i < IA64_NUM_TYPES; ++i)
7344 for (j = 0; j < IA64_NUM_TYPES; ++j)
7345 for (k = 0; k < IA64_NUM_TYPES; ++k)
7346 {
7347 best = 0;
7348 for (t = 0; t < NELEMS (ia64_templ_desc); ++t)
7349 {
7350 goodness = 0;
7351 if (match (t, i, 0))
7352 {
7353 if (match (t, j, 1))
7354 {
7355 if ((t == 2 && j == IA64_TYPE_X) || match (t, k, 2))
7356 goodness = 3 + 3 + 3;
7357 else
7358 goodness = 3 + 3 + extra_goodness (t, 2);
7359 }
7360 else if (match (t, j, 2))
7361 goodness = 3 + 3 + extra_goodness (t, 1);
7362 else
7363 {
7364 goodness = 3;
7365 goodness += extra_goodness (t, 1);
7366 goodness += extra_goodness (t, 2);
7367 }
7368 }
7369 else if (match (t, i, 1))
7370 {
7371 if ((t == 2 && i == IA64_TYPE_X) || match (t, j, 2))
7372 goodness = 3 + 3;
7373 else
7374 goodness = 3 + extra_goodness (t, 2);
7375 }
7376 else if (match (t, i, 2))
7377 goodness = 3 + extra_goodness (t, 1);
7378
7379 if (goodness > best)
7380 {
7381 best = goodness;
7382 best_template[i][j][k] = t;
7383 }
7384 }
7385 }
7386
7387 #ifdef DEBUG_TEMPLATES
7388 /* For debugging changes to the best_template calculations. We don't care
7389 about combinations with invalid instructions, so start the loops at 1. */
7390 for (i = 0; i < IA64_NUM_TYPES; ++i)
7391 for (j = 0; j < IA64_NUM_TYPES; ++j)
7392 for (k = 0; k < IA64_NUM_TYPES; ++k)
7393 {
7394 char type_letter[IA64_NUM_TYPES] = { 'n', 'a', 'i', 'm', 'b', 'f',
7395 'x', 'd' };
7396 fprintf (stderr, "%c%c%c %s\n", type_letter[i], type_letter[j],
7397 type_letter[k],
7398 ia64_templ_desc[best_template[i][j][k]].name);
7399 }
7400 #endif
7401
7402 for (i = 0; i < NUM_SLOTS; ++i)
7403 md.slot[i].user_template = -1;
7404
7405 md.pseudo_hash = str_htab_create ();
7406 for (i = 0; i < NELEMS (pseudo_opcode); ++i)
7407 if (str_hash_insert (md.pseudo_hash, pseudo_opcode[i].name,
7408 pseudo_opcode + i, 0) != NULL)
7409 as_fatal (_("duplicate %s"), pseudo_opcode[i].name);
7410
7411 md.reg_hash = str_htab_create ();
7412 md.dynreg_hash = str_htab_create ();
7413 md.const_hash = str_htab_create ();
7414 md.entry_hash = str_htab_create ();
7415
7416 /* general registers: */
7417 declare_register_set ("r", 128, REG_GR);
7418 declare_register ("gp", REG_GR + 1);
7419 declare_register ("sp", REG_GR + 12);
7420 declare_register ("tp", REG_GR + 13);
7421 declare_register_set ("ret", 4, REG_GR + 8);
7422
7423 /* floating point registers: */
7424 declare_register_set ("f", 128, REG_FR);
7425 declare_register_set ("farg", 8, REG_FR + 8);
7426 declare_register_set ("fret", 8, REG_FR + 8);
7427
7428 /* branch registers: */
7429 declare_register_set ("b", 8, REG_BR);
7430 declare_register ("rp", REG_BR + 0);
7431
7432 /* predicate registers: */
7433 declare_register_set ("p", 64, REG_P);
7434 declare_register ("pr", REG_PR);
7435 declare_register ("pr.rot", REG_PR_ROT);
7436
7437 /* application registers: */
7438 declare_register_set ("ar", 128, REG_AR);
7439 for (i = 0; i < NELEMS (ar); ++i)
7440 declare_register (ar[i].name, REG_AR + ar[i].regnum);
7441
7442 /* control registers: */
7443 declare_register_set ("cr", 128, REG_CR);
7444 for (i = 0; i < NELEMS (cr); ++i)
7445 declare_register (cr[i].name, REG_CR + cr[i].regnum);
7446
7447 /* dahr registers: */
7448 declare_register_set ("dahr", 8, REG_DAHR);
7449
7450 declare_register ("ip", REG_IP);
7451 declare_register ("cfm", REG_CFM);
7452 declare_register ("psr", REG_PSR);
7453 declare_register ("psr.l", REG_PSR_L);
7454 declare_register ("psr.um", REG_PSR_UM);
7455
7456 for (i = 0; i < NELEMS (indirect_reg); ++i)
7457 {
7458 unsigned int regnum = indirect_reg[i].regnum;
7459
7460 md.indregsym[regnum - IND_CPUID] = declare_register (indirect_reg[i].name, regnum);
7461 }
7462
7463 /* pseudo-registers used to specify unwind info: */
7464 declare_register ("psp", REG_PSP);
7465
7466 for (i = 0; i < NELEMS (const_bits); ++i)
7467 if (str_hash_insert (md.const_hash, const_bits[i].name, const_bits + i, 0))
7468 as_fatal (_("duplicate %s"), const_bits[i].name);
7469
7470 /* Set the architecture and machine depending on defaults and command line
7471 options. */
7472 if (md.flags & EF_IA_64_ABI64)
7473 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf64);
7474 else
7475 ok = bfd_set_arch_mach (stdoutput, bfd_arch_ia64, bfd_mach_ia64_elf32);
7476
7477 if (! ok)
7478 as_warn (_("Could not set architecture and machine"));
7479
7480 /* Set the pointer size and pointer shift size depending on md.flags */
7481
7482 if (md.flags & EF_IA_64_ABI64)
7483 {
7484 md.pointer_size = 8; /* pointers are 8 bytes */
      md.pointer_size_shift = 3; /* alignment is 8 bytes = 2^3 */
7486 }
7487 else
7488 {
7489 md.pointer_size = 4; /* pointers are 4 bytes */
7490 md.pointer_size_shift = 2; /* alignment is 4 bytes = 2^2 */
7491 }
7492
7493 md.mem_offset.hint = 0;
7494 md.path = 0;
7495 md.maxpaths = 0;
7496 md.entry_labels = NULL;
7497 }
7498
7499 /* Set the default options in md. Cannot do this in md_begin because
7500 that is called after md_parse_option which is where we set the
7501 options in md based on command line options. */
7502
7503 void
ia64_init (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED)
7505 {
7506 md.flags = MD_FLAGS_DEFAULT;
7507 #ifndef TE_VMS
7508 /* Don't turn on dependency checking for VMS, doesn't work. */
7509 md.detect_dv = 1;
7510 #endif
7511 /* FIXME: We should change it to unwind_check_error someday. */
7512 md.unwind_check = unwind_check_warning;
7513 md.hint_b = hint_b_error;
7514 md.tune = itanium2;
7515 }
7516
7517 /* Return a string for the target object file format. */
7518
7519 const char *
ia64_target_format (void)
7521 {
7522 if (OUTPUT_FLAVOR == bfd_target_elf_flavour)
7523 {
7524 if (md.flags & EF_IA_64_BE)
7525 {
7526 if (md.flags & EF_IA_64_ABI64)
7527 #if defined(TE_AIX50)
7528 return "elf64-ia64-aix-big";
7529 #elif defined(TE_HPUX)
7530 return "elf64-ia64-hpux-big";
7531 #else
7532 return "elf64-ia64-big";
7533 #endif
7534 else
7535 #if defined(TE_AIX50)
7536 return "elf32-ia64-aix-big";
7537 #elif defined(TE_HPUX)
7538 return "elf32-ia64-hpux-big";
7539 #else
7540 return "elf32-ia64-big";
7541 #endif
7542 }
7543 else
7544 {
7545 if (md.flags & EF_IA_64_ABI64)
7546 #if defined (TE_AIX50)
7547 return "elf64-ia64-aix-little";
7548 #elif defined (TE_VMS)
7549 {
7550 md.flags |= EF_IA_64_ARCHVER_1;
7551 return "elf64-ia64-vms";
7552 }
7553 #else
7554 return "elf64-ia64-little";
7555 #endif
7556 else
7557 #ifdef TE_AIX50
7558 return "elf32-ia64-aix-little";
7559 #else
7560 return "elf32-ia64-little";
7561 #endif
7562 }
7563 }
7564 else
7565 return "unknown-format";
7566 }
7567
7568 void
ia64_end_of_source (void)
7570 {
7571 /* terminate insn group upon reaching end of file: */
7572 insn_group_break (1, 0, 0);
7573
7574 /* emits slots we haven't written yet: */
7575 ia64_flush_insns ();
7576
7577 bfd_set_private_flags (stdoutput, md.flags);
7578
7579 md.mem_offset.hint = 0;
7580 }
7581
7582 void
ia64_start_line (void)
7584 {
7585 static int first;
7586
7587 if (!first) {
7588 /* Make sure we don't reference input_line_pointer[-1] when that's
7589 not valid. */
7590 first = 1;
7591 return;
7592 }
7593
7594 if (md.qp.X_op == O_register)
7595 as_bad (_("qualifying predicate not followed by instruction"));
7596 md.qp.X_op = O_absent;
7597
7598 if (ignore_input ())
7599 return;
7600
7601 if (input_line_pointer[0] == ';' && input_line_pointer[-1] == ';')
7602 {
7603 if (md.detect_dv && !md.explicit_mode)
7604 {
7605 static int warned;
7606
7607 if (!warned)
7608 {
7609 warned = 1;
7610 as_warn (_("Explicit stops are ignored in auto mode"));
7611 }
7612 }
7613 else
7614 insn_group_break (1, 0, 0);
7615 }
7616 else if (input_line_pointer[-1] == '{')
7617 {
7618 if (md.manual_bundling)
7619 as_warn (_("Found '{' when manual bundling is already turned on"));
7620 else
7621 CURR_SLOT.manual_bundling_on = 1;
7622 md.manual_bundling = 1;
7623
7624 /* Bundling is only acceptable in explicit mode
7625 or when in default automatic mode. */
7626 if (md.detect_dv && !md.explicit_mode)
7627 {
7628 if (!md.mode_explicitly_set
7629 && !md.default_explicit_mode)
7630 dot_dv_mode ('E');
7631 else
7632 as_warn (_("Found '{' after explicit switch to automatic mode"));
7633 }
7634 }
7635 else if (input_line_pointer[-1] == '}')
7636 {
7637 if (!md.manual_bundling)
7638 as_warn (_("Found '}' when manual bundling is off"));
7639 else
7640 PREV_SLOT.manual_bundling_off = 1;
7641 md.manual_bundling = 0;
7642
7643 /* switch back to automatic mode, if applicable */
7644 if (md.detect_dv
7645 && md.explicit_mode
7646 && !md.mode_explicitly_set
7647 && !md.default_explicit_mode)
7648 dot_dv_mode ('A');
7649 }
7650 }
7651
7652 /* This is a hook for ia64_frob_label, so that it can distinguish tags from
7653 labels. */
7654 static int defining_tag = 0;
7655
7656 int
ia64_unrecognized_line (int ch)
7658 {
7659 switch (ch)
7660 {
7661 case '(':
7662 expression_and_evaluate (&md.qp);
7663 if (*input_line_pointer++ != ')')
7664 {
7665 as_bad (_("Expected ')'"));
7666 return 0;
7667 }
7668 if (md.qp.X_op != O_register)
7669 {
7670 as_bad (_("Qualifying predicate expected"));
7671 return 0;
7672 }
7673 if (md.qp.X_add_number < REG_P || md.qp.X_add_number >= REG_P + 64)
7674 {
7675 as_bad (_("Predicate register expected"));
7676 return 0;
7677 }
7678 return 1;
7679
7680 case '[':
7681 {
7682 char *s;
7683 char c;
7684 symbolS *tag;
7685 int temp;
7686
7687 if (md.qp.X_op == O_register)
7688 {
7689 as_bad (_("Tag must come before qualifying predicate."));
7690 return 0;
7691 }
7692
7693 /* This implements just enough of read_a_source_file in read.c to
7694 recognize labels. */
7695 if (is_name_beginner (*input_line_pointer))
7696 {
7697 c = get_symbol_name (&s);
7698 }
7699 else if (LOCAL_LABELS_FB
7700 && ISDIGIT (*input_line_pointer))
7701 {
7702 temp = 0;
7703 while (ISDIGIT (*input_line_pointer))
7704 temp = (temp * 10) + *input_line_pointer++ - '0';
7705 fb_label_instance_inc (temp);
7706 s = fb_label_name (temp, 0);
7707 c = *input_line_pointer;
7708 }
7709 else
7710 {
7711 s = NULL;
7712 c = '\0';
7713 }
7714 if (c != ':')
7715 {
7716 /* Put ':' back for error messages' sake. */
7717 *input_line_pointer++ = ':';
7718 as_bad (_("Expected ':'"));
7719 return 0;
7720 }
7721
7722 defining_tag = 1;
7723 tag = colon (s);
7724 defining_tag = 0;
7725 /* Put ':' back for error messages' sake. */
7726 *input_line_pointer++ = ':';
7727 if (*input_line_pointer++ != ']')
7728 {
7729 as_bad (_("Expected ']'"));
7730 return 0;
7731 }
7732 if (! tag)
7733 {
7734 as_bad (_("Tag name expected"));
7735 return 0;
7736 }
7737 return 1;
7738 }
7739
7740 default:
7741 break;
7742 }
7743
7744 /* Not a valid line. */
7745 return 0;
7746 }
7747
7748 void
ia64_frob_label (struct symbol *sym)
7750 {
7751 struct label_fix *fix;
7752
7753 /* Tags need special handling since they are not bundle breaks like
7754 labels. */
7755 if (defining_tag)
7756 {
      fix = XOBNEW (&notes, struct label_fix);
7758 fix->sym = sym;
7759 fix->next = CURR_SLOT.tag_fixups;
7760 fix->dw2_mark_labels = false;
7761 CURR_SLOT.tag_fixups = fix;
7762
7763 return;
7764 }
7765
7766 if (bfd_section_flags (now_seg) & SEC_CODE)
7767 {
7768 md.last_text_seg = now_seg;
7769 md.last_text_subseg = now_subseg;
      fix = XOBNEW (&notes, struct label_fix);
7771 fix->sym = sym;
7772 fix->next = CURR_SLOT.label_fixups;
7773 fix->dw2_mark_labels = dwarf2_loc_mark_labels;
7774 CURR_SLOT.label_fixups = fix;
7775
7776 /* Keep track of how many code entry points we've seen. */
7777 if (md.path == md.maxpaths)
7778 {
7779 md.maxpaths += 20;
7780 md.entry_labels = XRESIZEVEC (const char *, md.entry_labels,
7781 md.maxpaths);
7782 }
7783 md.entry_labels[md.path++] = S_GET_NAME (sym);
7784 }
7785 }
7786
7787 #ifdef TE_HPUX
7788 /* The HP-UX linker will give unresolved symbol errors for symbols
7789 that are declared but unused. This routine removes declared,
7790 unused symbols from an object. */
7791 int
ia64_frob_symbol (struct symbol *sym)
7793 {
7794 if ((S_GET_SEGMENT (sym) == bfd_und_section_ptr && ! symbol_used_p (sym) &&
7795 ELF_ST_VISIBILITY (S_GET_OTHER (sym)) == STV_DEFAULT)
7796 || (S_GET_SEGMENT (sym) == bfd_abs_section_ptr
7797 && ! S_IS_EXTERNAL (sym)))
7798 return 1;
7799 return 0;
7800 }
7801 #endif
7802
7803 void
ia64_flush_pending_output (void)
7805 {
7806 if (!md.keep_pending_output
7807 && bfd_section_flags (now_seg) & SEC_CODE)
7808 {
7809 /* ??? This causes many unnecessary stop bits to be emitted.
7810 Unfortunately, it isn't clear if it is safe to remove this. */
7811 insn_group_break (1, 0, 0);
7812 ia64_flush_insns ();
7813 }
7814 }
7815
7816 /* Do ia64-specific expression optimization. All that's done here is
7817 to transform index expressions that are either due to the indexing
7818 of rotating registers or due to the indexing of indirect register
7819 sets. */
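/* For example, "foo[2]" for a rotating set declared with .rotr resolves to
   the third register of that set, while "cpuid[r3]" becomes an O_index
   expression keyed off the corresponding indirect register file.  */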
7820 int
ia64_optimize_expr (expressionS *l, operatorT op, expressionS *r)
7822 {
7823 if (op != O_index)
7824 return 0;
7825 resolve_expression (l);
7826 if (l->X_op == O_register)
7827 {
7828 unsigned num_regs = l->X_add_number >> 16;
7829
7830 resolve_expression (r);
7831 if (num_regs)
7832 {
7833 /* Left side is a .rotX-allocated register. */
7834 if (r->X_op != O_constant)
7835 {
7836 as_bad (_("Rotating register index must be a non-negative constant"));
7837 r->X_add_number = 0;
7838 }
7839 else if ((valueT) r->X_add_number >= num_regs)
7840 {
7841 as_bad (_("Index out of range 0..%u"), num_regs - 1);
7842 r->X_add_number = 0;
7843 }
7844 l->X_add_number = (l->X_add_number & 0xffff) + r->X_add_number;
7845 return 1;
7846 }
7847 else if (l->X_add_number >= IND_CPUID && l->X_add_number <= IND_RR)
7848 {
7849 if (r->X_op != O_register
7850 || r->X_add_number < REG_GR
7851 || r->X_add_number > REG_GR + 127)
7852 {
7853 as_bad (_("Indirect register index must be a general register"));
7854 r->X_add_number = REG_GR;
7855 }
7856 l->X_op = O_index;
7857 l->X_op_symbol = md.indregsym[l->X_add_number - IND_CPUID];
7858 l->X_add_number = r->X_add_number;
7859 return 1;
7860 }
7861 }
7862 as_bad (_("Index can only be applied to rotating or indirect registers"));
  /* Fall back to a register whose use has as few side effects as
     possible, to minimize subsequent error messages.  */
7865 l->X_op = O_register;
7866 l->X_add_number = REG_GR + 3;
7867 return 1;
7868 }
7869
7870 int
ia64_parse_name (char *name, expressionS *e, char *nextcharP)
7872 {
7873 struct const_desc *cdesc;
7874 struct dynreg *dr = 0;
7875 unsigned int idx;
7876 struct symbol *sym;
7877 char *end;
7878
7879 if (*name == '@')
7880 {
7881 enum pseudo_type pseudo_type = PSEUDO_FUNC_NONE;
7882
7883 /* Find what relocation pseudo-function we're dealing with. */
7884 for (idx = 0; idx < NELEMS (pseudo_func); ++idx)
7885 if (pseudo_func[idx].name
7886 && pseudo_func[idx].name[0] == name[1]
7887 && strcmp (pseudo_func[idx].name + 1, name + 2) == 0)
7888 {
7889 pseudo_type = pseudo_func[idx].type;
7890 break;
7891 }
7892 switch (pseudo_type)
7893 {
7894 case PSEUDO_FUNC_RELOC:
7895 end = input_line_pointer;
7896 if (*nextcharP != '(')
7897 {
7898 as_bad (_("Expected '('"));
7899 break;
7900 }
7901 /* Skip '('. */
7902 ++input_line_pointer;
7903 expression (e);
7904 if (*input_line_pointer != ')')
7905 {
7906 as_bad (_("Missing ')'"));
7907 goto done;
7908 }
7909 /* Skip ')'. */
7910 ++input_line_pointer;
7911 #ifdef TE_VMS
7912 if (idx == FUNC_SLOTCOUNT_RELOC)
7913 {
7914 /* @slotcount can accept any expression. Canonicalize. */
7915 e->X_add_symbol = make_expr_symbol (e);
7916 e->X_op = O_symbol;
7917 e->X_add_number = 0;
7918 }
7919 #endif
7920 if (e->X_op != O_symbol)
7921 {
7922 if (e->X_op != O_pseudo_fixup)
7923 {
7924 as_bad (_("Not a symbolic expression"));
7925 goto done;
7926 }
7927 if (idx != FUNC_LT_RELATIVE)
7928 {
7929 as_bad (_("Illegal combination of relocation functions"));
7930 goto done;
7931 }
7932 switch (S_GET_VALUE (e->X_op_symbol))
7933 {
7934 case FUNC_FPTR_RELATIVE:
7935 idx = FUNC_LT_FPTR_RELATIVE; break;
7936 case FUNC_DTP_MODULE:
7937 idx = FUNC_LT_DTP_MODULE; break;
7938 case FUNC_DTP_RELATIVE:
7939 idx = FUNC_LT_DTP_RELATIVE; break;
7940 case FUNC_TP_RELATIVE:
7941 idx = FUNC_LT_TP_RELATIVE; break;
7942 default:
7943 as_bad (_("Illegal combination of relocation functions"));
7944 goto done;
7945 }
7946 }
7947 /* Make sure gas doesn't get rid of local symbols that are used
7948 in relocs. */
7949 e->X_op = O_pseudo_fixup;
7950 e->X_op_symbol = pseudo_func[idx].u.sym;
7951 done:
7952 *nextcharP = *input_line_pointer;
7953 break;
7954
7955 case PSEUDO_FUNC_CONST:
7956 e->X_op = O_constant;
7957 e->X_add_number = pseudo_func[idx].u.ival;
7958 break;
7959
7960 case PSEUDO_FUNC_REG:
7961 e->X_op = O_register;
7962 e->X_add_number = pseudo_func[idx].u.ival;
7963 break;
7964
7965 default:
7966 return 0;
7967 }
7968 return 1;
7969 }
7970
7971 /* first see if NAME is a known register name: */
7972 sym = str_hash_find (md.reg_hash, name);
7973 if (sym)
7974 {
7975 e->X_op = O_register;
7976 e->X_add_number = S_GET_VALUE (sym);
7977 return 1;
7978 }
7979
7980 cdesc = str_hash_find (md.const_hash, name);
7981 if (cdesc)
7982 {
7983 e->X_op = O_constant;
7984 e->X_add_number = cdesc->value;
7985 return 1;
7986 }
7987
7988 /* check for inN, locN, or outN: */
7989 idx = 0;
7990 switch (name[0])
7991 {
7992 case 'i':
7993 if (name[1] == 'n' && ISDIGIT (name[2]))
7994 {
7995 dr = &md.in;
7996 idx = 2;
7997 }
7998 break;
7999
8000 case 'l':
8001 if (name[1] == 'o' && name[2] == 'c' && ISDIGIT (name[3]))
8002 {
8003 dr = &md.loc;
8004 idx = 3;
8005 }
8006 break;
8007
8008 case 'o':
8009 if (name[1] == 'u' && name[2] == 't' && ISDIGIT (name[3]))
8010 {
8011 dr = &md.out;
8012 idx = 3;
8013 }
8014 break;
8015
8016 default:
8017 break;
8018 }
8019
8020 /* Ignore register numbers with leading zeroes, except zero itself. */
8021 if (dr && (name[idx] != '0' || name[idx + 1] == '\0'))
8022 {
8023 unsigned long regnum;
8024
8025 /* The name is inN, locN, or outN; parse the register number. */
8026 regnum = strtoul (name + idx, &end, 10);
8027 if (end > name + idx && *end == '\0' && regnum < 96)
8028 {
8029 if (regnum >= dr->num_regs)
8030 {
8031 if (!dr->num_regs)
8032 as_bad (_("No current frame"));
8033 else
8034 as_bad (_("Register number out of range 0..%u"),
8035 dr->num_regs - 1);
8036 regnum = 0;
8037 }
8038 e->X_op = O_register;
8039 e->X_add_number = dr->base + regnum;
8040 return 1;
8041 }
8042 }
8043
8044 end = xstrdup (name);
8045 name = ia64_canonicalize_symbol_name (end);
8046 if ((dr = str_hash_find (md.dynreg_hash, name)))
8047 {
8048 /* We've got ourselves the name of a rotating register set.
8049 Store the base register number in the low 16 bits of
8050 X_add_number and the size of the register set in the top 16
8051 bits. */
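/* E.g., after `.rotr ptr[4]' the name `ptr' resolves here to its base
   rotating GR, with a register count of 4 packed into the upper bits.  */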
8052 e->X_op = O_register;
8053 e->X_add_number = dr->base | (dr->num_regs << 16);
8054 free (end);
8055 return 1;
8056 }
8057 free (end);
8058 return 0;
8059 }
8060
8061 /* Remove the '#' suffix that indicates a symbol as opposed to a register. */
8062
8063 char *
8064 ia64_canonicalize_symbol_name (char *name)
8065 {
8066 size_t len = strlen (name), full = len;
8067
8068 while (len > 0 && name[len - 1] == '#')
8069 --len;
8070 if (len <= 0)
8071 {
8072 if (full > 0)
8073 as_bad (_("Standalone `#' is illegal"));
8074 }
8075 else if (len < full - 1)
8076 as_warn (_("Redundant `#' suffix operators"));
8077 name[len] = '\0';
8078 return name;
8079 }
8080
8081 /* Return true if idesc is a conditional branch instruction. This excludes
8082 the modulo scheduled branches, and br.ia. Mod-sched branches are excluded
8083 because they always read/write resources regardless of the value of the
8084 qualifying predicate. br.ia must always use p0, and hence is always
8085 taken. Thus this function returns true for branches which can fall
8086 through, and which use no resources if they do fall through. */
8087
8088 static int
8089 is_conditional_branch (struct ia64_opcode *idesc)
8090 {
8091 /* br is a conditional branch. Everything that starts with br. except
8092 br.ia, br.c{loop,top,exit}, and br.w{top,exit} is a conditional branch.
8093 Everything that starts with brl is a conditional branch. */
8094 return (idesc->name[0] == 'b' && idesc->name[1] == 'r'
8095 && (idesc->name[2] == '\0'
8096 || (idesc->name[2] == '.' && idesc->name[3] != 'i'
8097 && idesc->name[3] != 'c' && idesc->name[3] != 'w')
8098 || idesc->name[2] == 'l'
8099 /* br.cond, br.call, br.clr */
8100 || (idesc->name[2] == '.' && idesc->name[3] == 'c'
8101 && (idesc->name[4] == 'a' || idesc->name[4] == 'o'
8102 || (idesc->name[4] == 'l' && idesc->name[5] == 'r')))));
8103 }
8104
8105 /* Return whether the given opcode is a taken branch. If there's any doubt,
8106 returns zero. */
8107
8108 static int
8109 is_taken_branch (struct ia64_opcode *idesc)
8110 {
8111 return ((is_conditional_branch (idesc) && CURR_SLOT.qp_regno == 0)
8112 || startswith (idesc->name, "br.ia"));
8113 }
8114
8115 /* Return whether the given opcode is an interruption or rfi. If there's any
8116 doubt, returns zero. */
8117
8118 static int
8119 is_interruption_or_rfi (struct ia64_opcode *idesc)
8120 {
8121 if (strcmp (idesc->name, "rfi") == 0)
8122 return 1;
8123 return 0;
8124 }
8125
8126 /* Returns the index of the given dependency in the opcode's list of chks, or
8127 -1 if there is no dependency. */
8128
8129 static int
8130 depends_on (int depind, struct ia64_opcode *idesc)
8131 {
8132 int i;
8133 const struct ia64_opcode_dependency *dep = idesc->dependencies;
8134 for (i = 0; i < dep->nchks; i++)
8135 {
8136 if (depind == DEP (dep->chks[i]))
8137 return i;
8138 }
8139 return -1;
8140 }
8141
8142 /* Determine a set of specific resources used for a particular resource
8143 class. Returns the number of specific resources identified. For those
8144 cases which are not determinable statically, the resource returned is
8145 marked nonspecific.
8146
8147 Meanings of value in 'NOTE':
8148 1) only read/write when the register number is explicitly encoded in the
8149 insn.
8150 2) only read CFM when accessing a rotating GR, FR, or PR. mov pr only
8151 accesses CFM when qualifying predicate is in the rotating region.
8152 3) general register value is used to specify an indirect register; not
8153 determinable statically.
8154 4) only read the given resource when bits 7:0 of the indirect index
8155 register value do not match the register number of the resource; not
8156 determinable statically.
8157 5) all rules are implementation specific.
8158 6) only when both the index specified by the reader and the index specified
8159 by the writer have the same value in bits 63:61; not determinable
8160 statically.
8161 7) only access the specified resource when the corresponding mask bit is
8162 set
8163 8) PSR.dfh is only read when these insns reference FR32-127. PSR.dfl is
8164 only read when these insns reference FR2-31
8165 9) PSR.mfl is only written when these insns write FR2-31. PSR.mfh is only
8166 written when these insns write FR32-127
8167 10) The PSR.bn bit is only accessed when one of GR16-31 is specified in the
8168 instruction
8169 11) The target predicates are written independently of PR[qp], but source
8170 registers are only read if PR[qp] is true. Since the state of PR[qp]
8171 cannot statically be determined, all source registers are marked used.
8172 12) This insn only reads the specified predicate register when that
8173 register is the PR[qp].
8174 13) This reference to ld-c only applies to the GR whose value is loaded
8175 with data returned from memory, not the post-incremented address register.
8176 14) The RSE resource includes the implementation-specific RSE internal
8177 state resources. At least one (and possibly more) of these resources are
8178 read by each instruction listed in IC:rse-readers. At least one (and
8179 possibly more) of these resources are written by each insn listed in
8180 IC:rse-writers.
8181 15+16) Represents reserved instructions, which the assembler does not
8182 generate.
8183 17) CR[TPR] has a RAW dependency only between mov-to-CR-TPR and
8184 mov-to-PSR-l or ssm instructions that set PSR.i, PSR.pp or PSR.up.
8185
8186 Memory resources (i.e. locations in memory) are *not* marked or tracked by
8187 this code; there are no dependency violations based on memory access.
8188 */
8189
8190 #define MAX_SPECS 256
8191 #define DV_CHK 1
8192 #define DV_REG 0
8193
8194 static int
8195 specify_resource (const struct ia64_dependency *dep,
8196 struct ia64_opcode *idesc,
8197 /* is this a DV chk or a DV reg? */
8198 int type,
8199 /* returned specific resources */
8200 struct rsrc specs[MAX_SPECS],
8201 /* resource note for this insn's usage */
8202 int note,
8203 /* which execution path to examine */
8204 int path)
8205 {
8206 int count = 0;
8207 int i;
8208 int rsrc_write = 0;
8209 struct rsrc tmpl;
8210
8211 if (dep->mode == IA64_DV_WAW
8212 || (dep->mode == IA64_DV_RAW && type == DV_REG)
8213 || (dep->mode == IA64_DV_WAR && type == DV_CHK))
8214 rsrc_write = 1;
8215
8216 /* template for any resources we identify */
8217 tmpl.dependency = dep;
8218 tmpl.note = note;
8219 tmpl.insn_srlz = tmpl.data_srlz = 0;
8220 tmpl.qp_regno = CURR_SLOT.qp_regno;
8221 tmpl.link_to_qp_branch = 1;
8222 tmpl.mem_offset.hint = 0;
8223 tmpl.mem_offset.offset = 0;
8224 tmpl.mem_offset.base = 0;
8225 tmpl.specific = 1;
8226 tmpl.index = -1;
8227 tmpl.cmp_type = CMP_NONE;
8228 tmpl.depind = 0;
8229 tmpl.file = NULL;
8230 tmpl.line = 0;
8231 tmpl.path = 0;
8232
8233 #define UNHANDLED \
8234 as_warn (_("Unhandled dependency %s for %s (%s), note %d"), \
8235 dep->name, idesc->name, (rsrc_write?"write":"read"), note)
8236 #define KNOWN(REG) (gr_values[REG].known && gr_values[REG].path >= path)
8237
8238 /* we don't need to track these */
8239 if (dep->semantics == IA64_DVS_NONE)
8240 return 0;
8241
8242 switch (dep->specifier)
8243 {
8244 case IA64_RS_AR_K:
8245 if (note == 1)
8246 {
8247 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8248 {
8249 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8250 if (regno >= 0 && regno <= 7)
8251 {
8252 specs[count] = tmpl;
8253 specs[count++].index = regno;
8254 }
8255 }
8256 }
8257 else if (note == 0)
8258 {
8259 for (i = 0; i < 8; i++)
8260 {
8261 specs[count] = tmpl;
8262 specs[count++].index = i;
8263 }
8264 }
8265 else
8266 {
8267 UNHANDLED;
8268 }
8269 break;
8270
8271 case IA64_RS_AR_UNAT:
8272 /* This is a mov =AR or mov AR= instruction. */
8273 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8274 {
8275 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8276 if (regno == AR_UNAT)
8277 {
8278 specs[count++] = tmpl;
8279 }
8280 }
8281 else
8282 {
8283 /* This is a spill/fill, or other instruction that modifies the
8284 unat register. */
8285
8286 /* Unless we can determine the specific bits used, mark the whole
8287 thing; bits 8:3 of the memory address indicate the bit used in
8288 UNAT. The .mem.offset hint may be used to eliminate a small
8289 subset of conflicts. */
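/* For example, a spill annotated with the `.mem.offset' directive, e.g.
     .mem.offset 0, 0
     st8.spill [r2] = r3
   gives the checker enough information to narrow the UNAT dependency to
   a single bit rather than the whole register.  */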
8290 specs[count] = tmpl;
8291 if (md.mem_offset.hint)
8292 {
8293 if (md.debug_dv)
8294 fprintf (stderr, " Using hint for spill/fill\n");
8295 /* The index isn't actually used, just set it to something
8296 approximating the bit index. */
8297 specs[count].index = (md.mem_offset.offset >> 3) & 0x3F;
8298 specs[count].mem_offset.hint = 1;
8299 specs[count].mem_offset.offset = md.mem_offset.offset;
8300 specs[count++].mem_offset.base = md.mem_offset.base;
8301 }
8302 else
8303 {
8304 specs[count++].specific = 0;
8305 }
8306 }
8307 break;
8308
8309 case IA64_RS_AR:
8310 if (note == 1)
8311 {
8312 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8313 {
8314 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8315 if ((regno >= 8 && regno <= 15)
8316 || (regno >= 20 && regno <= 23)
8317 || (regno >= 31 && regno <= 39)
8318 || (regno >= 41 && regno <= 47)
8319 || (regno >= 67 && regno <= 111))
8320 {
8321 specs[count] = tmpl;
8322 specs[count++].index = regno;
8323 }
8324 }
8325 }
8326 else
8327 {
8328 UNHANDLED;
8329 }
8330 break;
8331
8332 case IA64_RS_ARb:
8333 if (note == 1)
8334 {
8335 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
8336 {
8337 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
8338 if ((regno >= 48 && regno <= 63)
8339 || (regno >= 112 && regno <= 127))
8340 {
8341 specs[count] = tmpl;
8342 specs[count++].index = regno;
8343 }
8344 }
8345 }
8346 else if (note == 0)
8347 {
8348 for (i = 48; i < 64; i++)
8349 {
8350 specs[count] = tmpl;
8351 specs[count++].index = i;
8352 }
8353 for (i = 112; i < 128; i++)
8354 {
8355 specs[count] = tmpl;
8356 specs[count++].index = i;
8357 }
8358 }
8359 else
8360 {
8361 UNHANDLED;
8362 }
8363 break;
8364
8365 case IA64_RS_BR:
8366 if (note != 1)
8367 {
8368 UNHANDLED;
8369 }
8370 else
8371 {
8372 if (rsrc_write)
8373 {
8374 for (i = 0; i < idesc->num_outputs; i++)
8375 if (idesc->operands[i] == IA64_OPND_B1
8376 || idesc->operands[i] == IA64_OPND_B2)
8377 {
8378 specs[count] = tmpl;
8379 specs[count++].index =
8380 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8381 }
8382 }
8383 else
8384 {
8385 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8386 if (idesc->operands[i] == IA64_OPND_B1
8387 || idesc->operands[i] == IA64_OPND_B2)
8388 {
8389 specs[count] = tmpl;
8390 specs[count++].index =
8391 CURR_SLOT.opnd[i].X_add_number - REG_BR;
8392 }
8393 }
8394 }
8395 break;
8396
8397 case IA64_RS_CPUID: /* four or more registers */
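/* Indirect register file access, e.g. `mov r2 = cpuid[r3]'; the specific
   CPUID register is only known when r3's value has been tracked.  */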
8398 if (note == 3)
8399 {
8400 if (idesc->operands[!rsrc_write] == IA64_OPND_CPUID_R3)
8401 {
8402 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8403 if (regno >= 0 && regno < NELEMS (gr_values)
8404 && KNOWN (regno))
8405 {
8406 specs[count] = tmpl;
8407 specs[count++].index = gr_values[regno].value & 0xFF;
8408 }
8409 else
8410 {
8411 specs[count] = tmpl;
8412 specs[count++].specific = 0;
8413 }
8414 }
8415 }
8416 else
8417 {
8418 UNHANDLED;
8419 }
8420 break;
8421
8422 case IA64_RS_DBR: /* four or more registers */
8423 if (note == 3)
8424 {
8425 if (idesc->operands[!rsrc_write] == IA64_OPND_DBR_R3)
8426 {
8427 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8428 if (regno >= 0 && regno < NELEMS (gr_values)
8429 && KNOWN (regno))
8430 {
8431 specs[count] = tmpl;
8432 specs[count++].index = gr_values[regno].value & 0xFF;
8433 }
8434 else
8435 {
8436 specs[count] = tmpl;
8437 specs[count++].specific = 0;
8438 }
8439 }
8440 }
8441 else if (note == 0 && !rsrc_write)
8442 {
8443 specs[count] = tmpl;
8444 specs[count++].specific = 0;
8445 }
8446 else
8447 {
8448 UNHANDLED;
8449 }
8450 break;
8451
8452 case IA64_RS_IBR: /* four or more registers */
8453 if (note == 3)
8454 {
8455 if (idesc->operands[!rsrc_write] == IA64_OPND_IBR_R3)
8456 {
8457 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8458 if (regno >= 0 && regno < NELEMS (gr_values)
8459 && KNOWN (regno))
8460 {
8461 specs[count] = tmpl;
8462 specs[count++].index = gr_values[regno].value & 0xFF;
8463 }
8464 else
8465 {
8466 specs[count] = tmpl;
8467 specs[count++].specific = 0;
8468 }
8469 }
8470 }
8471 else
8472 {
8473 UNHANDLED;
8474 }
8475 break;
8476
8477 case IA64_RS_MSR:
8478 if (note == 5)
8479 {
8480 /* These are implementation specific. Force all references to
8481 conflict with all other references. */
8482 specs[count] = tmpl;
8483 specs[count++].specific = 0;
8484 }
8485 else
8486 {
8487 UNHANDLED;
8488 }
8489 break;
8490
8491 case IA64_RS_PKR: /* 16 or more registers */
8492 if (note == 3 || note == 4)
8493 {
8494 if (idesc->operands[!rsrc_write] == IA64_OPND_PKR_R3)
8495 {
8496 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8497 if (regno >= 0 && regno < NELEMS (gr_values)
8498 && KNOWN (regno))
8499 {
8500 if (note == 3)
8501 {
8502 specs[count] = tmpl;
8503 specs[count++].index = gr_values[regno].value & 0xFF;
8504 }
8505 else
8506 for (i = 0; i < NELEMS (gr_values); i++)
8507 {
8508 /* Uses all registers *except* the one in R3. */
8509 if ((unsigned)i != (gr_values[regno].value & 0xFF))
8510 {
8511 specs[count] = tmpl;
8512 specs[count++].index = i;
8513 }
8514 }
8515 }
8516 else
8517 {
8518 specs[count] = tmpl;
8519 specs[count++].specific = 0;
8520 }
8521 }
8522 }
8523 else if (note == 0)
8524 {
8525 /* probe et al. */
8526 specs[count] = tmpl;
8527 specs[count++].specific = 0;
8528 }
8529 break;
8530
8531 case IA64_RS_PMC: /* four or more registers */
8532 if (note == 3)
8533 {
8534 if (idesc->operands[!rsrc_write] == IA64_OPND_PMC_R3
8535 || (!rsrc_write && idesc->operands[1] == IA64_OPND_PMD_R3))
8536
8537 {
8538 int reg_index = ((idesc->operands[1] == IA64_OPND_R3 && !rsrc_write)
8539 ? 1 : !rsrc_write);
8540 int regno = CURR_SLOT.opnd[reg_index].X_add_number - REG_GR;
8541 if (regno >= 0 && regno < NELEMS (gr_values)
8542 && KNOWN (regno))
8543 {
8544 specs[count] = tmpl;
8545 specs[count++].index = gr_values[regno].value & 0xFF;
8546 }
8547 else
8548 {
8549 specs[count] = tmpl;
8550 specs[count++].specific = 0;
8551 }
8552 }
8553 }
8554 else
8555 {
8556 UNHANDLED;
8557 }
8558 break;
8559
8560 case IA64_RS_PMD: /* four or more registers */
8561 if (note == 3)
8562 {
8563 if (idesc->operands[!rsrc_write] == IA64_OPND_PMD_R3)
8564 {
8565 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8566 if (regno >= 0 && regno < NELEMS (gr_values)
8567 && KNOWN (regno))
8568 {
8569 specs[count] = tmpl;
8570 specs[count++].index = gr_values[regno].value & 0xFF;
8571 }
8572 else
8573 {
8574 specs[count] = tmpl;
8575 specs[count++].specific = 0;
8576 }
8577 }
8578 }
8579 else
8580 {
8581 UNHANDLED;
8582 }
8583 break;
8584
8585 case IA64_RS_RR: /* eight registers */
8586 if (note == 6)
8587 {
8588 if (idesc->operands[!rsrc_write] == IA64_OPND_RR_R3)
8589 {
8590 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_GR;
8591 if (regno >= 0 && regno < NELEMS (gr_values)
8592 && KNOWN (regno))
8593 {
8594 specs[count] = tmpl;
8595 specs[count++].index = (gr_values[regno].value >> 61) & 0x7;
8596 }
8597 else
8598 {
8599 specs[count] = tmpl;
8600 specs[count++].specific = 0;
8601 }
8602 }
8603 }
8604 else if (note == 0 && !rsrc_write)
8605 {
8606 specs[count] = tmpl;
8607 specs[count++].specific = 0;
8608 }
8609 else
8610 {
8611 UNHANDLED;
8612 }
8613 break;
8614
8615 case IA64_RS_CR_IRR:
8616 if (note == 0)
8617 {
8618 /* handle mov-from-CR-IVR; it's a read that writes CR[IRR] */
8619 int regno = CURR_SLOT.opnd[1].X_add_number - REG_CR;
8620 if (rsrc_write
8621 && idesc->operands[1] == IA64_OPND_CR3
8622 && regno == CR_IVR)
8623 {
8624 for (i = 0; i < 4; i++)
8625 {
8626 specs[count] = tmpl;
8627 specs[count++].index = CR_IRR0 + i;
8628 }
8629 }
8630 }
8631 else if (note == 1)
8632 {
8633 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8634 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8635 && regno >= CR_IRR0
8636 && regno <= CR_IRR3)
8637 {
8638 specs[count] = tmpl;
8639 specs[count++].index = regno;
8640 }
8641 }
8642 else
8643 {
8644 UNHANDLED;
8645 }
8646 break;
8647
8648 case IA64_RS_CR_IIB:
8649 if (note != 0)
8650 {
8651 UNHANDLED;
8652 }
8653 else
8654 {
8655 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8656 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8657 && (regno == CR_IIB0 || regno == CR_IIB1))
8658 {
8659 specs[count] = tmpl;
8660 specs[count++].index = regno;
8661 }
8662 }
8663 break;
8664
8665 case IA64_RS_CR_LRR:
8666 if (note != 1)
8667 {
8668 UNHANDLED;
8669 }
8670 else
8671 {
8672 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8673 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3
8674 && (regno == CR_LRR0 || regno == CR_LRR1))
8675 {
8676 specs[count] = tmpl;
8677 specs[count++].index = regno;
8678 }
8679 }
8680 break;
8681
8682 case IA64_RS_CR:
8683 if (note == 1)
8684 {
8685 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
8686 {
8687 specs[count] = tmpl;
8688 specs[count++].index =
8689 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
8690 }
8691 }
8692 else
8693 {
8694 UNHANDLED;
8695 }
8696 break;
8697
8698 case IA64_RS_DAHR:
8699 if (note == 0)
8700 {
8701 if (idesc->operands[!rsrc_write] == IA64_OPND_DAHR3)
8702 {
8703 specs[count] = tmpl;
8704 specs[count++].index =
8705 CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_DAHR;
8706 }
8707 }
8708 else
8709 {
8710 UNHANDLED;
8711 }
8712 break;
8713
8714 case IA64_RS_FR:
8715 case IA64_RS_FRb:
8716 if (note != 1)
8717 {
8718 UNHANDLED;
8719 }
8720 else if (rsrc_write)
8721 {
8722 if (dep->specifier == IA64_RS_FRb
8723 && idesc->operands[0] == IA64_OPND_F1)
8724 {
8725 specs[count] = tmpl;
8726 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_FR;
8727 }
8728 }
8729 else
8730 {
8731 for (i = idesc->num_outputs; i < NELEMS (idesc->operands); i++)
8732 {
8733 if (idesc->operands[i] == IA64_OPND_F2
8734 || idesc->operands[i] == IA64_OPND_F3
8735 || idesc->operands[i] == IA64_OPND_F4)
8736 {
8737 specs[count] = tmpl;
8738 specs[count++].index =
8739 CURR_SLOT.opnd[i].X_add_number - REG_FR;
8740 }
8741 }
8742 }
8743 break;
8744
8745 case IA64_RS_GR:
8746 if (note == 13)
8747 {
8748 /* This reference applies only to the GR whose value is loaded with
8749 data returned from memory. */
8750 specs[count] = tmpl;
8751 specs[count++].index = CURR_SLOT.opnd[0].X_add_number - REG_GR;
8752 }
8753 else if (note == 1)
8754 {
8755 if (rsrc_write)
8756 {
8757 for (i = 0; i < idesc->num_outputs; i++)
8758 if (idesc->operands[i] == IA64_OPND_R1
8759 || idesc->operands[i] == IA64_OPND_R2
8760 || idesc->operands[i] == IA64_OPND_R3)
8761 {
8762 specs[count] = tmpl;
8763 specs[count++].index =
8764 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8765 }
8766 if (idesc->flags & IA64_OPCODE_POSTINC)
8767 for (i = 0; i < NELEMS (idesc->operands); i++)
8768 if (idesc->operands[i] == IA64_OPND_MR3)
8769 {
8770 specs[count] = tmpl;
8771 specs[count++].index =
8772 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8773 }
8774 }
8775 else
8776 {
8777 /* Look for anything that reads a GR. */
8778 for (i = 0; i < NELEMS (idesc->operands); i++)
8779 {
8780 if (idesc->operands[i] == IA64_OPND_MR3
8781 || idesc->operands[i] == IA64_OPND_CPUID_R3
8782 || idesc->operands[i] == IA64_OPND_DBR_R3
8783 || idesc->operands[i] == IA64_OPND_IBR_R3
8784 || idesc->operands[i] == IA64_OPND_MSR_R3
8785 || idesc->operands[i] == IA64_OPND_PKR_R3
8786 || idesc->operands[i] == IA64_OPND_PMC_R3
8787 || idesc->operands[i] == IA64_OPND_PMD_R3
8788 || idesc->operands[i] == IA64_OPND_DAHR_R3
8789 || idesc->operands[i] == IA64_OPND_RR_R3
8790 || ((i >= idesc->num_outputs)
8791 && (idesc->operands[i] == IA64_OPND_R1
8792 || idesc->operands[i] == IA64_OPND_R2
8793 || idesc->operands[i] == IA64_OPND_R3
8794 /* addl source register. */
8795 || idesc->operands[i] == IA64_OPND_R3_2)))
8796 {
8797 specs[count] = tmpl;
8798 specs[count++].index =
8799 CURR_SLOT.opnd[i].X_add_number - REG_GR;
8800 }
8801 }
8802 }
8803 }
8804 else
8805 {
8806 UNHANDLED;
8807 }
8808 break;
8809
8810 /* This is the same as IA64_RS_PRr, except that the register range is
8811 from 1 - 15, and there are no rotating register reads/writes here. */
8812 case IA64_RS_PR:
8813 if (note == 0)
8814 {
8815 for (i = 1; i < 16; i++)
8816 {
8817 specs[count] = tmpl;
8818 specs[count++].index = i;
8819 }
8820 }
8821 else if (note == 7)
8822 {
8823 valueT mask = 0;
8824 /* Mark only those registers indicated by the mask. */
8825 if (rsrc_write)
8826 {
8827 mask = CURR_SLOT.opnd[2].X_add_number;
8828 for (i = 1; i < 16; i++)
8829 if (mask & ((valueT) 1 << i))
8830 {
8831 specs[count] = tmpl;
8832 specs[count++].index = i;
8833 }
8834 }
8835 else
8836 {
8837 UNHANDLED;
8838 }
8839 }
8840 else if (note == 11) /* note 11 implies note 1 as well */
8841 {
8842 if (rsrc_write)
8843 {
8844 for (i = 0; i < idesc->num_outputs; i++)
8845 {
8846 if (idesc->operands[i] == IA64_OPND_P1
8847 || idesc->operands[i] == IA64_OPND_P2)
8848 {
8849 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8850 if (regno >= 1 && regno < 16)
8851 {
8852 specs[count] = tmpl;
8853 specs[count++].index = regno;
8854 }
8855 }
8856 }
8857 }
8858 else
8859 {
8860 UNHANDLED;
8861 }
8862 }
8863 else if (note == 12)
8864 {
8865 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8866 {
8867 specs[count] = tmpl;
8868 specs[count++].index = CURR_SLOT.qp_regno;
8869 }
8870 }
8871 else if (note == 1)
8872 {
8873 if (rsrc_write)
8874 {
8875 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8876 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8877 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
8878 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
8879
8880 if ((idesc->operands[0] == IA64_OPND_P1
8881 || idesc->operands[0] == IA64_OPND_P2)
8882 && p1 >= 1 && p1 < 16)
8883 {
8884 specs[count] = tmpl;
8885 specs[count].cmp_type =
8886 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
8887 specs[count++].index = p1;
8888 }
8889 if ((idesc->operands[1] == IA64_OPND_P1
8890 || idesc->operands[1] == IA64_OPND_P2)
8891 && p2 >= 1 && p2 < 16)
8892 {
8893 specs[count] = tmpl;
8894 specs[count].cmp_type =
8895 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
8896 specs[count++].index = p2;
8897 }
8898 }
8899 else
8900 {
8901 if (CURR_SLOT.qp_regno >= 1 && CURR_SLOT.qp_regno < 16)
8902 {
8903 specs[count] = tmpl;
8904 specs[count++].index = CURR_SLOT.qp_regno;
8905 }
8906 if (idesc->operands[1] == IA64_OPND_PR)
8907 {
8908 for (i = 1; i < 16; i++)
8909 {
8910 specs[count] = tmpl;
8911 specs[count++].index = i;
8912 }
8913 }
8914 }
8915 }
8916 else
8917 {
8918 UNHANDLED;
8919 }
8920 break;
8921
8922 /* This is the general case for PRs. IA64_RS_PR and IA64_RS_PR63 are
8923 simplified cases of this. */
8924 case IA64_RS_PRr:
8925 if (note == 0)
8926 {
8927 for (i = 16; i < 63; i++)
8928 {
8929 specs[count] = tmpl;
8930 specs[count++].index = i;
8931 }
8932 }
8933 else if (note == 7)
8934 {
8935 valueT mask = 0;
8936 /* Mark only those registers indicated by the mask. */
8937 if (rsrc_write
8938 && idesc->operands[0] == IA64_OPND_PR)
8939 {
8940 mask = CURR_SLOT.opnd[2].X_add_number;
8941 if (mask & ((valueT) 1 << 16))
8942 for (i = 16; i < 63; i++)
8943 {
8944 specs[count] = tmpl;
8945 specs[count++].index = i;
8946 }
8947 }
8948 else if (rsrc_write
8949 && idesc->operands[0] == IA64_OPND_PR_ROT)
8950 {
8951 for (i = 16; i < 63; i++)
8952 {
8953 specs[count] = tmpl;
8954 specs[count++].index = i;
8955 }
8956 }
8957 else
8958 {
8959 UNHANDLED;
8960 }
8961 }
8962 else if (note == 11) /* note 11 implies note 1 as well */
8963 {
8964 if (rsrc_write)
8965 {
8966 for (i = 0; i < idesc->num_outputs; i++)
8967 {
8968 if (idesc->operands[i] == IA64_OPND_P1
8969 || idesc->operands[i] == IA64_OPND_P2)
8970 {
8971 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
8972 if (regno >= 16 && regno < 63)
8973 {
8974 specs[count] = tmpl;
8975 specs[count++].index = regno;
8976 }
8977 }
8978 }
8979 }
8980 else
8981 {
8982 UNHANDLED;
8983 }
8984 }
8985 else if (note == 12)
8986 {
8987 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
8988 {
8989 specs[count] = tmpl;
8990 specs[count++].index = CURR_SLOT.qp_regno;
8991 }
8992 }
8993 else if (note == 1)
8994 {
8995 if (rsrc_write)
8996 {
8997 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
8998 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
8999 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9000 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9001
9002 if ((idesc->operands[0] == IA64_OPND_P1
9003 || idesc->operands[0] == IA64_OPND_P2)
9004 && p1 >= 16 && p1 < 63)
9005 {
9006 specs[count] = tmpl;
9007 specs[count].cmp_type =
9008 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9009 specs[count++].index = p1;
9010 }
9011 if ((idesc->operands[1] == IA64_OPND_P1
9012 || idesc->operands[1] == IA64_OPND_P2)
9013 && p2 >= 16 && p2 < 63)
9014 {
9015 specs[count] = tmpl;
9016 specs[count].cmp_type =
9017 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9018 specs[count++].index = p2;
9019 }
9020 }
9021 else
9022 {
9023 if (CURR_SLOT.qp_regno >= 16 && CURR_SLOT.qp_regno < 63)
9024 {
9025 specs[count] = tmpl;
9026 specs[count++].index = CURR_SLOT.qp_regno;
9027 }
9028 if (idesc->operands[1] == IA64_OPND_PR)
9029 {
9030 for (i = 16; i < 63; i++)
9031 {
9032 specs[count] = tmpl;
9033 specs[count++].index = i;
9034 }
9035 }
9036 }
9037 }
9038 else
9039 {
9040 UNHANDLED;
9041 }
9042 break;
9043
9044 case IA64_RS_PSR:
9045 /* Verify that the instruction is using the PSR bit indicated in
9046 dep->regindex. */
9047 if (note == 0)
9048 {
9049 if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_UM)
9050 {
9051 if (dep->regindex < 6)
9052 {
9053 specs[count++] = tmpl;
9054 }
9055 }
9056 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR)
9057 {
9058 if (dep->regindex < 32
9059 || dep->regindex == 35
9060 || dep->regindex == 36
9061 || (!rsrc_write && dep->regindex == PSR_CPL))
9062 {
9063 specs[count++] = tmpl;
9064 }
9065 }
9066 else if (idesc->operands[!rsrc_write] == IA64_OPND_PSR_L)
9067 {
9068 if (dep->regindex < 32
9069 || dep->regindex == 35
9070 || dep->regindex == 36
9071 || (rsrc_write && dep->regindex == PSR_CPL))
9072 {
9073 specs[count++] = tmpl;
9074 }
9075 }
9076 else
9077 {
9078 /* Several PSR bits have very specific dependencies. */
9079 switch (dep->regindex)
9080 {
9081 default:
9082 specs[count++] = tmpl;
9083 break;
9084 case PSR_IC:
9085 if (rsrc_write)
9086 {
9087 specs[count++] = tmpl;
9088 }
9089 else
9090 {
9091 /* Only certain CR accesses use PSR.ic */
9092 if (idesc->operands[0] == IA64_OPND_CR3
9093 || idesc->operands[1] == IA64_OPND_CR3)
9094 {
9095 int reg_index =
9096 ((idesc->operands[0] == IA64_OPND_CR3)
9097 ? 0 : 1);
9098 int regno =
9099 CURR_SLOT.opnd[reg_index].X_add_number - REG_CR;
9100
9101 switch (regno)
9102 {
9103 default:
9104 break;
9105 case CR_ITIR:
9106 case CR_IFS:
9107 case CR_IIM:
9108 case CR_IIP:
9109 case CR_IPSR:
9110 case CR_ISR:
9111 case CR_IFA:
9112 case CR_IHA:
9113 case CR_IIB0:
9114 case CR_IIB1:
9115 case CR_IIPA:
9116 specs[count++] = tmpl;
9117 break;
9118 }
9119 }
9120 }
9121 break;
9122 case PSR_CPL:
9123 if (rsrc_write)
9124 {
9125 specs[count++] = tmpl;
9126 }
9127 else
9128 {
9129 /* Only some AR accesses use cpl */
9130 if (idesc->operands[0] == IA64_OPND_AR3
9131 || idesc->operands[1] == IA64_OPND_AR3)
9132 {
9133 int reg_index =
9134 ((idesc->operands[0] == IA64_OPND_AR3)
9135 ? 0 : 1);
9136 int regno =
9137 CURR_SLOT.opnd[reg_index].X_add_number - REG_AR;
9138
9139 if (regno == AR_ITC
9140 || regno == AR_RUC
9141 || (reg_index == 0
9142 && (regno == AR_RSC
9143 || (regno >= AR_K0
9144 && regno <= AR_K7))))
9145 {
9146 specs[count++] = tmpl;
9147 }
9148 }
9149 else
9150 {
9151 specs[count++] = tmpl;
9152 }
9153 break;
9154 }
9155 }
9156 }
9157 }
9158 else if (note == 7)
9159 {
9160 valueT mask = 0;
9161 if (idesc->operands[0] == IA64_OPND_IMMU24)
9162 {
9163 mask = CURR_SLOT.opnd[0].X_add_number;
9164 }
9165 else
9166 {
9167 UNHANDLED;
9168 }
9169 if (mask & ((valueT) 1 << dep->regindex))
9170 {
9171 specs[count++] = tmpl;
9172 }
9173 }
9174 else if (note == 8)
9175 {
9176 int min = dep->regindex == PSR_DFL ? 2 : 32;
9177 int max = dep->regindex == PSR_DFL ? 31 : 127;
9178 /* dfh is read on FR32-127; dfl is read on FR2-31 */
9179 for (i = 0; i < NELEMS (idesc->operands); i++)
9180 {
9181 if (idesc->operands[i] == IA64_OPND_F1
9182 || idesc->operands[i] == IA64_OPND_F2
9183 || idesc->operands[i] == IA64_OPND_F3
9184 || idesc->operands[i] == IA64_OPND_F4)
9185 {
9186 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9187 if (reg >= min && reg <= max)
9188 {
9189 specs[count++] = tmpl;
9190 }
9191 }
9192 }
9193 }
9194 else if (note == 9)
9195 {
9196 int min = dep->regindex == PSR_MFL ? 2 : 32;
9197 int max = dep->regindex == PSR_MFL ? 31 : 127;
9198 /* mfh is read on writes to FR32-127; mfl is read on writes to
9199 FR2-31 */
9200 for (i = 0; i < idesc->num_outputs; i++)
9201 {
9202 if (idesc->operands[i] == IA64_OPND_F1)
9203 {
9204 int reg = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9205 if (reg >= min && reg <= max)
9206 {
9207 specs[count++] = tmpl;
9208 }
9209 }
9210 }
9211 }
9212 else if (note == 10)
9213 {
9214 for (i = 0; i < NELEMS (idesc->operands); i++)
9215 {
9216 if (idesc->operands[i] == IA64_OPND_R1
9217 || idesc->operands[i] == IA64_OPND_R2
9218 || idesc->operands[i] == IA64_OPND_R3)
9219 {
9220 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9221 if (regno >= 16 && regno <= 31)
9222 {
9223 specs[count++] = tmpl;
9224 }
9225 }
9226 }
9227 }
9228 else
9229 {
9230 UNHANDLED;
9231 }
9232 break;
9233
9234 case IA64_RS_AR_FPSR:
9235 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3)
9236 {
9237 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9238 if (regno == AR_FPSR)
9239 {
9240 specs[count++] = tmpl;
9241 }
9242 }
9243 else
9244 {
9245 specs[count++] = tmpl;
9246 }
9247 break;
9248
9249 case IA64_RS_ARX:
9250 /* Handle all AR[REG] resources */
9251 if (note == 0 || note == 1)
9252 {
9253 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_AR;
9254 if (idesc->operands[!rsrc_write] == IA64_OPND_AR3
9255 && regno == dep->regindex)
9256 {
9257 specs[count++] = tmpl;
9258 }
9259 /* other AR[REG] resources may be affected by AR accesses */
9260 else if (idesc->operands[0] == IA64_OPND_AR3)
9261 {
9262 /* AR[] writes */
9263 regno = CURR_SLOT.opnd[0].X_add_number - REG_AR;
9264 switch (dep->regindex)
9265 {
9266 default:
9267 break;
9268 case AR_BSP:
9269 case AR_RNAT:
9270 if (regno == AR_BSPSTORE)
9271 {
9272 specs[count++] = tmpl;
9273 }
9274 /* Fall through. */
9275 case AR_RSC:
9276 if (!rsrc_write &&
9277 (regno == AR_BSPSTORE
9278 || regno == AR_RNAT))
9279 {
9280 specs[count++] = tmpl;
9281 }
9282 break;
9283 }
9284 }
9285 else if (idesc->operands[1] == IA64_OPND_AR3)
9286 {
9287 /* AR[] reads */
9288 regno = CURR_SLOT.opnd[1].X_add_number - REG_AR;
9289 switch (dep->regindex)
9290 {
9291 default:
9292 break;
9293 case AR_RSC:
9294 if (regno == AR_BSPSTORE || regno == AR_RNAT)
9295 {
9296 specs[count++] = tmpl;
9297 }
9298 break;
9299 }
9300 }
9301 else
9302 {
9303 specs[count++] = tmpl;
9304 }
9305 }
9306 else
9307 {
9308 UNHANDLED;
9309 }
9310 break;
9311
9312 case IA64_RS_CRX:
9313 /* Handle all CR[REG] resources.
9314 ??? FIXME: Rule 17 isn't really handled correctly. */
9315 if (note == 0 || note == 1 || note == 17)
9316 {
9317 if (idesc->operands[!rsrc_write] == IA64_OPND_CR3)
9318 {
9319 int regno = CURR_SLOT.opnd[!rsrc_write].X_add_number - REG_CR;
9320 if (regno == dep->regindex)
9321 {
9322 specs[count++] = tmpl;
9323 }
9324 else if (!rsrc_write)
9325 {
9326 /* Reads from CR[IVR] affect other resources. */
9327 if (regno == CR_IVR)
9328 {
9329 if ((dep->regindex >= CR_IRR0
9330 && dep->regindex <= CR_IRR3)
9331 || dep->regindex == CR_TPR)
9332 {
9333 specs[count++] = tmpl;
9334 }
9335 }
9336 }
9337 }
9338 else
9339 {
9340 specs[count++] = tmpl;
9341 }
9342 }
9343 else
9344 {
9345 UNHANDLED;
9346 }
9347 break;
9348
9349 case IA64_RS_INSERVICE:
9350 /* look for write of EOI (67) or read of IVR (65) */
9351 if ((idesc->operands[0] == IA64_OPND_CR3
9352 && CURR_SLOT.opnd[0].X_add_number - REG_CR == CR_EOI)
9353 || (idesc->operands[1] == IA64_OPND_CR3
9354 && CURR_SLOT.opnd[1].X_add_number - REG_CR == CR_IVR))
9355 {
9356 specs[count++] = tmpl;
9357 }
9358 break;
9359
9360 case IA64_RS_GR0:
9361 if (note == 1)
9362 {
9363 specs[count++] = tmpl;
9364 }
9365 else
9366 {
9367 UNHANDLED;
9368 }
9369 break;
9370
9371 case IA64_RS_CFM:
9372 if (note != 2)
9373 {
9374 specs[count++] = tmpl;
9375 }
9376 else
9377 {
9378 /* Check if any of the registers accessed are in the rotating region.
9379 mov to/from pr accesses CFM only when qp_regno is in the rotating
9380 region */
9381 for (i = 0; i < NELEMS (idesc->operands); i++)
9382 {
9383 if (idesc->operands[i] == IA64_OPND_R1
9384 || idesc->operands[i] == IA64_OPND_R2
9385 || idesc->operands[i] == IA64_OPND_R3)
9386 {
9387 int num = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9388 /* Assumes that md.rot.num_regs is always valid */
9389 if (md.rot.num_regs > 0
9390 && num > 31
9391 && num < 31 + md.rot.num_regs)
9392 {
9393 specs[count] = tmpl;
9394 specs[count++].specific = 0;
9395 }
9396 }
9397 else if (idesc->operands[i] == IA64_OPND_F1
9398 || idesc->operands[i] == IA64_OPND_F2
9399 || idesc->operands[i] == IA64_OPND_F3
9400 || idesc->operands[i] == IA64_OPND_F4)
9401 {
9402 int num = CURR_SLOT.opnd[i].X_add_number - REG_FR;
9403 if (num > 31)
9404 {
9405 specs[count] = tmpl;
9406 specs[count++].specific = 0;
9407 }
9408 }
9409 else if (idesc->operands[i] == IA64_OPND_P1
9410 || idesc->operands[i] == IA64_OPND_P2)
9411 {
9412 int num = CURR_SLOT.opnd[i].X_add_number - REG_P;
9413 if (num > 15)
9414 {
9415 specs[count] = tmpl;
9416 specs[count++].specific = 0;
9417 }
9418 }
9419 }
9420 if (CURR_SLOT.qp_regno > 15)
9421 {
9422 specs[count] = tmpl;
9423 specs[count++].specific = 0;
9424 }
9425 }
9426 break;
9427
9428 /* This is the same as IA64_RS_PRr, except simplified to account for
9429 the fact that there is only one register. */
9430 case IA64_RS_PR63:
9431 if (note == 0)
9432 {
9433 specs[count++] = tmpl;
9434 }
9435 else if (note == 7)
9436 {
9437 valueT mask = 0;
9438 if (idesc->operands[2] == IA64_OPND_IMM17)
9439 mask = CURR_SLOT.opnd[2].X_add_number;
9440 if (mask & ((valueT) 1 << 63))
9441 specs[count++] = tmpl;
9442 }
9443 else if (note == 11)
9444 {
9445 if ((idesc->operands[0] == IA64_OPND_P1
9446 && CURR_SLOT.opnd[0].X_add_number - REG_P == 63)
9447 || (idesc->operands[1] == IA64_OPND_P2
9448 && CURR_SLOT.opnd[1].X_add_number - REG_P == 63))
9449 {
9450 specs[count++] = tmpl;
9451 }
9452 }
9453 else if (note == 12)
9454 {
9455 if (CURR_SLOT.qp_regno == 63)
9456 {
9457 specs[count++] = tmpl;
9458 }
9459 }
9460 else if (note == 1)
9461 {
9462 if (rsrc_write)
9463 {
9464 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9465 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9466 int or_andcm = strstr (idesc->name, "or.andcm") != NULL;
9467 int and_orcm = strstr (idesc->name, "and.orcm") != NULL;
9468
9469 if (p1 == 63
9470 && (idesc->operands[0] == IA64_OPND_P1
9471 || idesc->operands[0] == IA64_OPND_P2))
9472 {
9473 specs[count] = tmpl;
9474 specs[count++].cmp_type =
9475 (or_andcm ? CMP_OR : (and_orcm ? CMP_AND : CMP_NONE));
9476 }
9477 if (p2 == 63
9478 && (idesc->operands[1] == IA64_OPND_P1
9479 || idesc->operands[1] == IA64_OPND_P2))
9480 {
9481 specs[count] = tmpl;
9482 specs[count++].cmp_type =
9483 (or_andcm ? CMP_AND : (and_orcm ? CMP_OR : CMP_NONE));
9484 }
9485 }
9486 else
9487 {
9488 if (CURR_SLOT.qp_regno == 63)
9489 {
9490 specs[count++] = tmpl;
9491 }
9492 }
9493 }
9494 else
9495 {
9496 UNHANDLED;
9497 }
9498 break;
9499
9500 case IA64_RS_RSE:
9501 /* FIXME we can identify some individual RSE written resources, but RSE
9502 read resources have not yet been completely identified, so for now
9503 treat RSE as a single resource */
9504 if (startswith (idesc->name, "mov"))
9505 {
9506 if (rsrc_write)
9507 {
9508 if (idesc->operands[0] == IA64_OPND_AR3
9509 && CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE)
9510 {
9511 specs[count++] = tmpl;
9512 }
9513 }
9514 else
9515 {
9516 if (idesc->operands[0] == IA64_OPND_AR3)
9517 {
9518 if (CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_BSPSTORE
9519 || CURR_SLOT.opnd[0].X_add_number - REG_AR == AR_RNAT)
9520 {
9521 specs[count++] = tmpl;
9522 }
9523 }
9524 else if (idesc->operands[1] == IA64_OPND_AR3)
9525 {
9526 if (CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSP
9527 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_BSPSTORE
9528 || CURR_SLOT.opnd[1].X_add_number - REG_AR == AR_RNAT)
9529 {
9530 specs[count++] = tmpl;
9531 }
9532 }
9533 }
9534 }
9535 else
9536 {
9537 specs[count++] = tmpl;
9538 }
9539 break;
9540
9541 case IA64_RS_ANY:
9542 /* FIXME -- do any of these need to be non-specific? */
9543 specs[count++] = tmpl;
9544 break;
9545
9546 default:
9547 as_bad (_("Unrecognized dependency specifier %d\n"), dep->specifier);
9548 break;
9549 }
9550
9551 return count;
9552 }
9553
9554 /* Clear branch flags on marked resources. This breaks the link between the
9555 QP of the marking instruction and a subsequent branch on the same QP. */
9556
9557 static void
9558 clear_qp_branch_flag (valueT mask)
9559 {
9560 int i;
9561 for (i = 0; i < regdepslen; i++)
9562 {
9563 valueT bit = ((valueT) 1 << regdeps[i].qp_regno);
9564 if ((bit & mask) != 0)
9565 {
9566 regdeps[i].link_to_qp_branch = 0;
9567 }
9568 }
9569 }
9570
9571 /* MASK contains 2 and only 2 PRs which are mutually exclusive. Remove
9572 any mutexes which contain one of the PRs and create new ones when
9573 needed. */
9574
9575 static int
9576 update_qp_mutex (valueT mask)
9577 {
9578 int i;
9579 int add = 0;
9580
9581 i = 0;
9582 while (i < qp_mutexeslen)
9583 {
9584 if ((qp_mutexes[i].prmask & mask) != 0)
9585 {
9586 /* If it destroys and creates the same mutex, do nothing. */
9587 if (qp_mutexes[i].prmask == mask
9588 && qp_mutexes[i].path == md.path)
9589 {
9590 i++;
9591 add = -1;
9592 }
9593 else
9594 {
9595 int keep = 0;
9596
9597 if (md.debug_dv)
9598 {
9599 fprintf (stderr, " Clearing mutex relation");
9600 print_prmask (qp_mutexes[i].prmask);
9601 fprintf (stderr, "\n");
9602 }
9603
9604 /* Deal with an old mutex containing 3 or more PRs only if
9605 the new mutex is on the same execution path as it.
9606
9607 FIXME: The 3+ mutex support is incomplete.
9608 dot_pred_rel () may be a better place to fix it. */
9609 if (qp_mutexes[i].path == md.path)
9610 {
9611 /* If it is a proper subset of the mutex, create a
9612 new mutex. */
9613 if (add == 0
9614 && (qp_mutexes[i].prmask & mask) == mask)
9615 add = 1;
9616
9617 qp_mutexes[i].prmask &= ~mask;
9618 if (qp_mutexes[i].prmask & (qp_mutexes[i].prmask - 1))
9619 {
9620 /* Modify the mutex if there are more than one
9621 PR left. */
9622 keep = 1;
9623 i++;
9624 }
9625 }
9626
9627 if (keep == 0)
9628 /* Remove the mutex. */
9629 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9630 }
9631 }
9632 else
9633 ++i;
9634 }
9635
9636 if (add == 1)
9637 add_qp_mutex (mask);
9638
9639 return add;
9640 }
9641
9642 /* Remove any mutexes which contain any of the PRs indicated in the mask.
9643
9644 Any changes to a PR clears the mutex relations which include that PR. */
9645
9646 static void
9647 clear_qp_mutex (valueT mask)
9648 {
9649 int i;
9650
9651 i = 0;
9652 while (i < qp_mutexeslen)
9653 {
9654 if ((qp_mutexes[i].prmask & mask) != 0)
9655 {
9656 if (md.debug_dv)
9657 {
9658 fprintf (stderr, " Clearing mutex relation");
9659 print_prmask (qp_mutexes[i].prmask);
9660 fprintf (stderr, "\n");
9661 }
9662 qp_mutexes[i] = qp_mutexes[--qp_mutexeslen];
9663 }
9664 else
9665 ++i;
9666 }
9667 }
9668
9669 /* Clear implies relations which contain PRs in the given masks.
9670 P1_MASK indicates the source of the implies relation, while P2_MASK
9671 indicates the implied PR. */
9672
9673 static void
9674 clear_qp_implies (valueT p1_mask, valueT p2_mask)
9675 {
9676 int i;
9677
9678 i = 0;
9679 while (i < qp_implieslen)
9680 {
9681 if ((((valueT) 1 << qp_implies[i].p1) & p1_mask) != 0
9682 || (((valueT) 1 << qp_implies[i].p2) & p2_mask) != 0)
9683 {
9684 if (md.debug_dv)
9685 fprintf (stderr, "Clearing implied relation PR%d->PR%d\n",
9686 qp_implies[i].p1, qp_implies[i].p2);
9687 qp_implies[i] = qp_implies[--qp_implieslen];
9688 }
9689 else
9690 ++i;
9691 }
9692 }
9693
9694 /* Add the PRs specified to the list of implied relations. */
9695
9696 static void
9697 add_qp_imply (int p1, int p2)
9698 {
9699 valueT mask;
9700 valueT bit;
9701 int i;
9702
9703 /* p0 is not meaningful here. */
9704 if (p1 == 0 || p2 == 0)
9705 abort ();
9706
9707 if (p1 == p2)
9708 return;
9709
9710 /* If it exists already, ignore it. */
9711 for (i = 0; i < qp_implieslen; i++)
9712 {
9713 if (qp_implies[i].p1 == p1
9714 && qp_implies[i].p2 == p2
9715 && qp_implies[i].path == md.path
9716 && !qp_implies[i].p2_branched)
9717 return;
9718 }
9719
9720 if (qp_implieslen == qp_impliestotlen)
9721 {
9722 qp_impliestotlen += 20;
9723 qp_implies = XRESIZEVEC (struct qp_imply, qp_implies, qp_impliestotlen);
9724 }
9725 if (md.debug_dv)
9726 fprintf (stderr, " Registering PR%d implies PR%d\n", p1, p2);
9727 qp_implies[qp_implieslen].p1 = p1;
9728 qp_implies[qp_implieslen].p2 = p2;
9729 qp_implies[qp_implieslen].path = md.path;
9730 qp_implies[qp_implieslen++].p2_branched = 0;
9731
9732 /* Add in the implied transitive relations; for everything that p2 implies,
9733 make p1 imply that, too; for everything that implies p1, make it imply p2
9734 as well. */
9735 for (i = 0; i < qp_implieslen; i++)
9736 {
9737 if (qp_implies[i].p1 == p2)
9738 add_qp_imply (p1, qp_implies[i].p2);
9739 if (qp_implies[i].p2 == p1)
9740 add_qp_imply (qp_implies[i].p1, p2);
9741 }
9742 /* Add in mutex relations implied by this implies relation; for each mutex
9743 relation containing p2, duplicate it and replace p2 with p1. */
9744 bit = (valueT) 1 << p1;
9745 mask = (valueT) 1 << p2;
9746 for (i = 0; i < qp_mutexeslen; i++)
9747 {
9748 if (qp_mutexes[i].prmask & mask)
9749 add_qp_mutex ((qp_mutexes[i].prmask & ~mask) | bit);
9750 }
9751 }
9752
9753 /* Add the PRs specified in the mask to the mutex list; this means that only
9754 one of the PRs can be true at any time. PR0 should never be included in
9755 the mask. */
9756
9757 static void
9758 add_qp_mutex (valueT mask)
9759 {
9760 if (mask & 0x1)
9761 abort ();
9762
9763 if (qp_mutexeslen == qp_mutexestotlen)
9764 {
9765 qp_mutexestotlen += 20;
9766 qp_mutexes = XRESIZEVEC (struct qpmutex, qp_mutexes, qp_mutexestotlen);
9767 }
9768 if (md.debug_dv)
9769 {
9770 fprintf (stderr, " Registering mutex on");
9771 print_prmask (mask);
9772 fprintf (stderr, "\n");
9773 }
9774 qp_mutexes[qp_mutexeslen].path = md.path;
9775 qp_mutexes[qp_mutexeslen++].prmask = mask;
9776 }
9777
9778 static int
9779 has_suffix_p (const char *name, const char *suffix)
9780 {
9781 size_t namelen = strlen (name);
9782 size_t sufflen = strlen (suffix);
9783
9784 if (namelen <= sufflen)
9785 return 0;
9786 return strcmp (name + namelen - sufflen, suffix) == 0;
9787 }
9788
9789 static void
9790 clear_register_values (void)
9791 {
9792 int i;
9793 if (md.debug_dv)
9794 fprintf (stderr, " Clearing register values\n");
9795 for (i = 1; i < NELEMS (gr_values); i++)
9796 gr_values[i].known = 0;
9797 }
9798
9799 /* Keep track of register values/changes which affect DV tracking.
9800
9801 optimization note: should add a flag to classes of insns where otherwise we
9802 have to examine a group of strings to identify them. */
9803
9804 static void
9805 note_register_values (struct ia64_opcode *idesc)
9806 {
9807 valueT qp_changemask = 0;
9808 int i;
9809
9810 /* Invalidate values for registers being written to. */
9811 for (i = 0; i < idesc->num_outputs; i++)
9812 {
9813 if (idesc->operands[i] == IA64_OPND_R1
9814 || idesc->operands[i] == IA64_OPND_R2
9815 || idesc->operands[i] == IA64_OPND_R3)
9816 {
9817 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9818 if (regno > 0 && regno < NELEMS (gr_values))
9819 gr_values[regno].known = 0;
9820 }
9821 else if (idesc->operands[i] == IA64_OPND_R3_2)
9822 {
9823 int regno = CURR_SLOT.opnd[i].X_add_number - REG_GR;
9824 if (regno > 0 && regno < 4)
9825 gr_values[regno].known = 0;
9826 }
9827 else if (idesc->operands[i] == IA64_OPND_P1
9828 || idesc->operands[i] == IA64_OPND_P2)
9829 {
9830 int regno = CURR_SLOT.opnd[i].X_add_number - REG_P;
9831 qp_changemask |= (valueT) 1 << regno;
9832 }
9833 else if (idesc->operands[i] == IA64_OPND_PR)
9834 {
9835 if (idesc->operands[2] & (valueT) 0x10000)
9836 qp_changemask = ~(valueT) 0x1FFFF | idesc->operands[2];
9837 else
9838 qp_changemask = idesc->operands[2];
9839 break;
9840 }
9841 else if (idesc->operands[i] == IA64_OPND_PR_ROT)
9842 {
9843 if (idesc->operands[1] & ((valueT) 1 << 43))
9844 qp_changemask = -((valueT) 1 << 44) | idesc->operands[1];
9845 else
9846 qp_changemask = idesc->operands[1];
9847 qp_changemask &= ~(valueT) 0xFFFF;
9848 break;
9849 }
9850 }
9851
9852 /* Always clear qp branch flags on any PR change. */
9853 /* FIXME there may be exceptions for certain compares. */
9854 clear_qp_branch_flag (qp_changemask);
9855
9856 /* Invalidate rotating registers on insns which affect RRBs in CFM. */
9857 if (idesc->flags & IA64_OPCODE_MOD_RRBS)
9858 {
9859 qp_changemask |= ~(valueT) 0xFFFF;
9860 if (strcmp (idesc->name, "clrrrb.pr") != 0)
9861 {
9862 for (i = 32; i < 32 + md.rot.num_regs; i++)
9863 gr_values[i].known = 0;
9864 }
9865 clear_qp_mutex (qp_changemask);
9866 clear_qp_implies (qp_changemask, qp_changemask);
9867 }
9868 /* After a call, all register values are undefined, except those marked
9869 as "safe". */
9870 else if (startswith (idesc->name, "br.call")
9871 || startswith (idesc->name, "brl.call"))
9872 {
9873 /* FIXME keep GR values which are marked as "safe_across_calls" */
9874 clear_register_values ();
9875 clear_qp_mutex (~qp_safe_across_calls);
9876 clear_qp_implies (~qp_safe_across_calls, ~qp_safe_across_calls);
9877 clear_qp_branch_flag (~qp_safe_across_calls);
9878 }
9879 else if (is_interruption_or_rfi (idesc)
9880 || is_taken_branch (idesc))
9881 {
9882 clear_register_values ();
9883 clear_qp_mutex (~(valueT) 0);
9884 clear_qp_implies (~(valueT) 0, ~(valueT) 0);
9885 }
9886 /* Look for mutex and implies relations. */
9887 else if ((idesc->operands[0] == IA64_OPND_P1
9888 || idesc->operands[0] == IA64_OPND_P2)
9889 && (idesc->operands[1] == IA64_OPND_P1
9890 || idesc->operands[1] == IA64_OPND_P2))
9891 {
9892 int p1 = CURR_SLOT.opnd[0].X_add_number - REG_P;
9893 int p2 = CURR_SLOT.opnd[1].X_add_number - REG_P;
9894 valueT p1mask = (p1 != 0) ? (valueT) 1 << p1 : 0;
9895 valueT p2mask = (p2 != 0) ? (valueT) 1 << p2 : 0;
9896
9897 /* If both PRs are PR0, we can't really do anything. */
9898 if (p1 == 0 && p2 == 0)
9899 {
9900 if (md.debug_dv)
9901 fprintf (stderr, " Ignoring PRs due to inclusion of p0\n");
9902 }
9903 /* In general, clear mutexes and implies which include P1 or P2,
9904 with the following exceptions. */
9905 else if (has_suffix_p (idesc->name, ".or.andcm")
9906 || has_suffix_p (idesc->name, ".and.orcm"))
9907 {
9908 clear_qp_implies (p2mask, p1mask);
9909 }
9910 else if (has_suffix_p (idesc->name, ".andcm")
9911 || has_suffix_p (idesc->name, ".and"))
9912 {
9913 clear_qp_implies (0, p1mask | p2mask);
9914 }
9915 else if (has_suffix_p (idesc->name, ".orcm")
9916 || has_suffix_p (idesc->name, ".or"))
9917 {
9918 clear_qp_mutex (p1mask | p2mask);
9919 clear_qp_implies (p1mask | p2mask, 0);
9920 }
9921 else
9922 {
9923 int added = 0;
9924
9925 clear_qp_implies (p1mask | p2mask, p1mask | p2mask);
9926
9927 /* If one of the PRs is PR0, we call clear_qp_mutex. */
9928 if (p1 == 0 || p2 == 0)
9929 clear_qp_mutex (p1mask | p2mask);
9930 else
9931 added = update_qp_mutex (p1mask | p2mask);
9932
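/* For example, `(p3) cmp.eq.unc p1, p2 = r4, r5' makes p1 and p2 mutex
   and records that either of them being set implies p3 was true.  */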
9933 if (CURR_SLOT.qp_regno == 0
9934 || has_suffix_p (idesc->name, ".unc"))
9935 {
9936 if (added == 0 && p1 && p2)
9937 add_qp_mutex (p1mask | p2mask);
9938 if (CURR_SLOT.qp_regno != 0)
9939 {
9940 if (p1)
9941 add_qp_imply (p1, CURR_SLOT.qp_regno);
9942 if (p2)
9943 add_qp_imply (p2, CURR_SLOT.qp_regno);
9944 }
9945 }
9946 }
9947 }
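/* Track constants moved into GRs so that later indirect accesses such as
   `mov rX = cpuid[rY]' (see specify_resource) can be narrowed to a
   specific register instead of being marked nonspecific.  */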
9948 /* Look for mov imm insns into GRs. */
9949 else if (idesc->operands[0] == IA64_OPND_R1
9950 && (idesc->operands[1] == IA64_OPND_IMM22
9951 || idesc->operands[1] == IA64_OPND_IMMU64)
9952 && CURR_SLOT.opnd[1].X_op == O_constant
9953 && (strcmp (idesc->name, "mov") == 0
9954 || strcmp (idesc->name, "movl") == 0))
9955 {
9956 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9957 if (regno > 0 && regno < NELEMS (gr_values))
9958 {
9959 gr_values[regno].known = 1;
9960 gr_values[regno].value = CURR_SLOT.opnd[1].X_add_number;
9961 gr_values[regno].path = md.path;
9962 if (md.debug_dv)
9963 {
9964 fprintf (stderr, " Know gr%d = ", regno);
9965 fprintf_vma (stderr, gr_values[regno].value);
9966 fputs ("\n", stderr);
9967 }
9968 }
9969 }
9970 /* Look for dep.z imm insns. */
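/* E.g., `dep.z r5 = 3, 4, 2' deposits the 2-bit value 3 at bit position 4,
   so r5 is known to hold 0x30 afterwards.  */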
9971 else if (idesc->operands[0] == IA64_OPND_R1
9972 && idesc->operands[1] == IA64_OPND_IMM8
9973 && strcmp (idesc->name, "dep.z") == 0)
9974 {
9975 int regno = CURR_SLOT.opnd[0].X_add_number - REG_GR;
9976 if (regno > 0 && regno < NELEMS (gr_values))
9977 {
9978 valueT value = CURR_SLOT.opnd[1].X_add_number;
9979
9980 if (CURR_SLOT.opnd[3].X_add_number < 64)
9981 value &= ((valueT)1 << CURR_SLOT.opnd[3].X_add_number) - 1;
9982 value <<= CURR_SLOT.opnd[2].X_add_number;
9983 gr_values[regno].known = 1;
9984 gr_values[regno].value = value;
9985 gr_values[regno].path = md.path;
9986 if (md.debug_dv)
9987 {
9988 fprintf (stderr, " Know gr%d = ", regno);
9989 fprintf_vma (stderr, gr_values[regno].value);
9990 fputs ("\n", stderr);
9991 }
9992 }
9993 }
9994 else
9995 {
9996 clear_qp_mutex (qp_changemask);
9997 clear_qp_implies (qp_changemask, qp_changemask);
9998 }
9999 }
10000
10001 /* Return whether the given predicate registers are currently mutex. */
10002
10003 static int
10004 qp_mutex (int p1, int p2, int path)
10005 {
10006 int i;
10007 valueT mask;
10008
10009 if (p1 != p2)
10010 {
10011 mask = ((valueT) 1 << p1) | (valueT) 1 << p2;
10012 for (i = 0; i < qp_mutexeslen; i++)
10013 {
10014 if (qp_mutexes[i].path >= path
10015 && (qp_mutexes[i].prmask & mask) == mask)
10016 return 1;
10017 }
10018 }
10019 return 0;
10020 }
10021
10022 /* Return whether the given resource is in the given insn's list of chks.
10023 Return 1 if the conflict is absolutely determined, 2 if it's a potential
10024 conflict. */
10025
10026 static int
10027 resources_match (struct rsrc *rs,
10028 struct ia64_opcode *idesc,
10029 int note,
10030 int qp_regno,
10031 int path)
10032 {
10033 struct rsrc specs[MAX_SPECS];
10034 int count;
10035
10036 /* If the marked resource's qp_regno and the given qp_regno are mutex,
10037 we don't need to check. One exception is note 11, which indicates that
10038 target predicates are written regardless of PR[qp]. */
10039 if (qp_mutex (rs->qp_regno, qp_regno, path)
10040 && note != 11)
10041 return 0;
10042
10043 count = specify_resource (rs->dependency, idesc, DV_CHK, specs, note, path);
10044 while (count-- > 0)
10045 {
10046 /* UNAT checking is a bit more specific than other resources */
10047 if (rs->dependency->specifier == IA64_RS_AR_UNAT
10048 && specs[count].mem_offset.hint
10049 && rs->mem_offset.hint)
10050 {
10051 if (rs->mem_offset.base == specs[count].mem_offset.base)
10052 {
10053 if (((rs->mem_offset.offset >> 3) & 0x3F) ==
10054 ((specs[count].mem_offset.offset >> 3) & 0x3F))
10055 return 1;
10056 else
10057 continue;
10058 }
10059 }
10060
10061 /* Skip apparent PR write conflicts where both writes are an AND or both
10062 writes are an OR. */
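/* E.g., two `cmp.eq.and' insns targeting the same predicate may share an
   instruction group, whereas mixing `.and'- and `.or'-type writers to the
   same predicate is still a violation.  */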
10063 if (rs->dependency->specifier == IA64_RS_PR
10064 || rs->dependency->specifier == IA64_RS_PRr
10065 || rs->dependency->specifier == IA64_RS_PR63)
10066 {
10067 if (specs[count].cmp_type != CMP_NONE
10068 && specs[count].cmp_type == rs->cmp_type)
10069 {
10070 if (md.debug_dv)
10071 fprintf (stderr, " %s on parallel compare allowed (PR%d)\n",
10072 dv_mode[rs->dependency->mode],
10073 rs->dependency->specifier != IA64_RS_PR63 ?
10074 specs[count].index : 63);
10075 continue;
10076 }
10077 if (md.debug_dv)
10078 fprintf (stderr,
10079 " %s on parallel compare conflict %s vs %s on PR%d\n",
10080 dv_mode[rs->dependency->mode],
10081 dv_cmp_type[rs->cmp_type],
10082 dv_cmp_type[specs[count].cmp_type],
10083 rs->dependency->specifier != IA64_RS_PR63 ?
10084 specs[count].index : 63);
10085
10086 }
10087
10088 /* If either resource is not specific, conservatively assume a conflict
10089 */
10090 if (!specs[count].specific || !rs->specific)
10091 return 2;
10092 else if (specs[count].index == rs->index)
10093 return 1;
10094 }
10095
10096 return 0;
10097 }
10098
10099 /* Indicate an instruction group break; if INSERT_STOP is non-zero, then
10100 insert a stop to create the break. Update all resource dependencies
10101 appropriately. If QP_REGNO is non-zero, only apply the break to resources
10102 which use the same QP_REGNO and have the link_to_qp_branch flag set.
10103 If SAVE_CURRENT is non-zero, don't affect resources marked by the current
10104 instruction. */
10105
10106 static void
10107 insn_group_break (int insert_stop, int qp_regno, int save_current)
10108 {
10109 int i;
10110
10111 if (insert_stop && md.num_slots_in_use > 0)
10112 PREV_SLOT.end_of_insn_group = 1;
10113
10114 if (md.debug_dv)
10115 {
10116 fprintf (stderr, " Insn group break%s",
10117 (insert_stop ? " (w/stop)" : ""));
10118 if (qp_regno != 0)
10119 fprintf (stderr, " effective for QP=%d", qp_regno);
10120 fprintf (stderr, "\n");
10121 }
10122
10123 i = 0;
10124 while (i < regdepslen)
10125 {
10126 const struct ia64_dependency *dep = regdeps[i].dependency;
10127
10128 if (qp_regno != 0
10129 && regdeps[i].qp_regno != qp_regno)
10130 {
10131 ++i;
10132 continue;
10133 }
10134
10135 if (save_current
10136 && CURR_SLOT.src_file == regdeps[i].file
10137 && CURR_SLOT.src_line == regdeps[i].line)
10138 {
10139 ++i;
10140 continue;
10141 }
10142
10143 /* clear dependencies which are automatically cleared by a stop, or
10144 those that have reached the appropriate state of insn serialization */
10145 if (dep->semantics == IA64_DVS_IMPLIED
10146 || dep->semantics == IA64_DVS_IMPLIEDF
10147 || regdeps[i].insn_srlz == STATE_SRLZ)
10148 {
10149 print_dependency ("Removing", i);
10150 regdeps[i] = regdeps[--regdepslen];
10151 }
10152 else
10153 {
10154 if (dep->semantics == IA64_DVS_DATA
10155 || dep->semantics == IA64_DVS_INSTR
10156 || dep->semantics == IA64_DVS_SPECIFIC)
10157 {
10158 if (regdeps[i].insn_srlz == STATE_NONE)
10159 regdeps[i].insn_srlz = STATE_STOP;
10160 if (regdeps[i].data_srlz == STATE_NONE)
10161 regdeps[i].data_srlz = STATE_STOP;
10162 }
10163 ++i;
10164 }
10165 }
10166 }
10167
10168 /* Add the given resource usage spec to the list of active dependencies. */
10169
10170 static void
10171 mark_resource (struct ia64_opcode *idesc ATTRIBUTE_UNUSED,
10172 const struct ia64_dependency *dep ATTRIBUTE_UNUSED,
10173 struct rsrc *spec,
10174 int depind,
10175 int path)
10176 {
10177 if (regdepslen == regdepstotlen)
10178 {
10179 regdepstotlen += 20;
10180 regdeps = XRESIZEVEC (struct rsrc, regdeps, regdepstotlen);
10181 }
10182
10183 regdeps[regdepslen] = *spec;
10184 regdeps[regdepslen].depind = depind;
10185 regdeps[regdepslen].path = path;
10186 regdeps[regdepslen].file = CURR_SLOT.src_file;
10187 regdeps[regdepslen].line = CURR_SLOT.src_line;
10188
10189 print_dependency ("Adding", regdepslen);
10190
10191 ++regdepslen;
10192 }
10193
10194 static void
10195 print_dependency (const char *action, int depind)
10196 {
10197 if (md.debug_dv)
10198 {
10199 fprintf (stderr, " %s %s '%s'",
10200 action, dv_mode[(regdeps[depind].dependency)->mode],
10201 (regdeps[depind].dependency)->name);
10202 if (regdeps[depind].specific && regdeps[depind].index >= 0)
10203 fprintf (stderr, " (%d)", regdeps[depind].index);
10204 if (regdeps[depind].mem_offset.hint)
10205 {
10206 fputs (" ", stderr);
10207 fprintf_vma (stderr, regdeps[depind].mem_offset.base);
10208 fputs ("+", stderr);
10209 fprintf_vma (stderr, regdeps[depind].mem_offset.offset);
10210 }
10211 fprintf (stderr, "\n");
10212 }
10213 }
10214
10215 static void
10216 instruction_serialization (void)
10217 {
10218 int i;
10219 if (md.debug_dv)
10220 fprintf (stderr, " Instruction serialization\n");
10221 for (i = 0; i < regdepslen; i++)
10222 if (regdeps[i].insn_srlz == STATE_STOP)
10223 regdeps[i].insn_srlz = STATE_SRLZ;
10224 }
10225
10226 static void
10227 data_serialization (void)
10228 {
10229 int i = 0;
10230 if (md.debug_dv)
10231 fprintf (stderr, " Data serialization\n");
10232 while (i < regdepslen)
10233 {
10234 if (regdeps[i].data_srlz == STATE_STOP
10235 /* Note: as of 991210, all "other" dependencies are cleared by a
10236 data serialization. This might change with new tables */
10237 || (regdeps[i].dependency)->semantics == IA64_DVS_OTHER)
10238 {
10239 print_dependency ("Removing", i);
10240 regdeps[i] = regdeps[--regdepslen];
10241 }
10242 else
10243 ++i;
10244 }
10245 }
10246
10247 /* Insert stops and serializations as needed to avoid DVs. */
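/* For instance (a rough sketch, not literal assembler output): when a
   DVS_DATA violation is found in automatic mode, the effect is as if the
   writer had been followed by `;; srlz.d ;;' before the reader, as
   implemented by the IA64_DVS_DATA case below.  */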
10248
10249 static void
10250 remove_marked_resource (struct rsrc *rs)
10251 {
10252 switch (rs->dependency->semantics)
10253 {
10254 case IA64_DVS_SPECIFIC:
10255 if (md.debug_dv)
10256 fprintf (stderr, "Implementation-specific, assume worst case...\n");
10257 /* Fall through. */
10258 case IA64_DVS_INSTR:
10259 if (md.debug_dv)
10260 fprintf (stderr, "Inserting instr serialization\n");
10261 if (rs->insn_srlz < STATE_STOP)
10262 insn_group_break (1, 0, 0);
10263 if (rs->insn_srlz < STATE_SRLZ)
10264 {
10265 struct slot oldslot = CURR_SLOT;
10266 /* Manually jam a srlz.i insn into the stream */
10267 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10268 CURR_SLOT.user_template = -1;
10269 CURR_SLOT.idesc = ia64_find_opcode ("srlz.i");
10270 instruction_serialization ();
10271 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10272 if (++md.num_slots_in_use >= NUM_SLOTS)
10273 emit_one_bundle ();
10274 CURR_SLOT = oldslot;
10275 }
10276 insn_group_break (1, 0, 0);
10277 break;
10278 case IA64_DVS_OTHER: /* as of rev2 (991220) of the DV tables, all
10279 "other" types of DV are eliminated
10280 by a data serialization */
10281 case IA64_DVS_DATA:
10282 if (md.debug_dv)
10283 fprintf (stderr, "Inserting data serialization\n");
10284 if (rs->data_srlz < STATE_STOP)
10285 insn_group_break (1, 0, 0);
10286 {
10287 struct slot oldslot = CURR_SLOT;
10288 /* Manually jam a srlz.d insn into the stream */
10289 memset (&CURR_SLOT, 0, sizeof (CURR_SLOT));
10290 CURR_SLOT.user_template = -1;
10291 CURR_SLOT.idesc = ia64_find_opcode ("srlz.d");
10292 data_serialization ();
10293 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10294 if (++md.num_slots_in_use >= NUM_SLOTS)
10295 emit_one_bundle ();
10296 CURR_SLOT = oldslot;
10297 }
10298 break;
10299 case IA64_DVS_IMPLIED:
10300 case IA64_DVS_IMPLIEDF:
10301 if (md.debug_dv)
10302 fprintf (stderr, "Inserting stop\n");
10303 insn_group_break (1, 0, 0);
10304 break;
10305 default:
10306 break;
10307 }
10308 }
10309
10310 /* Check the resources used by the given opcode against the current dependency
10311 list.
10312
10313 The check is run once for each execution path encountered. In this case,
10314 a unique execution path is the sequence of instructions following a code
10315 entry point, e.g. the following has three execution paths, one starting
10316 at L0, one at L1, and one at L2.
10317
10318 L0: nop
10319 L1: add
10320 L2: add
10321 br.ret
10322 */
10323
10324 static void
10325 check_dependencies (struct ia64_opcode *idesc)
10326 {
10327 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10328 int path;
10329 int i;
10330
10331 /* Note that the number of marked resources may change within the
10332 loop if in auto mode. */
10333 i = 0;
10334 while (i < regdepslen)
10335 {
10336       struct rsrc *rs = &regdeps[i];
10337 const struct ia64_dependency *dep = rs->dependency;
10338 int chkind;
10339 int note;
10340 int start_over = 0;
10341
10342 if (dep->semantics == IA64_DVS_NONE
10343 || (chkind = depends_on (rs->depind, idesc)) == -1)
10344 {
10345 ++i;
10346 continue;
10347 }
10348
10349 note = NOTE (opdeps->chks[chkind]);
10350
10351 /* Check this resource against each execution path seen thus far. */
10352 for (path = 0; path <= md.path; path++)
10353 {
10354 int matchtype;
10355
10356 /* If the dependency wasn't on the path being checked, ignore it. */
10357 if (rs->path < path)
10358 continue;
10359
10360 /* If the QP for this insn implies a QP which has branched, don't
10361 bother checking. Ed. NOTE: I don't think this check is terribly
10362 useful; what's the point of generating code which will only be
10363 reached if its QP is zero?
10364 This code was specifically inserted to handle the following code,
10365 based on notes from Intel's DV checking code, where p1 implies p2.
10366
10367 mov r4 = 2
10368 (p2) br.cond L
10369 (p1) mov r4 = 7
10370 */
10371 if (CURR_SLOT.qp_regno != 0)
10372 {
10373 int skip = 0;
10374 int implies;
10375 for (implies = 0; implies < qp_implieslen; implies++)
10376 {
10377 if (qp_implies[implies].path >= path
10378 && qp_implies[implies].p1 == CURR_SLOT.qp_regno
10379 && qp_implies[implies].p2_branched)
10380 {
10381 skip = 1;
10382 break;
10383 }
10384 }
10385 if (skip)
10386 continue;
10387 }
10388
10389 if ((matchtype = resources_match (rs, idesc, note,
10390 CURR_SLOT.qp_regno, path)) != 0)
10391 {
10392 char msg[1024];
10393 char pathmsg[256] = "";
10394 char indexmsg[256] = "";
10395 int certain = (matchtype == 1 && CURR_SLOT.qp_regno == 0);
10396
10397 if (path != 0)
10398 snprintf (pathmsg, sizeof (pathmsg),
10399 " when entry is at label '%s'",
10400 md.entry_labels[path - 1]);
10401 if (matchtype == 1 && rs->index >= 0)
10402 snprintf (indexmsg, sizeof (indexmsg),
10403 ", specific resource number is %d",
10404 rs->index);
10405 snprintf (msg, sizeof (msg),
10406 "Use of '%s' %s %s dependency '%s' (%s)%s%s",
10407 idesc->name,
10408 (certain ? "violates" : "may violate"),
10409 dv_mode[dep->mode], dep->name,
10410 dv_sem[dep->semantics],
10411 pathmsg, indexmsg);
10412
10413 if (md.explicit_mode)
10414 {
10415 as_warn ("%s", msg);
10416 if (path < md.path)
10417 as_warn (_("Only the first path encountering the conflict is reported"));
10418 as_warn_where (rs->file, rs->line,
10419 _("This is the location of the conflicting usage"));
10420 /* Don't bother checking other paths, to avoid duplicating
10421 the same warning */
10422 break;
10423 }
10424 else
10425 {
10426 if (md.debug_dv)
10427 fprintf (stderr, "%s @ %s:%d\n", msg, rs->file, rs->line);
10428
10429 remove_marked_resource (rs);
10430
10431 /* since the set of dependencies has changed, start over */
10432 /* FIXME -- since we're removing dvs as we go, we
10433 probably don't really need to start over... */
10434 start_over = 1;
10435 break;
10436 }
10437 }
10438 }
10439 if (start_over)
10440 i = 0;
10441 else
10442 ++i;
10443 }
10444 }
10445
10446 /* Register new dependencies based on the given opcode. */
10447
10448 static void
10449 mark_resources (struct ia64_opcode *idesc)
10450 {
10451 int i;
10452 const struct ia64_opcode_dependency *opdeps = idesc->dependencies;
10453 int add_only_qp_reads = 0;
10454
10455 /* A conditional branch only uses its resources if it is taken; if it is
10456 taken, we stop following that path. The other branch types effectively
10457 *always* write their resources. If it's not taken, register only QP
10458 reads. */
10459 if (is_conditional_branch (idesc) || is_interruption_or_rfi (idesc))
10460 {
10461 add_only_qp_reads = 1;
10462 }
10463
10464 if (md.debug_dv)
10465 fprintf (stderr, "Registering '%s' resource usage\n", idesc->name);
10466
10467 for (i = 0; i < opdeps->nregs; i++)
10468 {
10469 const struct ia64_dependency *dep;
10470 struct rsrc specs[MAX_SPECS];
10471 int note;
10472 int path;
10473 int count;
10474
10475 dep = ia64_find_dependency (opdeps->regs[i]);
10476 note = NOTE (opdeps->regs[i]);
10477
10478 if (add_only_qp_reads
10479 && !(dep->mode == IA64_DV_WAR
10480 && (dep->specifier == IA64_RS_PR
10481 || dep->specifier == IA64_RS_PRr
10482 || dep->specifier == IA64_RS_PR63)))
10483 continue;
10484
10485 count = specify_resource (dep, idesc, DV_REG, specs, note, md.path);
10486
10487 while (count-- > 0)
10488 {
10489 mark_resource (idesc, dep, &specs[count],
10490 DEP (opdeps->regs[i]), md.path);
10491 }
10492
10493 /* The execution path may affect register values, which may in turn
10494 affect which indirect-access resources are accessed. */
10495 switch (dep->specifier)
10496 {
10497 default:
10498 break;
10499 case IA64_RS_CPUID:
10500 case IA64_RS_DBR:
10501 case IA64_RS_IBR:
10502 case IA64_RS_MSR:
10503 case IA64_RS_PKR:
10504 case IA64_RS_PMC:
10505 case IA64_RS_PMD:
10506 case IA64_RS_RR:
10507 for (path = 0; path < md.path; path++)
10508 {
10509 count = specify_resource (dep, idesc, DV_REG, specs, note, path);
10510 while (count-- > 0)
10511 mark_resource (idesc, dep, &specs[count],
10512 DEP (opdeps->regs[i]), path);
10513 }
10514 break;
10515 }
10516 }
10517 }
10518
10519 /* Remove dependencies when they no longer apply. */
10520
10521 static void
10522 update_dependencies (struct ia64_opcode *idesc)
10523 {
10524 int i;
10525
10526 if (strcmp (idesc->name, "srlz.i") == 0)
10527 {
10528 instruction_serialization ();
10529 }
10530 else if (strcmp (idesc->name, "srlz.d") == 0)
10531 {
10532 data_serialization ();
10533 }
10534 else if (is_interruption_or_rfi (idesc)
10535 || is_taken_branch (idesc))
10536 {
10537 /* Although technically the taken branch doesn't clear dependencies
10538 which require a srlz.[id], we don't follow the branch; the next
10539 instruction is assumed to start with a clean slate. */
10540 regdepslen = 0;
10541 md.path = 0;
10542 }
10543 else if (is_conditional_branch (idesc)
10544 && CURR_SLOT.qp_regno != 0)
10545 {
10546 int is_call = strstr (idesc->name, ".call") != NULL;
10547
10548 for (i = 0; i < qp_implieslen; i++)
10549 {
10550 /* If the conditional branch's predicate is implied by the predicate
10551 in an existing dependency, remove that dependency. */
10552 if (qp_implies[i].p2 == CURR_SLOT.qp_regno)
10553 {
10554 int depind = 0;
10555 /* Note that this implied predicate takes a branch so that if
10556 a later insn generates a DV but its predicate implies this
10557 one, we can avoid the false DV warning. */
10558 qp_implies[i].p2_branched = 1;
10559 while (depind < regdepslen)
10560 {
10561 if (regdeps[depind].qp_regno == qp_implies[i].p1)
10562 {
10563 print_dependency ("Removing", depind);
10564 regdeps[depind] = regdeps[--regdepslen];
10565 }
10566 else
10567 ++depind;
10568 }
10569 }
10570 }
10571 /* Any marked resources which have this same predicate should be
10572 cleared, provided that the QP hasn't been modified between the
10573 marking instruction and the branch. */
10574 if (is_call)
10575 {
10576 insn_group_break (0, CURR_SLOT.qp_regno, 1);
10577 }
10578 else
10579 {
10580 i = 0;
10581 while (i < regdepslen)
10582 {
10583 if (regdeps[i].qp_regno == CURR_SLOT.qp_regno
10584 && regdeps[i].link_to_qp_branch
10585 && (regdeps[i].file != CURR_SLOT.src_file
10586 || regdeps[i].line != CURR_SLOT.src_line))
10587 {
10588 /* Treat like a taken branch */
10589 print_dependency ("Removing", i);
10590 regdeps[i] = regdeps[--regdepslen];
10591 }
10592 else
10593 ++i;
10594 }
10595 }
10596 }
10597 }
10598
10599 /* Examine the current instruction for dependency violations. */
10600
10601 static int
10602 check_dv (struct ia64_opcode *idesc)
10603 {
10604 if (md.debug_dv)
10605 {
10606 fprintf (stderr, "Checking %s for violations (line %d, %d/%d)\n",
10607 idesc->name, CURR_SLOT.src_line,
10608 idesc->dependencies->nchks,
10609 idesc->dependencies->nregs);
10610 }
10611
10612 /* Look through the list of currently marked resources; if the current
10613 instruction has the dependency in its chks list which uses that resource,
10614 check against the specific resources used. */
10615 check_dependencies (idesc);
10616
10617 /* Look up the instruction's regdeps (RAW writes, WAW writes, and WAR reads),
10618 then add them to the list of marked resources. */
10619 mark_resources (idesc);
10620
10621 /* There are several types of dependency semantics, and each has its own
10622 requirements for being cleared
10623
10624 Instruction serialization (insns separated by interruption, rfi, or
10625 writer + srlz.i + reader, all in separate groups) clears DVS_INSTR.
10626
10627 Data serialization (instruction serialization, or writer + srlz.d +
10628 reader, where writer and srlz.d are in separate groups) clears
10629 DVS_DATA. (This also clears DVS_OTHER, but that is not guaranteed to
10630 always be the case).
10631
10632 Instruction group break (groups separated by stop, taken branch,
10633 interruption or rfi) clears DVS_IMPLIED and DVS_IMPLIEDF.
10634 */
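  /* Illustrative example (not from the original sources): for a DVS_INSTR
     dependency, the writer, an srlz.i and the reader must end up in three
     separate instruction groups before the dependency is considered
     cleared, roughly along the lines of:

	mov psr.l = r2 ;;
	srlz.i ;;
	// code relying on the updated PSR bits
   */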
10635 update_dependencies (idesc);
10636
10637 /* Sometimes, knowing a register value allows us to avoid giving a false DV
10638 warning. Keep track of as many as possible that are useful. */
10639 note_register_values (idesc);
10640
10641 /* We don't need or want this anymore. */
10642 md.mem_offset.hint = 0;
10643
10644 return 0;
10645 }
10646
10647 /* Translate one line of assembly. Pseudo ops and labels do not show
10648 here. */
10649 void
10650 md_assemble (char *str)
10651 {
10652 char *saved_input_line_pointer, *temp;
10653 const char *mnemonic;
10654 const struct pseudo_opcode *pdesc;
10655 struct ia64_opcode *idesc;
10656 unsigned char qp_regno;
10657 unsigned int flags;
10658 int ch;
10659
10660 saved_input_line_pointer = input_line_pointer;
10661 input_line_pointer = str;
10662
10663 /* extract the opcode (mnemonic): */
10664
10665 ch = get_symbol_name (&temp);
10666 mnemonic = temp;
10667 pdesc = (struct pseudo_opcode *) str_hash_find (md.pseudo_hash, mnemonic);
10668 if (pdesc)
10669 {
10670 (void) restore_line_pointer (ch);
10671 (*pdesc->handler) (pdesc->arg);
10672 goto done;
10673 }
10674
10675 /* Find the instruction descriptor matching the arguments. */
10676
10677 idesc = ia64_find_opcode (mnemonic);
10678 (void) restore_line_pointer (ch);
10679 if (!idesc)
10680 {
10681 as_bad (_("Unknown opcode `%s'"), mnemonic);
10682 goto done;
10683 }
10684
10685 idesc = parse_operands (idesc);
10686 if (!idesc)
10687 goto done;
10688
10689 /* Handle the dynamic ops we can handle now: */
10690 if (idesc->type == IA64_TYPE_DYN)
10691 {
10692 if (strcmp (idesc->name, "add") == 0)
10693 {
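	  /* Note: `addl' takes a 22-bit immediate but only accepts r0-r3
	     as its register source, while `adds' accepts any GR with only
	     a 14-bit immediate; pick the form matching the register
	     operand and let the immediate be range-checked later.  */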
10694 if (CURR_SLOT.opnd[2].X_op == O_register
10695 && CURR_SLOT.opnd[2].X_add_number < 4)
10696 mnemonic = "addl";
10697 else
10698 mnemonic = "adds";
10699 ia64_free_opcode (idesc);
10700 idesc = ia64_find_opcode (mnemonic);
10701 }
10702 else if (strcmp (idesc->name, "mov") == 0)
10703 {
10704 enum ia64_opnd opnd1, opnd2;
10705 int rop;
10706
10707 opnd1 = idesc->operands[0];
10708 opnd2 = idesc->operands[1];
10709 if (opnd1 == IA64_OPND_AR3)
10710 rop = 0;
10711 else if (opnd2 == IA64_OPND_AR3)
10712 rop = 1;
10713 else
10714 abort ();
10715 if (CURR_SLOT.opnd[rop].X_op == O_register)
10716 {
10717 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10718 mnemonic = "mov.i";
10719 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10720 mnemonic = "mov.m";
10721 else
10722 rop = -1;
10723 }
10724 else
10725 abort ();
10726 if (rop >= 0)
10727 {
10728 ia64_free_opcode (idesc);
10729 idesc = ia64_find_opcode (mnemonic);
10730 while (idesc != NULL
10731 && (idesc->operands[0] != opnd1
10732 || idesc->operands[1] != opnd2))
10733 idesc = get_next_opcode (idesc);
10734 }
10735 }
10736 }
10737 else if (strcmp (idesc->name, "mov.i") == 0
10738 || strcmp (idesc->name, "mov.m") == 0)
10739 {
10740 enum ia64_opnd opnd1, opnd2;
10741 int rop;
10742
10743 opnd1 = idesc->operands[0];
10744 opnd2 = idesc->operands[1];
10745 if (opnd1 == IA64_OPND_AR3)
10746 rop = 0;
10747 else if (opnd2 == IA64_OPND_AR3)
10748 rop = 1;
10749 else
10750 abort ();
10751 if (CURR_SLOT.opnd[rop].X_op == O_register)
10752 {
10753 char unit = 'a';
10754 if (ar_is_only_in_integer_unit (CURR_SLOT.opnd[rop].X_add_number))
10755 unit = 'i';
10756 else if (ar_is_only_in_memory_unit (CURR_SLOT.opnd[rop].X_add_number))
10757 unit = 'm';
10758 if (unit != 'a' && unit != idesc->name [4])
10759 as_bad (_("AR %d can only be accessed by %c-unit"),
10760 (int) (CURR_SLOT.opnd[rop].X_add_number - REG_AR),
10761 TOUPPER (unit));
10762 }
10763 }
10764 else if (strcmp (idesc->name, "hint.b") == 0)
10765 {
10766 switch (md.hint_b)
10767 {
10768 case hint_b_ok:
10769 break;
10770 case hint_b_warning:
10771 as_warn (_("hint.b may be treated as nop"));
10772 break;
10773 case hint_b_error:
10774 as_bad (_("hint.b shouldn't be used"));
10775 break;
10776 }
10777 }
10778
10779 qp_regno = 0;
10780 if (md.qp.X_op == O_register)
10781 {
10782 qp_regno = md.qp.X_add_number - REG_P;
10783 md.qp.X_op = O_absent;
10784 }
10785
10786 flags = idesc->flags;
10787
10788 if ((flags & IA64_OPCODE_FIRST) != 0)
10789 {
10790 /* The alignment frag has to end with a stop bit only if the
10791 next instruction after the alignment directive has to be
10792 the first instruction in an instruction group. */
10793 if (align_frag)
10794 {
10795 while (align_frag->fr_type != rs_align_code)
10796 {
10797 align_frag = align_frag->fr_next;
10798 if (!align_frag)
10799 break;
10800 }
10801 /* align_frag can be NULL if there are directives in
10802 between. */
10803 if (align_frag && align_frag->fr_next == frag_now)
10804 align_frag->tc_frag_data = 1;
10805 }
10806
10807 insn_group_break (1, 0, 0);
10808 }
10809 align_frag = NULL;
10810
10811 if ((flags & IA64_OPCODE_NO_PRED) != 0 && qp_regno != 0)
10812 {
10813 as_bad (_("`%s' cannot be predicated"), idesc->name);
10814 goto done;
10815 }
10816
10817 /* Build the instruction. */
10818 CURR_SLOT.qp_regno = qp_regno;
10819 CURR_SLOT.idesc = idesc;
10820 CURR_SLOT.src_file = as_where (&CURR_SLOT.src_line);
10821 dwarf2_where (&CURR_SLOT.debug_line);
10822 dwarf2_consume_line_info ();
10823
10824 /* Add unwind entries, if there are any. */
10825 if (unwind.current_entry)
10826 {
10827 CURR_SLOT.unwind_record = unwind.current_entry;
10828 unwind.current_entry = NULL;
10829 }
10830 if (unwind.pending_saves)
10831 {
10832 if (unwind.pending_saves->next)
10833 {
10834 /* Attach the next pending save to the next slot so that its
10835 slot number will get set correctly. */
10836 add_unwind_entry (unwind.pending_saves->next, NOT_A_CHAR);
10837 unwind.pending_saves = &unwind.pending_saves->next->r.record.p;
10838 }
10839 else
10840 unwind.pending_saves = NULL;
10841 }
10842 if (unwind.proc_pending.sym && S_IS_DEFINED (unwind.proc_pending.sym))
10843 unwind.insn = 1;
10844
10845 /* Check for dependency violations. */
10846 if (md.detect_dv)
10847 check_dv (idesc);
10848
10849 md.curr_slot = (md.curr_slot + 1) % NUM_SLOTS;
10850 if (++md.num_slots_in_use >= NUM_SLOTS)
10851 emit_one_bundle ();
10852
10853 if ((flags & IA64_OPCODE_LAST) != 0)
10854 insn_group_break (1, 0, 0);
10855
10856 md.last_text_seg = now_seg;
10857 md.last_text_subseg = now_subseg;
10858
10859 done:
10860 input_line_pointer = saved_input_line_pointer;
10861 }
10862
10863 /* Called when symbol NAME cannot be found in the symbol table.
10864 Should be used for dynamic valued symbols only. */
10865
10866 symbolS *
10867 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
10868 {
10869 return 0;
10870 }
10871
10872 /* Called for any expression that can not be recognized. When the
10873 function is called, `input_line_pointer' will point to the start of
10874 the expression. */
10875
10876 void
10877 md_operand (expressionS *e)
10878 {
10879 switch (*input_line_pointer)
10880 {
10881 case '[':
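      /* This is the bracketed index of an indirect register-file operand,
	 e.g. `mov r4 = dbr[r5]' (an illustrative example); the index must
	 be a general register, as checked below.  */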
10882 ++input_line_pointer;
10883 expression_and_evaluate (e);
10884 if (*input_line_pointer != ']')
10885 {
10886 as_bad (_("Closing bracket missing"));
10887 goto err;
10888 }
10889 else
10890 {
10891 if (e->X_op != O_register
10892 || e->X_add_number < REG_GR
10893 || e->X_add_number > REG_GR + 127)
10894 {
10895 as_bad (_("Index must be a general register"));
10896 e->X_add_number = REG_GR;
10897 }
10898
10899 ++input_line_pointer;
10900 e->X_op = O_index;
10901 }
10902 break;
10903
10904 default:
10905 break;
10906 }
10907 return;
10908
10909 err:
10910 ignore_rest_of_line ();
10911 }
10912
10913 /* Return 1 if it's OK to adjust a reloc by replacing the symbol with
10914    a section symbol plus some offset.  For relocs involving @fptr()
10915    directives, we don't want such adjustments since we need to have the
10916    original symbol's name in the reloc.  */
10917 int
10918 ia64_fix_adjustable (fixS *fix)
10919 {
10920 /* Prevent all adjustments to global symbols */
10921 if (S_IS_EXTERNAL (fix->fx_addsy) || S_IS_WEAK (fix->fx_addsy))
10922 return 0;
10923
10924 switch (fix->fx_r_type)
10925 {
10926 case BFD_RELOC_IA64_FPTR64I:
10927 case BFD_RELOC_IA64_FPTR32MSB:
10928 case BFD_RELOC_IA64_FPTR32LSB:
10929 case BFD_RELOC_IA64_FPTR64MSB:
10930 case BFD_RELOC_IA64_FPTR64LSB:
10931 case BFD_RELOC_IA64_LTOFF_FPTR22:
10932 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10933 return 0;
10934 default:
10935 break;
10936 }
10937
10938 return 1;
10939 }
10940
10941 int
10942 ia64_force_relocation (fixS *fix)
10943 {
10944 switch (fix->fx_r_type)
10945 {
10946 case BFD_RELOC_IA64_FPTR64I:
10947 case BFD_RELOC_IA64_FPTR32MSB:
10948 case BFD_RELOC_IA64_FPTR32LSB:
10949 case BFD_RELOC_IA64_FPTR64MSB:
10950 case BFD_RELOC_IA64_FPTR64LSB:
10951
10952 case BFD_RELOC_IA64_LTOFF22:
10953 case BFD_RELOC_IA64_LTOFF64I:
10954 case BFD_RELOC_IA64_LTOFF_FPTR22:
10955 case BFD_RELOC_IA64_LTOFF_FPTR64I:
10956 case BFD_RELOC_IA64_PLTOFF22:
10957 case BFD_RELOC_IA64_PLTOFF64I:
10958 case BFD_RELOC_IA64_PLTOFF64MSB:
10959 case BFD_RELOC_IA64_PLTOFF64LSB:
10960
10961 case BFD_RELOC_IA64_LTOFF22X:
10962 case BFD_RELOC_IA64_LDXMOV:
10963 return 1;
10964
10965 default:
10966 break;
10967 }
10968
10969 return generic_force_reloc (fix);
10970 }
10971
10972 /* Decide from what point a pc-relative relocation is relative to,
10973 relative to the pc-relative fixup. Er, relatively speaking. */
10974 long
10975 ia64_pcrel_from_section (fixS *fix, segT sec)
10976 {
10977 unsigned long off = fix->fx_frag->fr_address + fix->fx_where;
10978
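  /* PC-relative relocations in code are taken to be relative to the start
     of the containing 16-byte bundle rather than to the individual slot,
     so mask off the low bits of the address.  */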
10979 if (bfd_section_flags (sec) & SEC_CODE)
10980 off &= ~0xfUL;
10981
10982 return off;
10983 }
10984
10985
10986 /* Used to emit section-relative relocs for the dwarf2 debug data. */
10987 void
10988 ia64_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10989 {
10990 expressionS exp;
10991
10992 exp.X_op = O_pseudo_fixup;
10993 exp.X_op_symbol = pseudo_func[FUNC_SEC_RELATIVE].u.sym;
10994 exp.X_add_number = 0;
10995 exp.X_add_symbol = symbol;
10996 emit_expr (&exp, size);
10997 }
10998
10999 /* This is called whenever some data item (not an instruction) needs a
11000 fixup. We pick the right reloc code depending on the byteorder
11001 currently in effect. */
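/* For instance, `data4 sym' becomes BFD_RELOC_IA64_DIR32LSB (or the MSB
   variant when big-endian), while `data8 @iplt(f)' under the 32-bit ABI
   is turned into an IPLT relocation by the size-8 case below.  */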
11002 void
11003 ia64_cons_fix_new (fragS *f, int where, int nbytes, expressionS *exp,
11004 bfd_reloc_code_real_type code)
11005 {
11006 fixS *fix;
11007
11008 switch (nbytes)
11009 {
11010       /* There are no relocs for 8 and 16 bit quantities, but we allow
11011 them here since they will work fine as long as the expression
11012 is fully defined at the end of the pass over the source file. */
11013 case 1: code = BFD_RELOC_8; break;
11014 case 2: code = BFD_RELOC_16; break;
11015 case 4:
11016 if (target_big_endian)
11017 code = BFD_RELOC_IA64_DIR32MSB;
11018 else
11019 code = BFD_RELOC_IA64_DIR32LSB;
11020 break;
11021
11022 case 8:
11023 /* In 32-bit mode, data8 could mean function descriptors too. */
11024 if (exp->X_op == O_pseudo_fixup
11025 && exp->X_op_symbol
11026 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC
11027 && !(md.flags & EF_IA_64_ABI64))
11028 {
11029 if (target_big_endian)
11030 code = BFD_RELOC_IA64_IPLTMSB;
11031 else
11032 code = BFD_RELOC_IA64_IPLTLSB;
11033 exp->X_op = O_symbol;
11034 break;
11035 }
11036 else
11037 {
11038 if (target_big_endian)
11039 code = BFD_RELOC_IA64_DIR64MSB;
11040 else
11041 code = BFD_RELOC_IA64_DIR64LSB;
11042 break;
11043 }
11044
11045 case 16:
11046 if (exp->X_op == O_pseudo_fixup
11047 && exp->X_op_symbol
11048 && S_GET_VALUE (exp->X_op_symbol) == FUNC_IPLT_RELOC)
11049 {
11050 if (target_big_endian)
11051 code = BFD_RELOC_IA64_IPLTMSB;
11052 else
11053 code = BFD_RELOC_IA64_IPLTLSB;
11054 exp->X_op = O_symbol;
11055 break;
11056 }
11057 /* FALLTHRU */
11058
11059 default:
11060 as_bad (_("Unsupported fixup size %d"), nbytes);
11061 ignore_rest_of_line ();
11062 return;
11063 }
11064
11065 if (exp->X_op == O_pseudo_fixup)
11066 {
11067 exp->X_op = O_symbol;
11068 code = ia64_gen_real_reloc_type (exp->X_op_symbol, code);
11069 /* ??? If code unchanged, unsupported. */
11070 }
11071
11072 fix = fix_new_exp (f, where, nbytes, exp, 0, code);
11073 /* We need to store the byte order in effect in case we're going
11074      to fix an 8 or 16 bit relocation (for which there are no real
11075 relocs available). See md_apply_fix(). */
11076 fix->tc_fix_data.bigendian = target_big_endian;
11077 }
11078
11079 /* Return the actual relocation we wish to associate with the pseudo
11080 reloc described by SYM and R_TYPE. SYM should be one of the
11081 symbols in the pseudo_func array, or NULL. */
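/* For example, `addl r2 = @gprel(sym), gp' arrives here as
   FUNC_GP_RELATIVE together with BFD_RELOC_IA64_IMM22 and is mapped to
   BFD_RELOC_IA64_GPREL22 by the table below.  */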
11082
11083 static bfd_reloc_code_real_type
11084 ia64_gen_real_reloc_type (struct symbol *sym, bfd_reloc_code_real_type r_type)
11085 {
11086 bfd_reloc_code_real_type newr = 0;
11087 const char *type = NULL, *suffix = "";
11088
11089 if (sym == NULL)
11090 {
11091 return r_type;
11092 }
11093
11094 switch (S_GET_VALUE (sym))
11095 {
11096 case FUNC_FPTR_RELATIVE:
11097 switch (r_type)
11098 {
11099 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_FPTR64I; break;
11100 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_FPTR32MSB; break;
11101 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_FPTR32LSB; break;
11102 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_FPTR64MSB; break;
11103 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_FPTR64LSB; break;
11104 default: type = "FPTR"; break;
11105 }
11106 break;
11107
11108 case FUNC_GP_RELATIVE:
11109 switch (r_type)
11110 {
11111 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_GPREL22; break;
11112 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_GPREL64I; break;
11113 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_GPREL32MSB; break;
11114 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_GPREL32LSB; break;
11115 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_GPREL64MSB; break;
11116 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_GPREL64LSB; break;
11117 default: type = "GPREL"; break;
11118 }
11119 break;
11120
11121 case FUNC_LT_RELATIVE:
11122 switch (r_type)
11123 {
11124 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22; break;
11125 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_LTOFF64I; break;
11126 default: type = "LTOFF"; break;
11127 }
11128 break;
11129
11130 case FUNC_LT_RELATIVE_X:
11131 switch (r_type)
11132 {
11133 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_LTOFF22X; break;
11134 default: type = "LTOFF"; suffix = "X"; break;
11135 }
11136 break;
11137
11138 case FUNC_PC_RELATIVE:
11139 switch (r_type)
11140 {
11141 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PCREL22; break;
11142 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PCREL64I; break;
11143 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_PCREL32MSB; break;
11144 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_PCREL32LSB; break;
11145 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PCREL64MSB; break;
11146 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PCREL64LSB; break;
11147 default: type = "PCREL"; break;
11148 }
11149 break;
11150
11151 case FUNC_PLT_RELATIVE:
11152 switch (r_type)
11153 {
11154 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_PLTOFF22; break;
11155 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_PLTOFF64I; break;
11156 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_PLTOFF64MSB;break;
11157 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_PLTOFF64LSB;break;
11158 default: type = "PLTOFF"; break;
11159 }
11160 break;
11161
11162 case FUNC_SEC_RELATIVE:
11163 switch (r_type)
11164 {
11165 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SECREL32MSB;break;
11166 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SECREL32LSB;break;
11167 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SECREL64MSB;break;
11168 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SECREL64LSB;break;
11169 default: type = "SECREL"; break;
11170 }
11171 break;
11172
11173 case FUNC_SEG_RELATIVE:
11174 switch (r_type)
11175 {
11176 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_SEGREL32MSB;break;
11177 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_SEGREL32LSB;break;
11178 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_SEGREL64MSB;break;
11179 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_SEGREL64LSB;break;
11180 default: type = "SEGREL"; break;
11181 }
11182 break;
11183
11184 case FUNC_LTV_RELATIVE:
11185 switch (r_type)
11186 {
11187 case BFD_RELOC_IA64_DIR32MSB: newr = BFD_RELOC_IA64_LTV32MSB; break;
11188 case BFD_RELOC_IA64_DIR32LSB: newr = BFD_RELOC_IA64_LTV32LSB; break;
11189 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_LTV64MSB; break;
11190 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_LTV64LSB; break;
11191 default: type = "LTV"; break;
11192 }
11193 break;
11194
11195 case FUNC_LT_FPTR_RELATIVE:
11196 switch (r_type)
11197 {
11198 case BFD_RELOC_IA64_IMM22:
11199 newr = BFD_RELOC_IA64_LTOFF_FPTR22; break;
11200 case BFD_RELOC_IA64_IMM64:
11201 newr = BFD_RELOC_IA64_LTOFF_FPTR64I; break;
11202 case BFD_RELOC_IA64_DIR32MSB:
11203 newr = BFD_RELOC_IA64_LTOFF_FPTR32MSB; break;
11204 case BFD_RELOC_IA64_DIR32LSB:
11205 newr = BFD_RELOC_IA64_LTOFF_FPTR32LSB; break;
11206 case BFD_RELOC_IA64_DIR64MSB:
11207 newr = BFD_RELOC_IA64_LTOFF_FPTR64MSB; break;
11208 case BFD_RELOC_IA64_DIR64LSB:
11209 newr = BFD_RELOC_IA64_LTOFF_FPTR64LSB; break;
11210 default:
11211 type = "LTOFF_FPTR"; break;
11212 }
11213 break;
11214
11215 case FUNC_TP_RELATIVE:
11216 switch (r_type)
11217 {
11218 case BFD_RELOC_IA64_IMM14: newr = BFD_RELOC_IA64_TPREL14; break;
11219 case BFD_RELOC_IA64_IMM22: newr = BFD_RELOC_IA64_TPREL22; break;
11220 case BFD_RELOC_IA64_IMM64: newr = BFD_RELOC_IA64_TPREL64I; break;
11221 case BFD_RELOC_IA64_DIR64MSB: newr = BFD_RELOC_IA64_TPREL64MSB; break;
11222 case BFD_RELOC_IA64_DIR64LSB: newr = BFD_RELOC_IA64_TPREL64LSB; break;
11223 default: type = "TPREL"; break;
11224 }
11225 break;
11226
11227 case FUNC_LT_TP_RELATIVE:
11228 switch (r_type)
11229 {
11230 case BFD_RELOC_IA64_IMM22:
11231 newr = BFD_RELOC_IA64_LTOFF_TPREL22; break;
11232 default:
11233 type = "LTOFF_TPREL"; break;
11234 }
11235 break;
11236
11237 case FUNC_DTP_MODULE:
11238 switch (r_type)
11239 {
11240 case BFD_RELOC_IA64_DIR64MSB:
11241 newr = BFD_RELOC_IA64_DTPMOD64MSB; break;
11242 case BFD_RELOC_IA64_DIR64LSB:
11243 newr = BFD_RELOC_IA64_DTPMOD64LSB; break;
11244 default:
11245 type = "DTPMOD"; break;
11246 }
11247 break;
11248
11249 case FUNC_LT_DTP_MODULE:
11250 switch (r_type)
11251 {
11252 case BFD_RELOC_IA64_IMM22:
11253 newr = BFD_RELOC_IA64_LTOFF_DTPMOD22; break;
11254 default:
11255 type = "LTOFF_DTPMOD"; break;
11256 }
11257 break;
11258
11259 case FUNC_DTP_RELATIVE:
11260 switch (r_type)
11261 {
11262 case BFD_RELOC_IA64_DIR32MSB:
11263 newr = BFD_RELOC_IA64_DTPREL32MSB; break;
11264 case BFD_RELOC_IA64_DIR32LSB:
11265 newr = BFD_RELOC_IA64_DTPREL32LSB; break;
11266 case BFD_RELOC_IA64_DIR64MSB:
11267 newr = BFD_RELOC_IA64_DTPREL64MSB; break;
11268 case BFD_RELOC_IA64_DIR64LSB:
11269 newr = BFD_RELOC_IA64_DTPREL64LSB; break;
11270 case BFD_RELOC_IA64_IMM14:
11271 newr = BFD_RELOC_IA64_DTPREL14; break;
11272 case BFD_RELOC_IA64_IMM22:
11273 newr = BFD_RELOC_IA64_DTPREL22; break;
11274 case BFD_RELOC_IA64_IMM64:
11275 newr = BFD_RELOC_IA64_DTPREL64I; break;
11276 default:
11277 type = "DTPREL"; break;
11278 }
11279 break;
11280
11281 case FUNC_LT_DTP_RELATIVE:
11282 switch (r_type)
11283 {
11284 case BFD_RELOC_IA64_IMM22:
11285 newr = BFD_RELOC_IA64_LTOFF_DTPREL22; break;
11286 default:
11287 type = "LTOFF_DTPREL"; break;
11288 }
11289 break;
11290
11291 case FUNC_IPLT_RELOC:
11292 switch (r_type)
11293 {
11294 case BFD_RELOC_IA64_IPLTMSB: return r_type;
11295 case BFD_RELOC_IA64_IPLTLSB: return r_type;
11296 default: type = "IPLT"; break;
11297 }
11298 break;
11299
11300 #ifdef TE_VMS
11301 case FUNC_SLOTCOUNT_RELOC:
11302 return DUMMY_RELOC_IA64_SLOTCOUNT;
11303 #endif
11304
11305 default:
11306 abort ();
11307 }
11308
11309 if (newr)
11310 return newr;
11311 else
11312 {
11313 int width;
11314
11315 if (!type)
11316 abort ();
11317 switch (r_type)
11318 {
11319 case BFD_RELOC_IA64_DIR32MSB: width = 32; suffix = "MSB"; break;
11320 case BFD_RELOC_IA64_DIR32LSB: width = 32; suffix = "LSB"; break;
11321 case BFD_RELOC_IA64_DIR64MSB: width = 64; suffix = "MSB"; break;
11322 case BFD_RELOC_IA64_DIR64LSB: width = 64; suffix = "LSB"; break;
11323 case BFD_RELOC_UNUSED: width = 13; break;
11324 case BFD_RELOC_IA64_IMM14: width = 14; break;
11325 case BFD_RELOC_IA64_IMM22: width = 22; break;
11326 case BFD_RELOC_IA64_IMM64: width = 64; suffix = "I"; break;
11327 default: abort ();
11328 }
11329
11330 /* This should be an error, but since previously there wasn't any
11331 diagnostic here, don't make it fail because of this for now. */
11332 as_warn (_("Cannot express %s%d%s relocation"), type, width, suffix);
11333 return r_type;
11334 }
11335 }
11336
11337 /* Here is where we generate the appropriate reloc for pseudo relocation
11338 functions. */
11339 void
11340 ia64_validate_fix (fixS *fix)
11341 {
11342 switch (fix->fx_r_type)
11343 {
11344 case BFD_RELOC_IA64_FPTR64I:
11345 case BFD_RELOC_IA64_FPTR32MSB:
11346 case BFD_RELOC_IA64_FPTR64LSB:
11347 case BFD_RELOC_IA64_LTOFF_FPTR22:
11348 case BFD_RELOC_IA64_LTOFF_FPTR64I:
11349 if (fix->fx_offset != 0)
11350 as_bad_where (fix->fx_file, fix->fx_line,
11351 _("No addend allowed in @fptr() relocation"));
11352 break;
11353 default:
11354 break;
11355 }
11356 }
11357
11358 static void
11359 fix_insn (fixS *fix, const struct ia64_operand *odesc, valueT value)
11360 {
11361 bfd_vma insn[3], t0, t1, control_bits;
11362 const char *err;
11363 char *fixpos;
11364 long slot;
11365
11366 slot = fix->fx_where & 0x3;
11367 fixpos = fix->fx_frag->fr_literal + (fix->fx_where - slot);
11368
11369 /* Bundles are always in little-endian byte order */
11370 t0 = bfd_getl64 (fixpos);
11371 t1 = bfd_getl64 (fixpos + 8);
11372 control_bits = t0 & 0x1f;
11373 insn[0] = (t0 >> 5) & 0x1ffffffffffLL;
11374 insn[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
11375 insn[2] = (t1 >> 23) & 0x1ffffffffffLL;
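  /* An IA-64 bundle is 128 bits: a 5-bit template in bits 0-4 followed by
     three 41-bit instruction slots in bits 5-45, 46-86 and 87-127.  The
     shifts above rebuild the template and the three slots from the two
     little-endian 64-bit halves.  */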
11376
11377 err = NULL;
11378 if (odesc - elf64_ia64_operands == IA64_OPND_IMMU64)
11379 {
11380 insn[1] = (value >> 22) & 0x1ffffffffffLL;
11381 insn[2] |= (((value & 0x7f) << 13)
11382 | (((value >> 7) & 0x1ff) << 27)
11383 | (((value >> 16) & 0x1f) << 22)
11384 | (((value >> 21) & 0x1) << 21)
11385 | (((value >> 63) & 0x1) << 36));
11386 }
11387 else if (odesc - elf64_ia64_operands == IA64_OPND_IMMU62)
11388 {
11389 if (value & ~0x3fffffffffffffffULL)
11390 err = _("integer operand out of range");
11391 insn[1] = (value >> 21) & 0x1ffffffffffLL;
11392 insn[2] |= (((value & 0xfffff) << 6) | (((value >> 20) & 0x1) << 36));
11393 }
11394 else if (odesc - elf64_ia64_operands == IA64_OPND_TGT64)
11395 {
11396 value >>= 4;
11397 insn[1] = ((value >> 20) & 0x7fffffffffLL) << 2;
11398 insn[2] |= ((((value >> 59) & 0x1) << 36)
11399 | (((value >> 0) & 0xfffff) << 13));
11400 }
11401 else
11402 err = (*odesc->insert) (odesc, value, insn + slot);
11403
11404 if (err)
11405 as_bad_where (fix->fx_file, fix->fx_line, "%s", err);
11406
11407 t0 = control_bits | (insn[0] << 5) | (insn[1] << 46);
11408 t1 = ((insn[1] >> 18) & 0x7fffff) | (insn[2] << 23);
11409 number_to_chars_littleendian (fixpos + 0, t0, 8);
11410 number_to_chars_littleendian (fixpos + 8, t1, 8);
11411 }
11412
11413 /* Attempt to simplify or even eliminate a fixup. The return value is
11414 ignored; perhaps it was once meaningful, but now it is historical.
11415 To indicate that a fixup has been eliminated, set FIXP->FX_DONE.
11416
11417 If fixp->fx_addsy is non-NULL, we'll have to generate a reloc entry
11418 (if possible). */
11419
11420 void
11421 md_apply_fix (fixS *fix, valueT *valP, segT seg ATTRIBUTE_UNUSED)
11422 {
11423 char *fixpos;
11424 valueT value = *valP;
11425
11426 fixpos = fix->fx_frag->fr_literal + fix->fx_where;
11427
11428 if (fix->fx_pcrel)
11429 {
11430 switch (fix->fx_r_type)
11431 {
11432 case BFD_RELOC_IA64_PCREL21B: break;
11433 case BFD_RELOC_IA64_PCREL21BI: break;
11434 case BFD_RELOC_IA64_PCREL21F: break;
11435 case BFD_RELOC_IA64_PCREL21M: break;
11436 case BFD_RELOC_IA64_PCREL60B: break;
11437 case BFD_RELOC_IA64_PCREL22: break;
11438 case BFD_RELOC_IA64_PCREL64I: break;
11439 case BFD_RELOC_IA64_PCREL32MSB: break;
11440 case BFD_RELOC_IA64_PCREL32LSB: break;
11441 case BFD_RELOC_IA64_PCREL64MSB: break;
11442 case BFD_RELOC_IA64_PCREL64LSB: break;
11443 default:
11444 fix->fx_r_type = ia64_gen_real_reloc_type (pseudo_func[FUNC_PC_RELATIVE].u.sym,
11445 fix->fx_r_type);
11446 break;
11447 }
11448 }
11449 if (fix->fx_addsy)
11450 {
11451 switch ((unsigned) fix->fx_r_type)
11452 {
11453 case BFD_RELOC_UNUSED:
11454 /* This must be a TAG13 or TAG13b operand. There are no external
11455 relocs defined for them, so we must give an error. */
11456 as_bad_where (fix->fx_file, fix->fx_line,
11457 _("%s must have a constant value"),
11458 elf64_ia64_operands[fix->tc_fix_data.opnd].desc);
11459 fix->fx_done = 1;
11460 return;
11461
11462 case BFD_RELOC_IA64_TPREL14:
11463 case BFD_RELOC_IA64_TPREL22:
11464 case BFD_RELOC_IA64_TPREL64I:
11465 case BFD_RELOC_IA64_LTOFF_TPREL22:
11466 case BFD_RELOC_IA64_LTOFF_DTPMOD22:
11467 case BFD_RELOC_IA64_DTPREL14:
11468 case BFD_RELOC_IA64_DTPREL22:
11469 case BFD_RELOC_IA64_DTPREL64I:
11470 case BFD_RELOC_IA64_LTOFF_DTPREL22:
11471 S_SET_THREAD_LOCAL (fix->fx_addsy);
11472 break;
11473
11474 #ifdef TE_VMS
11475 case DUMMY_RELOC_IA64_SLOTCOUNT:
11476 as_bad_where (fix->fx_file, fix->fx_line,
11477 _("cannot resolve @slotcount parameter"));
11478 fix->fx_done = 1;
11479 return;
11480 #endif
11481
11482 default:
11483 break;
11484 }
11485 }
11486 else if (fix->tc_fix_data.opnd == IA64_OPND_NIL)
11487 {
11488 #ifdef TE_VMS
11489 if (fix->fx_r_type == DUMMY_RELOC_IA64_SLOTCOUNT)
11490 {
11491 	  /* For @slotcount, convert a difference of addresses into a
11492 	     difference of slot counts.  */
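	  /* A worked example with made-up numbers: from slot 2 of one
	     bundle to slot 1 of the next, the byte difference is 0xf;
	     (0xf >> 4) * 3 == 0 and the low nibble 0xf contributes 2,
	     giving the expected distance of two slots.  */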
11493 valueT v;
11494
11495 v = (value >> 4) * 3;
11496 switch (value & 0x0f)
11497 {
11498 case 0:
11499 case 1:
11500 case 2:
11501 v += value & 0x0f;
11502 break;
11503 case 0x0f:
11504 v += 2;
11505 break;
11506 case 0x0e:
11507 v += 1;
11508 break;
11509 default:
11510 as_bad (_("invalid @slotcount value"));
11511 }
11512 value = v;
11513 }
11514 #endif
11515
11516 if (fix->tc_fix_data.bigendian)
11517 number_to_chars_bigendian (fixpos, value, fix->fx_size);
11518 else
11519 number_to_chars_littleendian (fixpos, value, fix->fx_size);
11520 fix->fx_done = 1;
11521 }
11522 else
11523 {
11524 fix_insn (fix, elf64_ia64_operands + fix->tc_fix_data.opnd, value);
11525 fix->fx_done = 1;
11526 }
11527 }
11528
11529 /* Generate the BFD reloc to be stuck in the object file from the
11530 fixup used internally in the assembler. */
11531
11532 arelent *
11533 tc_gen_reloc (asection *sec ATTRIBUTE_UNUSED, fixS *fixp)
11534 {
11535 arelent *reloc;
11536
11537 reloc = XNEW (arelent);
11538 reloc->sym_ptr_ptr = XNEW (asymbol *);
11539 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
11540 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
11541 reloc->addend = fixp->fx_offset;
11542 reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
11543
11544 if (!reloc->howto)
11545 {
11546 as_bad_where (fixp->fx_file, fixp->fx_line,
11547 _("Cannot represent %s relocation in object file"),
11548 bfd_get_reloc_code_name (fixp->fx_r_type));
11549 free (reloc);
11550 return NULL;
11551 }
11552 return reloc;
11553 }
11554
11555 /* Turn a string in input_line_pointer into a floating point constant
11556 of type TYPE, and store the appropriate bytes in *LIT. The number
11557 of LITTLENUMS emitted is stored in *SIZE. An error message is
11558 returned, or NULL on OK. */
11559
11560 const char *
11561 md_atof (int type, char *lit, int *size)
11562 {
11563 LITTLENUM_TYPE words[MAX_LITTLENUMS];
11564 char *t;
11565 int prec;
11566
11567 switch (type)
11568 {
11569 /* IEEE floats */
11570 case 'f':
11571 case 'F':
11572 case 's':
11573 case 'S':
11574 prec = 2;
11575 break;
11576
11577 case 'd':
11578 case 'D':
11579 case 'r':
11580 case 'R':
11581 prec = 4;
11582 break;
11583
11584 case 'x':
11585 case 'X':
11586 case 'p':
11587 case 'P':
11588 prec = 5;
11589 break;
11590
11591 default:
11592 *size = 0;
11593 return _("Unrecognized or unsupported floating point constant");
11594 }
11595 t = atof_ieee (input_line_pointer, type, words);
11596 if (t)
11597 input_line_pointer = t;
11598
11599 (*ia64_float_to_chars) (lit, words, prec);
11600
11601 if (type == 'X')
11602 {
11603       /* It is a 10-byte floating point value with 6 bytes of padding.  */
11604 memset (&lit [10], 0, 6);
11605 *size = 8 * sizeof (LITTLENUM_TYPE);
11606 }
11607 else
11608 *size = prec * sizeof (LITTLENUM_TYPE);
11609
11610 return NULL;
11611 }
11612
11613 /* Handle ia64 specific semantics of the align directive. */
11614
11615 void
11616 ia64_md_do_align (int n ATTRIBUTE_UNUSED,
11617 const char *fill ATTRIBUTE_UNUSED,
11618 int len ATTRIBUTE_UNUSED,
11619 int max ATTRIBUTE_UNUSED)
11620 {
11621 if (subseg_text_p (now_seg))
11622 ia64_flush_insns ();
11623 }
11624
11625 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
11626 of an rs_align_code fragment. */
11627
11628 void
11629 ia64_handle_align (fragS *fragp)
11630 {
11631 int bytes;
11632 char *p;
11633 const unsigned char *nop_type;
11634
11635 if (fragp->fr_type != rs_align_code)
11636 return;
11637
11638 /* Check if this frag has to end with a stop bit. */
11639 nop_type = fragp->tc_frag_data ? le_nop_stop : le_nop;
11640
11641 bytes = fragp->fr_next->fr_address - fragp->fr_address - fragp->fr_fix;
11642 p = fragp->fr_literal + fragp->fr_fix;
11643
11644   /* If no padding is needed, check whether we still need a stop bit.  */
11645 if (!bytes && fragp->tc_frag_data)
11646 {
11647 if (fragp->fr_fix < 16)
11648 #if 1
11649 /* FIXME: It won't work with
11650 .align 16
11651 alloc r32=ar.pfs,1,2,4,0
11652 */
11653 ;
11654 #else
11655 as_bad_where (fragp->fr_file, fragp->fr_line,
11656 _("Can't add stop bit to mark end of instruction group"));
11657 #endif
11658 else
11659 /* Bundles are always in little-endian byte order. Make sure
11660 the previous bundle has the stop bit. */
11661 *(p - 16) |= 1;
11662 }
11663
11664 /* Make sure we are on a 16-byte boundary, in case someone has been
11665 putting data into a text section. */
11666 if (bytes & 15)
11667 {
11668 int fix = bytes & 15;
11669 memset (p, 0, fix);
11670 p += fix;
11671 bytes -= fix;
11672 fragp->fr_fix += fix;
11673 }
11674
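  /* The 16-byte nop pattern copied below becomes the frag's variable part
     (fr_var == 16); the generic alignment code is expected to repeat it as
     many times as needed to reach the requested boundary.  */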
11675 /* Instruction bundles are always little-endian. */
11676 memcpy (p, nop_type, 16);
11677 fragp->fr_var = 16;
11678 }
11679
11680 static void
11681 ia64_float_to_chars_bigendian (char *lit, LITTLENUM_TYPE *words,
11682 int prec)
11683 {
11684 while (prec--)
11685 {
11686 number_to_chars_bigendian (lit, (long) (*words++),
11687 sizeof (LITTLENUM_TYPE));
11688 lit += sizeof (LITTLENUM_TYPE);
11689 }
11690 }
11691
11692 static void
11693 ia64_float_to_chars_littleendian (char *lit, LITTLENUM_TYPE *words,
11694 int prec)
11695 {
11696 while (prec--)
11697 {
11698 number_to_chars_littleendian (lit, (long) (words[prec]),
11699 sizeof (LITTLENUM_TYPE));
11700 lit += sizeof (LITTLENUM_TYPE);
11701 }
11702 }
11703
11704 void
11705 ia64_elf_section_change_hook (void)
11706 {
11707 if (elf_section_type (now_seg) == SHT_IA_64_UNWIND
11708 && elf_linked_to_section (now_seg) == NULL)
11709 elf_linked_to_section (now_seg) = text_section;
11710 dot_byteorder (-1);
11711 }
11712
11713 /* Check if a label should be made global. */
11714 void
11715 ia64_check_label (symbolS *label)
11716 {
11717 if (*input_line_pointer == ':')
11718 {
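      /* The label was terminated by one ':'; a second one (i.e. a
	 `name::' definition) marks the label as global.  */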
11719 S_SET_EXTERNAL (label);
11720 input_line_pointer++;
11721 }
11722 }
11723
11724 /* Used to remember where .alias and .secalias directives are seen. We
11725 will rename symbol and section names when we are about to output
11726 the relocatable file. */
11727 struct alias
11728 {
11729 const char *file; /* The file where the directive is seen. */
11730 unsigned int line; /* The line number the directive is at. */
11731 const char *name; /* The original name of the symbol. */
11732 };
11733
11734 /* Called for .alias and .secalias directives. If SECTION is 1, it is
11735 .secalias. Otherwise, it is .alias. */
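/* A typical use (illustrative only) is `.alias foo, "FOO$BAR"', which
   causes the symbol foo to be written to the object file under the name
   FOO$BAR; `.secalias' does the same for section names.  */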
11736 static void
11737 dot_alias (int section)
11738 {
11739 char *name, *alias;
11740 char delim;
11741 char *end_name;
11742 int len;
11743 struct alias *h;
11744 const char *a;
11745 htab_t ahash, nhash;
11746 const char *kind;
11747
11748 delim = get_symbol_name (&name);
11749 end_name = input_line_pointer;
11750 *end_name = delim;
11751
11752 if (name == end_name)
11753 {
11754 as_bad (_("expected symbol name"));
11755 ignore_rest_of_line ();
11756 return;
11757 }
11758
11759 SKIP_WHITESPACE_AFTER_NAME ();
11760
11761 if (*input_line_pointer != ',')
11762 {
11763 *end_name = 0;
11764 as_bad (_("expected comma after \"%s\""), name);
11765 *end_name = delim;
11766 ignore_rest_of_line ();
11767 return;
11768 }
11769
11770 input_line_pointer++;
11771 *end_name = 0;
11772 ia64_canonicalize_symbol_name (name);
11773
11774 /* We call demand_copy_C_string to check if alias string is valid.
11775 There should be a closing `"' and no `\0' in the string. */
11776 alias = demand_copy_C_string (&len);
11777 if (alias == NULL)
11778 {
11779 ignore_rest_of_line ();
11780 return;
11781 }
11782
11783 /* Make a copy of name string. */
11784 len = strlen (name) + 1;
11785   obstack_grow (&notes, name, len);
11786   name = obstack_finish (&notes);
11787
11788 if (section)
11789 {
11790 kind = "section";
11791 ahash = secalias_hash;
11792 nhash = secalias_name_hash;
11793 }
11794 else
11795 {
11796 kind = "symbol";
11797 ahash = alias_hash;
11798 nhash = alias_name_hash;
11799 }
11800
11801 /* Check if alias has been used before. */
11802
11803 h = (struct alias *) str_hash_find (ahash, alias);
11804 if (h)
11805 {
11806 if (strcmp (h->name, name))
11807 as_bad (_("`%s' is already the alias of %s `%s'"),
11808 alias, kind, h->name);
11809       obstack_free (&notes, name);
11810       obstack_free (&notes, alias);
11811 goto out;
11812 }
11813
11814 /* Check if name already has an alias. */
11815 a = (const char *) str_hash_find (nhash, name);
11816 if (a)
11817 {
11818 if (strcmp (a, alias))
11819 as_bad (_("%s `%s' already has an alias `%s'"), kind, name, a);
11820       obstack_free (&notes, name);
11821       obstack_free (&notes, alias);
11822 goto out;
11823 }
11824
11825 h = XNEW (struct alias);
11826 h->file = as_where (&h->line);
11827 h->name = name;
11828
11829 str_hash_insert (ahash, alias, h, 0);
11830 str_hash_insert (nhash, name, alias, 0);
11831
11832 out:
11833 demand_empty_rest_of_line ();
11834 }
11835
11836 /* Rename the original symbol name to its alias.  */
11837 static int
11838 do_alias (void **slot, void *arg ATTRIBUTE_UNUSED)
11839 {
11840 string_tuple_t *tuple = *((string_tuple_t **) slot);
11841 struct alias *h = (struct alias *) tuple->value;
11842 symbolS *sym = symbol_find (h->name);
11843
11844 if (sym == NULL)
11845 {
11846 #ifdef TE_VMS
11847       /* VMS code uses .alias extensively to alias CRTL functions to the
11848          same name with a decc$ prefix.  Sometimes the function gets
11849          optimized away and a warning results, which should be suppressed.  */
11850 if (!startswith (tuple->key, "decc$"))
11851 #endif
11852 as_warn_where (h->file, h->line,
11853 _("symbol `%s' aliased to `%s' is not used"),
11854 h->name, tuple->key);
11855 }
11856 else
11857 S_SET_NAME (sym, (char *) tuple->key);
11858
11859 return 1;
11860 }
11861
11862 /* Called from write_object_file. */
11863 void
11864 ia64_adjust_symtab (void)
11865 {
11866 htab_traverse (alias_hash, do_alias, NULL);
11867 }
11868
11869 /* Rename the original section name to its alias.  */
11870 static int
11871 do_secalias (void **slot, void *arg ATTRIBUTE_UNUSED)
11872 {
11873 string_tuple_t *tuple = *((string_tuple_t **) slot);
11874 struct alias *h = (struct alias *) tuple->value;
11875 segT sec = bfd_get_section_by_name (stdoutput, h->name);
11876
11877 if (sec == NULL)
11878 as_warn_where (h->file, h->line,
11879 _("section `%s' aliased to `%s' is not used"),
11880 h->name, tuple->key);
11881 else
11882 sec->name = tuple->key;
11883
11884 return 1;
11885 }
11886
11887 /* Called from write_object_file. */
11888 void
11889 ia64_frob_file (void)
11890 {
11891 htab_traverse (secalias_hash, do_secalias, NULL);
11892 }
11893
11894 #ifdef TE_VMS
11895 #define NT_VMS_MHD 1
11896 #define NT_VMS_LNM 2
11897
11898 /* Integrity VMS 8.x identifies its ELF modules with a standard ELF
11899 .note section. */
11900
11901 /* Manufacture a VMS-like time string. */
11902 static void
11903 get_vms_time (char *Now)
11904 {
11905 char *pnt;
11906 time_t timeb;
11907
11908 time (&timeb);
11909 pnt = ctime (&timeb);
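  /* ctime yields a fixed-format string such as "Thu Feb 24 15:00:00 2005\n"
     (illustrative); NUL-terminating it at offsets 3, 7, 10, 16 and 24
     isolates the month, day, time-of-day and year fields, which are then
     reassembled below as, e.g., "24-Feb-2005 15:00".  */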
11910 pnt[3] = 0;
11911 pnt[7] = 0;
11912 pnt[10] = 0;
11913 pnt[16] = 0;
11914 pnt[24] = 0;
11915 sprintf (Now, "%2s-%3s-%s %s", pnt + 8, pnt + 4, pnt + 20, pnt + 11);
11916 }
11917
11918 void
11919 ia64_vms_note (void)
11920 {
11921 char *p;
11922 asection *seg = now_seg;
11923 subsegT subseg = now_subseg;
11924 asection *secp = NULL;
11925 char *bname;
11926 char buf [256];
11927 symbolS *sym;
11928
11929 /* Create the .note section. */
11930
11931 secp = subseg_new (".note", 0);
11932 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11933
11934 /* Module header note (MHD). */
11935 bname = xstrdup (lbasename (out_file_name));
11936 if ((p = strrchr (bname, '.')))
11937 *p = '\0';
11938
11939 /* VMS note header is 24 bytes long. */
11940 p = frag_more (8 + 8 + 8);
11941 number_to_chars_littleendian (p + 0, 8, 8);
11942 number_to_chars_littleendian (p + 8, 40 + strlen (bname), 8);
11943 number_to_chars_littleendian (p + 16, NT_VMS_MHD, 8);
11944
11945 p = frag_more (8);
11946 strcpy (p, "IPF/VMS");
11947
11948 p = frag_more (17 + 17 + strlen (bname) + 1 + 5);
11949 get_vms_time (p);
11950 strcpy (p + 17, "24-FEB-2005 15:00");
11951 p += 17 + 17;
11952 strcpy (p, bname);
11953 p += strlen (bname) + 1;
11954 free (bname);
11955 strcpy (p, "V1.0");
11956
11957 frag_align (3, 0, 0);
11958
11959 /* Language processor name note. */
11960 sprintf (buf, "GNU assembler version %s (%s) using BFD version %s",
11961 VERSION, TARGET_ALIAS, BFD_VERSION_STRING);
11962
11963 p = frag_more (8 + 8 + 8);
11964 number_to_chars_littleendian (p + 0, 8, 8);
11965 number_to_chars_littleendian (p + 8, strlen (buf) + 1, 8);
11966 number_to_chars_littleendian (p + 16, NT_VMS_LNM, 8);
11967
11968 p = frag_more (8);
11969 strcpy (p, "IPF/VMS");
11970
11971 p = frag_more (strlen (buf) + 1);
11972 strcpy (p, buf);
11973
11974 frag_align (3, 0, 0);
11975
11976 secp = subseg_new (".vms_display_name_info", 0);
11977 bfd_set_section_flags (secp, SEC_HAS_CONTENTS | SEC_READONLY);
11978
11979 /* This symbol should be passed on the command line and be variable
11980 according to language. */
11981 sym = symbol_new ("__gnat_vms_display_name@gnat_demangler_rtl",
11982 absolute_section, &zero_address_frag, 0);
11983 symbol_table_insert (sym);
11984 symbol_get_bfdsym (sym)->flags |= BSF_DEBUGGING | BSF_DYNAMIC;
11985
11986 p = frag_more (4);
11987 /* Format 3 of VMS demangler Spec. */
11988 number_to_chars_littleendian (p, 3, 4);
11989
11990 p = frag_more (4);
11991 /* Place holder for symbol table index of above symbol. */
11992 number_to_chars_littleendian (p, -1, 4);
11993
11994 frag_align (3, 0, 0);
11995
11996 /* We probably can't restore the current segment, for there likely
11997 isn't one yet... */
11998 if (seg && subseg)
11999 subseg_set (seg, subseg);
12000 }
12001
12002 #endif /* TE_VMS */
12003