1 /* 32-bit ELF support for ARM
2    Copyright (C) 1998-2021 Free Software Foundation, Inc.
3 
4    This file is part of BFD, the Binary File Descriptor library.
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3 of the License, or
9    (at your option) any later version.
10 
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with this program; if not, write to the Free Software
18    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19    MA 02110-1301, USA.  */
20 
21 #include "sysdep.h"
22 #include <limits.h>
23 
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
33 
34 /* Return the relocation section associated with NAME.  HTAB is the
35    bfd's elf32_arm_link_hash_table.  */
36 #define RELOC_SECTION(HTAB, NAME) \
37   ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
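
/* For example, RELOC_SECTION (htab, ".plt") selects between the
   ".rel.plt" and ".rela.plt" section names, depending on whether the
   target uses REL or RELA style relocations.  */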
38 
39 /* Return the size of a relocation entry.  HTAB is the bfd's
40    elf32_arm_link_hash_table.  */
41 #define RELOC_SIZE(HTAB) \
42   ((HTAB)->use_rel \
43    ? sizeof (Elf32_External_Rel) \
44    : sizeof (Elf32_External_Rela))
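
/* For 32-bit ELF this is 8 bytes for an external Rel entry (r_offset
   and r_info) and 12 bytes for an external Rela entry, which also
   carries an explicit r_addend.  */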
45 
46 /* Return the function to swap relocations in.  HTAB is the bfd's
47    elf32_arm_link_hash_table.  */
48 #define SWAP_RELOC_IN(HTAB) \
49   ((HTAB)->use_rel \
50    ? bfd_elf32_swap_reloc_in \
51    : bfd_elf32_swap_reloca_in)
52 
53 /* Return the function to swap relocations out.  HTAB is the bfd's
54    elf32_arm_link_hash_table.  */
55 #define SWAP_RELOC_OUT(HTAB) \
56   ((HTAB)->use_rel \
57    ? bfd_elf32_swap_reloc_out \
58    : bfd_elf32_swap_reloca_out)
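
/* A minimal sketch of how these macros are typically combined when
   emitting a relocation; sreloc, rel and output_bfd are illustrative
   names only:

     Elf_Internal_Rela rel;
     bfd_byte *loc;
     ... fill in rel ...
     loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);  */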
59 
60 #define elf_info_to_howto		NULL
61 #define elf_info_to_howto_rel		elf32_arm_info_to_howto
62 
63 #define ARM_ELF_ABI_VERSION		0
64 #define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
65 
66 /* The Adjusted Place, as defined by AAELF.  */
67 #define Pa(X) ((X) & 0xfffffffc)
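
/* For example, Pa (0x8006) yields 0x8004: the low two bits are
   cleared, giving the enclosing 32-bit aligned (word) address.  */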
68 
69 static bool elf32_arm_write_section (bfd *output_bfd,
70 				     struct bfd_link_info *link_info,
71 				     asection *sec,
72 				     bfd_byte *contents);
73 
74 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
75    R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
76    HOWTO in that slot.  */
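
/* For instance, R_ARM_ABS32 is relocation number 2, so entry 2 below
   (the third entry) must be the R_ARM_ABS32 HOWTO.  */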
77 
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80   /* No relocation.  */
81   HOWTO (R_ARM_NONE,		/* type */
82 	 0,			/* rightshift */
83 	 3,			/* size (0 = byte, 1 = short, 2 = long) */
84 	 0,			/* bitsize */
85 	 false,			/* pc_relative */
86 	 0,			/* bitpos */
87 	 complain_overflow_dont,/* complain_on_overflow */
88 	 bfd_elf_generic_reloc,	/* special_function */
89 	 "R_ARM_NONE",		/* name */
90 	 false,			/* partial_inplace */
91 	 0,			/* src_mask */
92 	 0,			/* dst_mask */
93 	 false),		/* pcrel_offset */
94 
95   HOWTO (R_ARM_PC24,		/* type */
96 	 2,			/* rightshift */
97 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
98 	 24,			/* bitsize */
99 	 true,			/* pc_relative */
100 	 0,			/* bitpos */
101 	 complain_overflow_signed,/* complain_on_overflow */
102 	 bfd_elf_generic_reloc,	/* special_function */
103 	 "R_ARM_PC24",		/* name */
104 	 false,			/* partial_inplace */
105 	 0x00ffffff,		/* src_mask */
106 	 0x00ffffff,		/* dst_mask */
107 	 true),			/* pcrel_offset */
108 
109   /* 32 bit absolute */
110   HOWTO (R_ARM_ABS32,		/* type */
111 	 0,			/* rightshift */
112 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
113 	 32,			/* bitsize */
114 	 false,			/* pc_relative */
115 	 0,			/* bitpos */
116 	 complain_overflow_bitfield,/* complain_on_overflow */
117 	 bfd_elf_generic_reloc,	/* special_function */
118 	 "R_ARM_ABS32",		/* name */
119 	 false,			/* partial_inplace */
120 	 0xffffffff,		/* src_mask */
121 	 0xffffffff,		/* dst_mask */
122 	 false),		/* pcrel_offset */
123 
124   /* Standard 32 bit PC-relative reloc */
125   HOWTO (R_ARM_REL32,		/* type */
126 	 0,			/* rightshift */
127 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
128 	 32,			/* bitsize */
129 	 true,			/* pc_relative */
130 	 0,			/* bitpos */
131 	 complain_overflow_bitfield,/* complain_on_overflow */
132 	 bfd_elf_generic_reloc,	/* special_function */
133 	 "R_ARM_REL32",		/* name */
134 	 false,			/* partial_inplace */
135 	 0xffffffff,		/* src_mask */
136 	 0xffffffff,		/* dst_mask */
137 	 true),			/* pcrel_offset */
138 
139   /* R_ARM_LDR_PC_G0 in AAELF (formerly R_ARM_PC13); PC-relative */
140   HOWTO (R_ARM_LDR_PC_G0,	/* type */
141 	 0,			/* rightshift */
142 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
143 	 32,			/* bitsize */
144 	 true,			/* pc_relative */
145 	 0,			/* bitpos */
146 	 complain_overflow_dont,/* complain_on_overflow */
147 	 bfd_elf_generic_reloc,	/* special_function */
148 	 "R_ARM_LDR_PC_G0",     /* name */
149 	 false,			/* partial_inplace */
150 	 0xffffffff,		/* src_mask */
151 	 0xffffffff,		/* dst_mask */
152 	 true),			/* pcrel_offset */
153 
154    /* 16 bit absolute */
155   HOWTO (R_ARM_ABS16,		/* type */
156 	 0,			/* rightshift */
157 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
158 	 16,			/* bitsize */
159 	 false,			/* pc_relative */
160 	 0,			/* bitpos */
161 	 complain_overflow_bitfield,/* complain_on_overflow */
162 	 bfd_elf_generic_reloc,	/* special_function */
163 	 "R_ARM_ABS16",		/* name */
164 	 false,			/* partial_inplace */
165 	 0x0000ffff,		/* src_mask */
166 	 0x0000ffff,		/* dst_mask */
167 	 false),		/* pcrel_offset */
168 
169   /* 12 bit absolute */
170   HOWTO (R_ARM_ABS12,		/* type */
171 	 0,			/* rightshift */
172 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
173 	 12,			/* bitsize */
174 	 false,			/* pc_relative */
175 	 0,			/* bitpos */
176 	 complain_overflow_bitfield,/* complain_on_overflow */
177 	 bfd_elf_generic_reloc,	/* special_function */
178 	 "R_ARM_ABS12",		/* name */
179 	 false,			/* partial_inplace */
180 	 0x00000fff,		/* src_mask */
181 	 0x00000fff,		/* dst_mask */
182 	 false),		/* pcrel_offset */
183 
184   HOWTO (R_ARM_THM_ABS5,	/* type */
185 	 6,			/* rightshift */
186 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
187 	 5,			/* bitsize */
188 	 false,			/* pc_relative */
189 	 0,			/* bitpos */
190 	 complain_overflow_bitfield,/* complain_on_overflow */
191 	 bfd_elf_generic_reloc,	/* special_function */
192 	 "R_ARM_THM_ABS5",	/* name */
193 	 false,			/* partial_inplace */
194 	 0x000007e0,		/* src_mask */
195 	 0x000007e0,		/* dst_mask */
196 	 false),		/* pcrel_offset */
197 
198   /* 8 bit absolute */
199   HOWTO (R_ARM_ABS8,		/* type */
200 	 0,			/* rightshift */
201 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
202 	 8,			/* bitsize */
203 	 false,			/* pc_relative */
204 	 0,			/* bitpos */
205 	 complain_overflow_bitfield,/* complain_on_overflow */
206 	 bfd_elf_generic_reloc,	/* special_function */
207 	 "R_ARM_ABS8",		/* name */
208 	 false,			/* partial_inplace */
209 	 0x000000ff,		/* src_mask */
210 	 0x000000ff,		/* dst_mask */
211 	 false),		/* pcrel_offset */
212 
213   HOWTO (R_ARM_SBREL32,		/* type */
214 	 0,			/* rightshift */
215 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
216 	 32,			/* bitsize */
217 	 false,			/* pc_relative */
218 	 0,			/* bitpos */
219 	 complain_overflow_dont,/* complain_on_overflow */
220 	 bfd_elf_generic_reloc,	/* special_function */
221 	 "R_ARM_SBREL32",	/* name */
222 	 false,			/* partial_inplace */
223 	 0xffffffff,		/* src_mask */
224 	 0xffffffff,		/* dst_mask */
225 	 false),		/* pcrel_offset */
226 
227   HOWTO (R_ARM_THM_CALL,	/* type */
228 	 1,			/* rightshift */
229 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
230 	 24,			/* bitsize */
231 	 true,			/* pc_relative */
232 	 0,			/* bitpos */
233 	 complain_overflow_signed,/* complain_on_overflow */
234 	 bfd_elf_generic_reloc,	/* special_function */
235 	 "R_ARM_THM_CALL",	/* name */
236 	 false,			/* partial_inplace */
237 	 0x07ff2fff,		/* src_mask */
238 	 0x07ff2fff,		/* dst_mask */
239 	 true),			/* pcrel_offset */
240 
241   HOWTO (R_ARM_THM_PC8,		/* type */
242 	 1,			/* rightshift */
243 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
244 	 8,			/* bitsize */
245 	 true,			/* pc_relative */
246 	 0,			/* bitpos */
247 	 complain_overflow_signed,/* complain_on_overflow */
248 	 bfd_elf_generic_reloc,	/* special_function */
249 	 "R_ARM_THM_PC8",	/* name */
250 	 false,			/* partial_inplace */
251 	 0x000000ff,		/* src_mask */
252 	 0x000000ff,		/* dst_mask */
253 	 true),			/* pcrel_offset */
254 
255   HOWTO (R_ARM_BREL_ADJ,	/* type */
256 	 1,			/* rightshift */
257 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
258 	 32,			/* bitsize */
259 	 false,			/* pc_relative */
260 	 0,			/* bitpos */
261 	 complain_overflow_signed,/* complain_on_overflow */
262 	 bfd_elf_generic_reloc,	/* special_function */
263 	 "R_ARM_BREL_ADJ",	/* name */
264 	 false,			/* partial_inplace */
265 	 0xffffffff,		/* src_mask */
266 	 0xffffffff,		/* dst_mask */
267 	 false),		/* pcrel_offset */
268 
269   HOWTO (R_ARM_TLS_DESC,	/* type */
270 	 0,			/* rightshift */
271 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
272 	 32,			/* bitsize */
273 	 false,			/* pc_relative */
274 	 0,			/* bitpos */
275 	 complain_overflow_bitfield,/* complain_on_overflow */
276 	 bfd_elf_generic_reloc,	/* special_function */
277 	 "R_ARM_TLS_DESC",	/* name */
278 	 false,			/* partial_inplace */
279 	 0xffffffff,		/* src_mask */
280 	 0xffffffff,		/* dst_mask */
281 	 false),		/* pcrel_offset */
282 
283   HOWTO (R_ARM_THM_SWI8,	/* type */
284 	 0,			/* rightshift */
285 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
286 	 0,			/* bitsize */
287 	 false,			/* pc_relative */
288 	 0,			/* bitpos */
289 	 complain_overflow_signed,/* complain_on_overflow */
290 	 bfd_elf_generic_reloc,	/* special_function */
291 	 "R_ARM_THM_SWI8",	/* name */
292 	 false,			/* partial_inplace */
293 	 0x00000000,		/* src_mask */
294 	 0x00000000,		/* dst_mask */
295 	 false),		/* pcrel_offset */
296 
297   /* BLX instruction for the ARM.  */
298   HOWTO (R_ARM_XPC25,		/* type */
299 	 2,			/* rightshift */
300 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
301 	 24,			/* bitsize */
302 	 true,			/* pc_relative */
303 	 0,			/* bitpos */
304 	 complain_overflow_signed,/* complain_on_overflow */
305 	 bfd_elf_generic_reloc,	/* special_function */
306 	 "R_ARM_XPC25",		/* name */
307 	 false,			/* partial_inplace */
308 	 0x00ffffff,		/* src_mask */
309 	 0x00ffffff,		/* dst_mask */
310 	 true),			/* pcrel_offset */
311 
312   /* BLX instruction for the Thumb.  */
313   HOWTO (R_ARM_THM_XPC22,	/* type */
314 	 2,			/* rightshift */
315 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
316 	 24,			/* bitsize */
317 	 true,			/* pc_relative */
318 	 0,			/* bitpos */
319 	 complain_overflow_signed,/* complain_on_overflow */
320 	 bfd_elf_generic_reloc,	/* special_function */
321 	 "R_ARM_THM_XPC22",	/* name */
322 	 false,			/* partial_inplace */
323 	 0x07ff2fff,		/* src_mask */
324 	 0x07ff2fff,		/* dst_mask */
325 	 true),			/* pcrel_offset */
326 
327   /* Dynamic TLS relocations.  */
328 
329   HOWTO (R_ARM_TLS_DTPMOD32,	/* type */
330 	 0,			/* rightshift */
331 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
332 	 32,			/* bitsize */
333 	 false,			/* pc_relative */
334 	 0,			/* bitpos */
335 	 complain_overflow_bitfield,/* complain_on_overflow */
336 	 bfd_elf_generic_reloc, /* special_function */
337 	 "R_ARM_TLS_DTPMOD32",	/* name */
338 	 true,			/* partial_inplace */
339 	 0xffffffff,		/* src_mask */
340 	 0xffffffff,		/* dst_mask */
341 	 false),		/* pcrel_offset */
342 
343   HOWTO (R_ARM_TLS_DTPOFF32,	/* type */
344 	 0,			/* rightshift */
345 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
346 	 32,			/* bitsize */
347 	 false,			/* pc_relative */
348 	 0,			/* bitpos */
349 	 complain_overflow_bitfield,/* complain_on_overflow */
350 	 bfd_elf_generic_reloc, /* special_function */
351 	 "R_ARM_TLS_DTPOFF32",	/* name */
352 	 true,			/* partial_inplace */
353 	 0xffffffff,		/* src_mask */
354 	 0xffffffff,		/* dst_mask */
355 	 false),		/* pcrel_offset */
356 
357   HOWTO (R_ARM_TLS_TPOFF32,	/* type */
358 	 0,			/* rightshift */
359 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
360 	 32,			/* bitsize */
361 	 false,			/* pc_relative */
362 	 0,			/* bitpos */
363 	 complain_overflow_bitfield,/* complain_on_overflow */
364 	 bfd_elf_generic_reloc, /* special_function */
365 	 "R_ARM_TLS_TPOFF32",	/* name */
366 	 true,			/* partial_inplace */
367 	 0xffffffff,		/* src_mask */
368 	 0xffffffff,		/* dst_mask */
369 	 false),		/* pcrel_offset */
370 
371   /* Relocs used in ARM Linux */
372 
373   HOWTO (R_ARM_COPY,		/* type */
374 	 0,			/* rightshift */
375 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
376 	 32,			/* bitsize */
377 	 false,			/* pc_relative */
378 	 0,			/* bitpos */
379 	 complain_overflow_bitfield,/* complain_on_overflow */
380 	 bfd_elf_generic_reloc, /* special_function */
381 	 "R_ARM_COPY",		/* name */
382 	 true,			/* partial_inplace */
383 	 0xffffffff,		/* src_mask */
384 	 0xffffffff,		/* dst_mask */
385 	 false),		/* pcrel_offset */
386 
387   HOWTO (R_ARM_GLOB_DAT,	/* type */
388 	 0,			/* rightshift */
389 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
390 	 32,			/* bitsize */
391 	 false,			/* pc_relative */
392 	 0,			/* bitpos */
393 	 complain_overflow_bitfield,/* complain_on_overflow */
394 	 bfd_elf_generic_reloc, /* special_function */
395 	 "R_ARM_GLOB_DAT",	/* name */
396 	 true,			/* partial_inplace */
397 	 0xffffffff,		/* src_mask */
398 	 0xffffffff,		/* dst_mask */
399 	 false),		/* pcrel_offset */
400 
401   HOWTO (R_ARM_JUMP_SLOT,	/* type */
402 	 0,			/* rightshift */
403 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
404 	 32,			/* bitsize */
405 	 false,			/* pc_relative */
406 	 0,			/* bitpos */
407 	 complain_overflow_bitfield,/* complain_on_overflow */
408 	 bfd_elf_generic_reloc, /* special_function */
409 	 "R_ARM_JUMP_SLOT",	/* name */
410 	 true,			/* partial_inplace */
411 	 0xffffffff,		/* src_mask */
412 	 0xffffffff,		/* dst_mask */
413 	 false),		/* pcrel_offset */
414 
415   HOWTO (R_ARM_RELATIVE,	/* type */
416 	 0,			/* rightshift */
417 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
418 	 32,			/* bitsize */
419 	 false,			/* pc_relative */
420 	 0,			/* bitpos */
421 	 complain_overflow_bitfield,/* complain_on_overflow */
422 	 bfd_elf_generic_reloc, /* special_function */
423 	 "R_ARM_RELATIVE",	/* name */
424 	 true,			/* partial_inplace */
425 	 0xffffffff,		/* src_mask */
426 	 0xffffffff,		/* dst_mask */
427 	 false),		/* pcrel_offset */
428 
429   HOWTO (R_ARM_GOTOFF32,	/* type */
430 	 0,			/* rightshift */
431 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
432 	 32,			/* bitsize */
433 	 false,			/* pc_relative */
434 	 0,			/* bitpos */
435 	 complain_overflow_bitfield,/* complain_on_overflow */
436 	 bfd_elf_generic_reloc, /* special_function */
437 	 "R_ARM_GOTOFF32",	/* name */
438 	 true,			/* partial_inplace */
439 	 0xffffffff,		/* src_mask */
440 	 0xffffffff,		/* dst_mask */
441 	 false),		/* pcrel_offset */
442 
443   HOWTO (R_ARM_GOTPC,		/* type */
444 	 0,			/* rightshift */
445 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
446 	 32,			/* bitsize */
447 	 true,			/* pc_relative */
448 	 0,			/* bitpos */
449 	 complain_overflow_bitfield,/* complain_on_overflow */
450 	 bfd_elf_generic_reloc, /* special_function */
451 	 "R_ARM_GOTPC",		/* name */
452 	 true,			/* partial_inplace */
453 	 0xffffffff,		/* src_mask */
454 	 0xffffffff,		/* dst_mask */
455 	 true),			/* pcrel_offset */
456 
457   HOWTO (R_ARM_GOT32,		/* type */
458 	 0,			/* rightshift */
459 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
460 	 32,			/* bitsize */
461 	 false,			/* pc_relative */
462 	 0,			/* bitpos */
463 	 complain_overflow_bitfield,/* complain_on_overflow */
464 	 bfd_elf_generic_reloc, /* special_function */
465 	 "R_ARM_GOT32",		/* name */
466 	 true,			/* partial_inplace */
467 	 0xffffffff,		/* src_mask */
468 	 0xffffffff,		/* dst_mask */
469 	 false),		/* pcrel_offset */
470 
471   HOWTO (R_ARM_PLT32,		/* type */
472 	 2,			/* rightshift */
473 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
474 	 24,			/* bitsize */
475 	 true,			/* pc_relative */
476 	 0,			/* bitpos */
477 	 complain_overflow_bitfield,/* complain_on_overflow */
478 	 bfd_elf_generic_reloc, /* special_function */
479 	 "R_ARM_PLT32",		/* name */
480 	 false,			/* partial_inplace */
481 	 0x00ffffff,		/* src_mask */
482 	 0x00ffffff,		/* dst_mask */
483 	 true),			/* pcrel_offset */
484 
485   HOWTO (R_ARM_CALL,		/* type */
486 	 2,			/* rightshift */
487 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
488 	 24,			/* bitsize */
489 	 true,			/* pc_relative */
490 	 0,			/* bitpos */
491 	 complain_overflow_signed,/* complain_on_overflow */
492 	 bfd_elf_generic_reloc,	/* special_function */
493 	 "R_ARM_CALL",		/* name */
494 	 false,			/* partial_inplace */
495 	 0x00ffffff,		/* src_mask */
496 	 0x00ffffff,		/* dst_mask */
497 	 true),			/* pcrel_offset */
498 
499   HOWTO (R_ARM_JUMP24,		/* type */
500 	 2,			/* rightshift */
501 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
502 	 24,			/* bitsize */
503 	 true,			/* pc_relative */
504 	 0,			/* bitpos */
505 	 complain_overflow_signed,/* complain_on_overflow */
506 	 bfd_elf_generic_reloc,	/* special_function */
507 	 "R_ARM_JUMP24",	/* name */
508 	 false,			/* partial_inplace */
509 	 0x00ffffff,		/* src_mask */
510 	 0x00ffffff,		/* dst_mask */
511 	 true),			/* pcrel_offset */
512 
513   HOWTO (R_ARM_THM_JUMP24,	/* type */
514 	 1,			/* rightshift */
515 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
516 	 24,			/* bitsize */
517 	 true,			/* pc_relative */
518 	 0,			/* bitpos */
519 	 complain_overflow_signed,/* complain_on_overflow */
520 	 bfd_elf_generic_reloc,	/* special_function */
521 	 "R_ARM_THM_JUMP24",	/* name */
522 	 false,			/* partial_inplace */
523 	 0x07ff2fff,		/* src_mask */
524 	 0x07ff2fff,		/* dst_mask */
525 	 true),			/* pcrel_offset */
526 
527   HOWTO (R_ARM_BASE_ABS,	/* type */
528 	 0,			/* rightshift */
529 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
530 	 32,			/* bitsize */
531 	 false,			/* pc_relative */
532 	 0,			/* bitpos */
533 	 complain_overflow_dont,/* complain_on_overflow */
534 	 bfd_elf_generic_reloc,	/* special_function */
535 	 "R_ARM_BASE_ABS",	/* name */
536 	 false,			/* partial_inplace */
537 	 0xffffffff,		/* src_mask */
538 	 0xffffffff,		/* dst_mask */
539 	 false),		/* pcrel_offset */
540 
541   HOWTO (R_ARM_ALU_PCREL7_0,	/* type */
542 	 0,			/* rightshift */
543 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
544 	 12,			/* bitsize */
545 	 true,			/* pc_relative */
546 	 0,			/* bitpos */
547 	 complain_overflow_dont,/* complain_on_overflow */
548 	 bfd_elf_generic_reloc,	/* special_function */
549 	 "R_ARM_ALU_PCREL_7_0",	/* name */
550 	 false,			/* partial_inplace */
551 	 0x00000fff,		/* src_mask */
552 	 0x00000fff,		/* dst_mask */
553 	 true),			/* pcrel_offset */
554 
555   HOWTO (R_ARM_ALU_PCREL15_8,	/* type */
556 	 0,			/* rightshift */
557 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
558 	 12,			/* bitsize */
559 	 true,			/* pc_relative */
560 	 8,			/* bitpos */
561 	 complain_overflow_dont,/* complain_on_overflow */
562 	 bfd_elf_generic_reloc,	/* special_function */
563 	 "R_ARM_ALU_PCREL_15_8",/* name */
564 	 false,			/* partial_inplace */
565 	 0x00000fff,		/* src_mask */
566 	 0x00000fff,		/* dst_mask */
567 	 true),			/* pcrel_offset */
568 
569   HOWTO (R_ARM_ALU_PCREL23_15,	/* type */
570 	 0,			/* rightshift */
571 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
572 	 12,			/* bitsize */
573 	 true,			/* pc_relative */
574 	 16,			/* bitpos */
575 	 complain_overflow_dont,/* complain_on_overflow */
576 	 bfd_elf_generic_reloc,	/* special_function */
577 	 "R_ARM_ALU_PCREL_23_15",/* name */
578 	 false,			/* partial_inplace */
579 	 0x00000fff,		/* src_mask */
580 	 0x00000fff,		/* dst_mask */
581 	 true),			/* pcrel_offset */
582 
583   HOWTO (R_ARM_LDR_SBREL_11_0,	/* type */
584 	 0,			/* rightshift */
585 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
586 	 12,			/* bitsize */
587 	 false,			/* pc_relative */
588 	 0,			/* bitpos */
589 	 complain_overflow_dont,/* complain_on_overflow */
590 	 bfd_elf_generic_reloc,	/* special_function */
591 	 "R_ARM_LDR_SBREL_11_0",/* name */
592 	 false,			/* partial_inplace */
593 	 0x00000fff,		/* src_mask */
594 	 0x00000fff,		/* dst_mask */
595 	 false),		/* pcrel_offset */
596 
597   HOWTO (R_ARM_ALU_SBREL_19_12,	/* type */
598 	 0,			/* rightshift */
599 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
600 	 8,			/* bitsize */
601 	 false,			/* pc_relative */
602 	 12,			/* bitpos */
603 	 complain_overflow_dont,/* complain_on_overflow */
604 	 bfd_elf_generic_reloc,	/* special_function */
605 	 "R_ARM_ALU_SBREL_19_12",/* name */
606 	 false,			/* partial_inplace */
607 	 0x000ff000,		/* src_mask */
608 	 0x000ff000,		/* dst_mask */
609 	 false),		/* pcrel_offset */
610 
611   HOWTO (R_ARM_ALU_SBREL_27_20,	/* type */
612 	 0,			/* rightshift */
613 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
614 	 8,			/* bitsize */
615 	 false,			/* pc_relative */
616 	 20,			/* bitpos */
617 	 complain_overflow_dont,/* complain_on_overflow */
618 	 bfd_elf_generic_reloc,	/* special_function */
619 	 "R_ARM_ALU_SBREL_27_20",/* name */
620 	 false,			/* partial_inplace */
621 	 0x0ff00000,		/* src_mask */
622 	 0x0ff00000,		/* dst_mask */
623 	 false),		/* pcrel_offset */
624 
625   HOWTO (R_ARM_TARGET1,		/* type */
626 	 0,			/* rightshift */
627 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
628 	 32,			/* bitsize */
629 	 false,			/* pc_relative */
630 	 0,			/* bitpos */
631 	 complain_overflow_dont,/* complain_on_overflow */
632 	 bfd_elf_generic_reloc,	/* special_function */
633 	 "R_ARM_TARGET1",	/* name */
634 	 false,			/* partial_inplace */
635 	 0xffffffff,		/* src_mask */
636 	 0xffffffff,		/* dst_mask */
637 	 false),		/* pcrel_offset */
638 
639   HOWTO (R_ARM_ROSEGREL32,	/* type */
640 	 0,			/* rightshift */
641 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
642 	 32,			/* bitsize */
643 	 false,			/* pc_relative */
644 	 0,			/* bitpos */
645 	 complain_overflow_dont,/* complain_on_overflow */
646 	 bfd_elf_generic_reloc,	/* special_function */
647 	 "R_ARM_ROSEGREL32",	/* name */
648 	 false,			/* partial_inplace */
649 	 0xffffffff,		/* src_mask */
650 	 0xffffffff,		/* dst_mask */
651 	 false),		/* pcrel_offset */
652 
653   HOWTO (R_ARM_V4BX,		/* type */
654 	 0,			/* rightshift */
655 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
656 	 32,			/* bitsize */
657 	 false,			/* pc_relative */
658 	 0,			/* bitpos */
659 	 complain_overflow_dont,/* complain_on_overflow */
660 	 bfd_elf_generic_reloc,	/* special_function */
661 	 "R_ARM_V4BX",		/* name */
662 	 false,			/* partial_inplace */
663 	 0xffffffff,		/* src_mask */
664 	 0xffffffff,		/* dst_mask */
665 	 false),		/* pcrel_offset */
666 
667   HOWTO (R_ARM_TARGET2,		/* type */
668 	 0,			/* rightshift */
669 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
670 	 32,			/* bitsize */
671 	 false,			/* pc_relative */
672 	 0,			/* bitpos */
673 	 complain_overflow_signed,/* complain_on_overflow */
674 	 bfd_elf_generic_reloc,	/* special_function */
675 	 "R_ARM_TARGET2",	/* name */
676 	 false,			/* partial_inplace */
677 	 0xffffffff,		/* src_mask */
678 	 0xffffffff,		/* dst_mask */
679 	 true),			/* pcrel_offset */
680 
681   HOWTO (R_ARM_PREL31,		/* type */
682 	 0,			/* rightshift */
683 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
684 	 31,			/* bitsize */
685 	 true,			/* pc_relative */
686 	 0,			/* bitpos */
687 	 complain_overflow_signed,/* complain_on_overflow */
688 	 bfd_elf_generic_reloc,	/* special_function */
689 	 "R_ARM_PREL31",	/* name */
690 	 false,			/* partial_inplace */
691 	 0x7fffffff,		/* src_mask */
692 	 0x7fffffff,		/* dst_mask */
693 	 true),			/* pcrel_offset */
694 
695   HOWTO (R_ARM_MOVW_ABS_NC,	/* type */
696 	 0,			/* rightshift */
697 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
698 	 16,			/* bitsize */
699 	 false,			/* pc_relative */
700 	 0,			/* bitpos */
701 	 complain_overflow_dont,/* complain_on_overflow */
702 	 bfd_elf_generic_reloc,	/* special_function */
703 	 "R_ARM_MOVW_ABS_NC",	/* name */
704 	 false,			/* partial_inplace */
705 	 0x000f0fff,		/* src_mask */
706 	 0x000f0fff,		/* dst_mask */
707 	 false),		/* pcrel_offset */
708 
709   HOWTO (R_ARM_MOVT_ABS,	/* type */
710 	 0,			/* rightshift */
711 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
712 	 16,			/* bitsize */
713 	 false,			/* pc_relative */
714 	 0,			/* bitpos */
715 	 complain_overflow_bitfield,/* complain_on_overflow */
716 	 bfd_elf_generic_reloc,	/* special_function */
717 	 "R_ARM_MOVT_ABS",	/* name */
718 	 false,			/* partial_inplace */
719 	 0x000f0fff,		/* src_mask */
720 	 0x000f0fff,		/* dst_mask */
721 	 false),		/* pcrel_offset */
722 
723   HOWTO (R_ARM_MOVW_PREL_NC,	/* type */
724 	 0,			/* rightshift */
725 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
726 	 16,			/* bitsize */
727 	 true,			/* pc_relative */
728 	 0,			/* bitpos */
729 	 complain_overflow_dont,/* complain_on_overflow */
730 	 bfd_elf_generic_reloc,	/* special_function */
731 	 "R_ARM_MOVW_PREL_NC",	/* name */
732 	 false,			/* partial_inplace */
733 	 0x000f0fff,		/* src_mask */
734 	 0x000f0fff,		/* dst_mask */
735 	 true),			/* pcrel_offset */
736 
737   HOWTO (R_ARM_MOVT_PREL,	/* type */
738 	 0,			/* rightshift */
739 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
740 	 16,			/* bitsize */
741 	 true,			/* pc_relative */
742 	 0,			/* bitpos */
743 	 complain_overflow_bitfield,/* complain_on_overflow */
744 	 bfd_elf_generic_reloc,	/* special_function */
745 	 "R_ARM_MOVT_PREL",	/* name */
746 	 false,			/* partial_inplace */
747 	 0x000f0fff,		/* src_mask */
748 	 0x000f0fff,		/* dst_mask */
749 	 true),			/* pcrel_offset */
750 
751   HOWTO (R_ARM_THM_MOVW_ABS_NC,	/* type */
752 	 0,			/* rightshift */
753 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
754 	 16,			/* bitsize */
755 	 false,			/* pc_relative */
756 	 0,			/* bitpos */
757 	 complain_overflow_dont,/* complain_on_overflow */
758 	 bfd_elf_generic_reloc,	/* special_function */
759 	 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 	 false,			/* partial_inplace */
761 	 0x040f70ff,		/* src_mask */
762 	 0x040f70ff,		/* dst_mask */
763 	 false),		/* pcrel_offset */
764 
765   HOWTO (R_ARM_THM_MOVT_ABS,	/* type */
766 	 0,			/* rightshift */
767 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
768 	 16,			/* bitsize */
769 	 false,			/* pc_relative */
770 	 0,			/* bitpos */
771 	 complain_overflow_bitfield,/* complain_on_overflow */
772 	 bfd_elf_generic_reloc,	/* special_function */
773 	 "R_ARM_THM_MOVT_ABS",	/* name */
774 	 false,			/* partial_inplace */
775 	 0x040f70ff,		/* src_mask */
776 	 0x040f70ff,		/* dst_mask */
777 	 false),		/* pcrel_offset */
778 
779   HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 	 0,			/* rightshift */
781 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
782 	 16,			/* bitsize */
783 	 true,			/* pc_relative */
784 	 0,			/* bitpos */
785 	 complain_overflow_dont,/* complain_on_overflow */
786 	 bfd_elf_generic_reloc,	/* special_function */
787 	 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 	 false,			/* partial_inplace */
789 	 0x040f70ff,		/* src_mask */
790 	 0x040f70ff,		/* dst_mask */
791 	 true),			/* pcrel_offset */
792 
793   HOWTO (R_ARM_THM_MOVT_PREL,	/* type */
794 	 0,			/* rightshift */
795 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
796 	 16,			/* bitsize */
797 	 true,			/* pc_relative */
798 	 0,			/* bitpos */
799 	 complain_overflow_bitfield,/* complain_on_overflow */
800 	 bfd_elf_generic_reloc,	/* special_function */
801 	 "R_ARM_THM_MOVT_PREL",	/* name */
802 	 false,			/* partial_inplace */
803 	 0x040f70ff,		/* src_mask */
804 	 0x040f70ff,		/* dst_mask */
805 	 true),			/* pcrel_offset */
806 
807   HOWTO (R_ARM_THM_JUMP19,	/* type */
808 	 1,			/* rightshift */
809 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
810 	 19,			/* bitsize */
811 	 true,			/* pc_relative */
812 	 0,			/* bitpos */
813 	 complain_overflow_signed,/* complain_on_overflow */
814 	 bfd_elf_generic_reloc, /* special_function */
815 	 "R_ARM_THM_JUMP19",	/* name */
816 	 false,			/* partial_inplace */
817 	 0x043f2fff,		/* src_mask */
818 	 0x043f2fff,		/* dst_mask */
819 	 true),			/* pcrel_offset */
820 
821   HOWTO (R_ARM_THM_JUMP6,	/* type */
822 	 1,			/* rightshift */
823 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
824 	 6,			/* bitsize */
825 	 true,			/* pc_relative */
826 	 0,			/* bitpos */
827 	 complain_overflow_unsigned,/* complain_on_overflow */
828 	 bfd_elf_generic_reloc,	/* special_function */
829 	 "R_ARM_THM_JUMP6",	/* name */
830 	 false,			/* partial_inplace */
831 	 0x02f8,		/* src_mask */
832 	 0x02f8,		/* dst_mask */
833 	 true),			/* pcrel_offset */
834 
835   /* These are declared as 13-bit signed relocations because we can
836      address -4095 .. 4095 relative to the base by altering ADDW to
837      SUBW or vice versa.  */
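  /* For example, under that scheme a place-relative offset of -100 can
     be encoded by rewriting ADDW as SUBW with an immediate of 100 (an
     illustrative value only).  */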
838   HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 	 0,			/* rightshift */
840 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
841 	 13,			/* bitsize */
842 	 true,			/* pc_relative */
843 	 0,			/* bitpos */
844 	 complain_overflow_dont,/* complain_on_overflow */
845 	 bfd_elf_generic_reloc,	/* special_function */
846 	 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 	 false,			/* partial_inplace */
848 	 0xffffffff,		/* src_mask */
849 	 0xffffffff,		/* dst_mask */
850 	 true),			/* pcrel_offset */
851 
852   HOWTO (R_ARM_THM_PC12,	/* type */
853 	 0,			/* rightshift */
854 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
855 	 13,			/* bitsize */
856 	 true,			/* pc_relative */
857 	 0,			/* bitpos */
858 	 complain_overflow_dont,/* complain_on_overflow */
859 	 bfd_elf_generic_reloc,	/* special_function */
860 	 "R_ARM_THM_PC12",	/* name */
861 	 false,			/* partial_inplace */
862 	 0xffffffff,		/* src_mask */
863 	 0xffffffff,		/* dst_mask */
864 	 true),			/* pcrel_offset */
865 
866   HOWTO (R_ARM_ABS32_NOI,	/* type */
867 	 0,			/* rightshift */
868 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
869 	 32,			/* bitsize */
870 	 false,			/* pc_relative */
871 	 0,			/* bitpos */
872 	 complain_overflow_dont,/* complain_on_overflow */
873 	 bfd_elf_generic_reloc,	/* special_function */
874 	 "R_ARM_ABS32_NOI",	/* name */
875 	 false,			/* partial_inplace */
876 	 0xffffffff,		/* src_mask */
877 	 0xffffffff,		/* dst_mask */
878 	 false),		/* pcrel_offset */
879 
880   HOWTO (R_ARM_REL32_NOI,	/* type */
881 	 0,			/* rightshift */
882 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
883 	 32,			/* bitsize */
884 	 true,			/* pc_relative */
885 	 0,			/* bitpos */
886 	 complain_overflow_dont,/* complain_on_overflow */
887 	 bfd_elf_generic_reloc,	/* special_function */
888 	 "R_ARM_REL32_NOI",	/* name */
889 	 false,			/* partial_inplace */
890 	 0xffffffff,		/* src_mask */
891 	 0xffffffff,		/* dst_mask */
892 	 false),		/* pcrel_offset */
893 
894   /* Group relocations.  */
895 
896   HOWTO (R_ARM_ALU_PC_G0_NC,	/* type */
897 	 0,			/* rightshift */
898 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
899 	 32,			/* bitsize */
900 	 true,			/* pc_relative */
901 	 0,			/* bitpos */
902 	 complain_overflow_dont,/* complain_on_overflow */
903 	 bfd_elf_generic_reloc,	/* special_function */
904 	 "R_ARM_ALU_PC_G0_NC",	/* name */
905 	 false,			/* partial_inplace */
906 	 0xffffffff,		/* src_mask */
907 	 0xffffffff,		/* dst_mask */
908 	 true),			/* pcrel_offset */
909 
910   HOWTO (R_ARM_ALU_PC_G0,	/* type */
911 	 0,			/* rightshift */
912 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
913 	 32,			/* bitsize */
914 	 true,			/* pc_relative */
915 	 0,			/* bitpos */
916 	 complain_overflow_dont,/* complain_on_overflow */
917 	 bfd_elf_generic_reloc,	/* special_function */
918 	 "R_ARM_ALU_PC_G0",	/* name */
919 	 false,			/* partial_inplace */
920 	 0xffffffff,		/* src_mask */
921 	 0xffffffff,		/* dst_mask */
922 	 true),			/* pcrel_offset */
923 
924   HOWTO (R_ARM_ALU_PC_G1_NC,	/* type */
925 	 0,			/* rightshift */
926 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
927 	 32,			/* bitsize */
928 	 true,			/* pc_relative */
929 	 0,			/* bitpos */
930 	 complain_overflow_dont,/* complain_on_overflow */
931 	 bfd_elf_generic_reloc,	/* special_function */
932 	 "R_ARM_ALU_PC_G1_NC",	/* name */
933 	 false,			/* partial_inplace */
934 	 0xffffffff,		/* src_mask */
935 	 0xffffffff,		/* dst_mask */
936 	 true),			/* pcrel_offset */
937 
938   HOWTO (R_ARM_ALU_PC_G1,	/* type */
939 	 0,			/* rightshift */
940 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
941 	 32,			/* bitsize */
942 	 true,			/* pc_relative */
943 	 0,			/* bitpos */
944 	 complain_overflow_dont,/* complain_on_overflow */
945 	 bfd_elf_generic_reloc,	/* special_function */
946 	 "R_ARM_ALU_PC_G1",	/* name */
947 	 false,			/* partial_inplace */
948 	 0xffffffff,		/* src_mask */
949 	 0xffffffff,		/* dst_mask */
950 	 true),			/* pcrel_offset */
951 
952   HOWTO (R_ARM_ALU_PC_G2,	/* type */
953 	 0,			/* rightshift */
954 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
955 	 32,			/* bitsize */
956 	 true,			/* pc_relative */
957 	 0,			/* bitpos */
958 	 complain_overflow_dont,/* complain_on_overflow */
959 	 bfd_elf_generic_reloc,	/* special_function */
960 	 "R_ARM_ALU_PC_G2",	/* name */
961 	 false,			/* partial_inplace */
962 	 0xffffffff,		/* src_mask */
963 	 0xffffffff,		/* dst_mask */
964 	 true),			/* pcrel_offset */
965 
966   HOWTO (R_ARM_LDR_PC_G1,	/* type */
967 	 0,			/* rightshift */
968 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
969 	 32,			/* bitsize */
970 	 true,			/* pc_relative */
971 	 0,			/* bitpos */
972 	 complain_overflow_dont,/* complain_on_overflow */
973 	 bfd_elf_generic_reloc,	/* special_function */
974 	 "R_ARM_LDR_PC_G1",	/* name */
975 	 false,			/* partial_inplace */
976 	 0xffffffff,		/* src_mask */
977 	 0xffffffff,		/* dst_mask */
978 	 true),			/* pcrel_offset */
979 
980   HOWTO (R_ARM_LDR_PC_G2,	/* type */
981 	 0,			/* rightshift */
982 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
983 	 32,			/* bitsize */
984 	 true,			/* pc_relative */
985 	 0,			/* bitpos */
986 	 complain_overflow_dont,/* complain_on_overflow */
987 	 bfd_elf_generic_reloc,	/* special_function */
988 	 "R_ARM_LDR_PC_G2",	/* name */
989 	 false,			/* partial_inplace */
990 	 0xffffffff,		/* src_mask */
991 	 0xffffffff,		/* dst_mask */
992 	 true),			/* pcrel_offset */
993 
994   HOWTO (R_ARM_LDRS_PC_G0,	/* type */
995 	 0,			/* rightshift */
996 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
997 	 32,			/* bitsize */
998 	 true,			/* pc_relative */
999 	 0,			/* bitpos */
1000 	 complain_overflow_dont,/* complain_on_overflow */
1001 	 bfd_elf_generic_reloc,	/* special_function */
1002 	 "R_ARM_LDRS_PC_G0",	/* name */
1003 	 false,			/* partial_inplace */
1004 	 0xffffffff,		/* src_mask */
1005 	 0xffffffff,		/* dst_mask */
1006 	 true),			/* pcrel_offset */
1007 
1008   HOWTO (R_ARM_LDRS_PC_G1,	/* type */
1009 	 0,			/* rightshift */
1010 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1011 	 32,			/* bitsize */
1012 	 true,			/* pc_relative */
1013 	 0,			/* bitpos */
1014 	 complain_overflow_dont,/* complain_on_overflow */
1015 	 bfd_elf_generic_reloc,	/* special_function */
1016 	 "R_ARM_LDRS_PC_G1",	/* name */
1017 	 false,			/* partial_inplace */
1018 	 0xffffffff,		/* src_mask */
1019 	 0xffffffff,		/* dst_mask */
1020 	 true),			/* pcrel_offset */
1021 
1022   HOWTO (R_ARM_LDRS_PC_G2,	/* type */
1023 	 0,			/* rightshift */
1024 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1025 	 32,			/* bitsize */
1026 	 true,			/* pc_relative */
1027 	 0,			/* bitpos */
1028 	 complain_overflow_dont,/* complain_on_overflow */
1029 	 bfd_elf_generic_reloc,	/* special_function */
1030 	 "R_ARM_LDRS_PC_G2",	/* name */
1031 	 false,			/* partial_inplace */
1032 	 0xffffffff,		/* src_mask */
1033 	 0xffffffff,		/* dst_mask */
1034 	 true),			/* pcrel_offset */
1035 
1036   HOWTO (R_ARM_LDC_PC_G0,	/* type */
1037 	 0,			/* rightshift */
1038 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1039 	 32,			/* bitsize */
1040 	 true,			/* pc_relative */
1041 	 0,			/* bitpos */
1042 	 complain_overflow_dont,/* complain_on_overflow */
1043 	 bfd_elf_generic_reloc,	/* special_function */
1044 	 "R_ARM_LDC_PC_G0",	/* name */
1045 	 false,			/* partial_inplace */
1046 	 0xffffffff,		/* src_mask */
1047 	 0xffffffff,		/* dst_mask */
1048 	 true),			/* pcrel_offset */
1049 
1050   HOWTO (R_ARM_LDC_PC_G1,	/* type */
1051 	 0,			/* rightshift */
1052 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1053 	 32,			/* bitsize */
1054 	 true,			/* pc_relative */
1055 	 0,			/* bitpos */
1056 	 complain_overflow_dont,/* complain_on_overflow */
1057 	 bfd_elf_generic_reloc,	/* special_function */
1058 	 "R_ARM_LDC_PC_G1",	/* name */
1059 	 false,			/* partial_inplace */
1060 	 0xffffffff,		/* src_mask */
1061 	 0xffffffff,		/* dst_mask */
1062 	 true),			/* pcrel_offset */
1063 
1064   HOWTO (R_ARM_LDC_PC_G2,	/* type */
1065 	 0,			/* rightshift */
1066 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1067 	 32,			/* bitsize */
1068 	 true,			/* pc_relative */
1069 	 0,			/* bitpos */
1070 	 complain_overflow_dont,/* complain_on_overflow */
1071 	 bfd_elf_generic_reloc,	/* special_function */
1072 	 "R_ARM_LDC_PC_G2",	/* name */
1073 	 false,			/* partial_inplace */
1074 	 0xffffffff,		/* src_mask */
1075 	 0xffffffff,		/* dst_mask */
1076 	 true),			/* pcrel_offset */
1077 
1078   HOWTO (R_ARM_ALU_SB_G0_NC,	/* type */
1079 	 0,			/* rightshift */
1080 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1081 	 32,			/* bitsize */
1082 	 true,			/* pc_relative */
1083 	 0,			/* bitpos */
1084 	 complain_overflow_dont,/* complain_on_overflow */
1085 	 bfd_elf_generic_reloc,	/* special_function */
1086 	 "R_ARM_ALU_SB_G0_NC",	/* name */
1087 	 false,			/* partial_inplace */
1088 	 0xffffffff,		/* src_mask */
1089 	 0xffffffff,		/* dst_mask */
1090 	 true),			/* pcrel_offset */
1091 
1092   HOWTO (R_ARM_ALU_SB_G0,	/* type */
1093 	 0,			/* rightshift */
1094 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1095 	 32,			/* bitsize */
1096 	 true,			/* pc_relative */
1097 	 0,			/* bitpos */
1098 	 complain_overflow_dont,/* complain_on_overflow */
1099 	 bfd_elf_generic_reloc,	/* special_function */
1100 	 "R_ARM_ALU_SB_G0",	/* name */
1101 	 false,			/* partial_inplace */
1102 	 0xffffffff,		/* src_mask */
1103 	 0xffffffff,		/* dst_mask */
1104 	 true),			/* pcrel_offset */
1105 
1106   HOWTO (R_ARM_ALU_SB_G1_NC,	/* type */
1107 	 0,			/* rightshift */
1108 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1109 	 32,			/* bitsize */
1110 	 true,			/* pc_relative */
1111 	 0,			/* bitpos */
1112 	 complain_overflow_dont,/* complain_on_overflow */
1113 	 bfd_elf_generic_reloc,	/* special_function */
1114 	 "R_ARM_ALU_SB_G1_NC",	/* name */
1115 	 false,			/* partial_inplace */
1116 	 0xffffffff,		/* src_mask */
1117 	 0xffffffff,		/* dst_mask */
1118 	 true),			/* pcrel_offset */
1119 
1120   HOWTO (R_ARM_ALU_SB_G1,	/* type */
1121 	 0,			/* rightshift */
1122 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1123 	 32,			/* bitsize */
1124 	 true,			/* pc_relative */
1125 	 0,			/* bitpos */
1126 	 complain_overflow_dont,/* complain_on_overflow */
1127 	 bfd_elf_generic_reloc,	/* special_function */
1128 	 "R_ARM_ALU_SB_G1",	/* name */
1129 	 false,			/* partial_inplace */
1130 	 0xffffffff,		/* src_mask */
1131 	 0xffffffff,		/* dst_mask */
1132 	 true),			/* pcrel_offset */
1133 
1134   HOWTO (R_ARM_ALU_SB_G2,	/* type */
1135 	 0,			/* rightshift */
1136 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1137 	 32,			/* bitsize */
1138 	 true,			/* pc_relative */
1139 	 0,			/* bitpos */
1140 	 complain_overflow_dont,/* complain_on_overflow */
1141 	 bfd_elf_generic_reloc,	/* special_function */
1142 	 "R_ARM_ALU_SB_G2",	/* name */
1143 	 false,			/* partial_inplace */
1144 	 0xffffffff,		/* src_mask */
1145 	 0xffffffff,		/* dst_mask */
1146 	 true),			/* pcrel_offset */
1147 
1148   HOWTO (R_ARM_LDR_SB_G0,	/* type */
1149 	 0,			/* rightshift */
1150 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1151 	 32,			/* bitsize */
1152 	 true,			/* pc_relative */
1153 	 0,			/* bitpos */
1154 	 complain_overflow_dont,/* complain_on_overflow */
1155 	 bfd_elf_generic_reloc,	/* special_function */
1156 	 "R_ARM_LDR_SB_G0",	/* name */
1157 	 false,			/* partial_inplace */
1158 	 0xffffffff,		/* src_mask */
1159 	 0xffffffff,		/* dst_mask */
1160 	 true),			/* pcrel_offset */
1161 
1162   HOWTO (R_ARM_LDR_SB_G1,	/* type */
1163 	 0,			/* rightshift */
1164 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1165 	 32,			/* bitsize */
1166 	 true,			/* pc_relative */
1167 	 0,			/* bitpos */
1168 	 complain_overflow_dont,/* complain_on_overflow */
1169 	 bfd_elf_generic_reloc,	/* special_function */
1170 	 "R_ARM_LDR_SB_G1",	/* name */
1171 	 false,			/* partial_inplace */
1172 	 0xffffffff,		/* src_mask */
1173 	 0xffffffff,		/* dst_mask */
1174 	 true),			/* pcrel_offset */
1175 
1176   HOWTO (R_ARM_LDR_SB_G2,	/* type */
1177 	 0,			/* rightshift */
1178 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1179 	 32,			/* bitsize */
1180 	 true,			/* pc_relative */
1181 	 0,			/* bitpos */
1182 	 complain_overflow_dont,/* complain_on_overflow */
1183 	 bfd_elf_generic_reloc,	/* special_function */
1184 	 "R_ARM_LDR_SB_G2",	/* name */
1185 	 false,			/* partial_inplace */
1186 	 0xffffffff,		/* src_mask */
1187 	 0xffffffff,		/* dst_mask */
1188 	 true),			/* pcrel_offset */
1189 
1190   HOWTO (R_ARM_LDRS_SB_G0,	/* type */
1191 	 0,			/* rightshift */
1192 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1193 	 32,			/* bitsize */
1194 	 true,			/* pc_relative */
1195 	 0,			/* bitpos */
1196 	 complain_overflow_dont,/* complain_on_overflow */
1197 	 bfd_elf_generic_reloc,	/* special_function */
1198 	 "R_ARM_LDRS_SB_G0",	/* name */
1199 	 false,			/* partial_inplace */
1200 	 0xffffffff,		/* src_mask */
1201 	 0xffffffff,		/* dst_mask */
1202 	 true),			/* pcrel_offset */
1203 
1204   HOWTO (R_ARM_LDRS_SB_G1,	/* type */
1205 	 0,			/* rightshift */
1206 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1207 	 32,			/* bitsize */
1208 	 true,			/* pc_relative */
1209 	 0,			/* bitpos */
1210 	 complain_overflow_dont,/* complain_on_overflow */
1211 	 bfd_elf_generic_reloc,	/* special_function */
1212 	 "R_ARM_LDRS_SB_G1",	/* name */
1213 	 false,			/* partial_inplace */
1214 	 0xffffffff,		/* src_mask */
1215 	 0xffffffff,		/* dst_mask */
1216 	 true),			/* pcrel_offset */
1217 
1218   HOWTO (R_ARM_LDRS_SB_G2,	/* type */
1219 	 0,			/* rightshift */
1220 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1221 	 32,			/* bitsize */
1222 	 true,			/* pc_relative */
1223 	 0,			/* bitpos */
1224 	 complain_overflow_dont,/* complain_on_overflow */
1225 	 bfd_elf_generic_reloc,	/* special_function */
1226 	 "R_ARM_LDRS_SB_G2",	/* name */
1227 	 false,			/* partial_inplace */
1228 	 0xffffffff,		/* src_mask */
1229 	 0xffffffff,		/* dst_mask */
1230 	 true),			/* pcrel_offset */
1231 
1232   HOWTO (R_ARM_LDC_SB_G0,	/* type */
1233 	 0,			/* rightshift */
1234 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1235 	 32,			/* bitsize */
1236 	 true,			/* pc_relative */
1237 	 0,			/* bitpos */
1238 	 complain_overflow_dont,/* complain_on_overflow */
1239 	 bfd_elf_generic_reloc,	/* special_function */
1240 	 "R_ARM_LDC_SB_G0",	/* name */
1241 	 false,			/* partial_inplace */
1242 	 0xffffffff,		/* src_mask */
1243 	 0xffffffff,		/* dst_mask */
1244 	 true),			/* pcrel_offset */
1245 
1246   HOWTO (R_ARM_LDC_SB_G1,	/* type */
1247 	 0,			/* rightshift */
1248 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1249 	 32,			/* bitsize */
1250 	 true,			/* pc_relative */
1251 	 0,			/* bitpos */
1252 	 complain_overflow_dont,/* complain_on_overflow */
1253 	 bfd_elf_generic_reloc,	/* special_function */
1254 	 "R_ARM_LDC_SB_G1",	/* name */
1255 	 false,			/* partial_inplace */
1256 	 0xffffffff,		/* src_mask */
1257 	 0xffffffff,		/* dst_mask */
1258 	 true),			/* pcrel_offset */
1259 
1260   HOWTO (R_ARM_LDC_SB_G2,	/* type */
1261 	 0,			/* rightshift */
1262 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1263 	 32,			/* bitsize */
1264 	 true,			/* pc_relative */
1265 	 0,			/* bitpos */
1266 	 complain_overflow_dont,/* complain_on_overflow */
1267 	 bfd_elf_generic_reloc,	/* special_function */
1268 	 "R_ARM_LDC_SB_G2",	/* name */
1269 	 false,			/* partial_inplace */
1270 	 0xffffffff,		/* src_mask */
1271 	 0xffffffff,		/* dst_mask */
1272 	 true),			/* pcrel_offset */
1273 
1274   /* End of group relocations.  */
1275 
1276   HOWTO (R_ARM_MOVW_BREL_NC,	/* type */
1277 	 0,			/* rightshift */
1278 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1279 	 16,			/* bitsize */
1280 	 false,			/* pc_relative */
1281 	 0,			/* bitpos */
1282 	 complain_overflow_dont,/* complain_on_overflow */
1283 	 bfd_elf_generic_reloc,	/* special_function */
1284 	 "R_ARM_MOVW_BREL_NC",	/* name */
1285 	 false,			/* partial_inplace */
1286 	 0x0000ffff,		/* src_mask */
1287 	 0x0000ffff,		/* dst_mask */
1288 	 false),		/* pcrel_offset */
1289 
1290   HOWTO (R_ARM_MOVT_BREL,	/* type */
1291 	 0,			/* rightshift */
1292 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1293 	 16,			/* bitsize */
1294 	 false,			/* pc_relative */
1295 	 0,			/* bitpos */
1296 	 complain_overflow_bitfield,/* complain_on_overflow */
1297 	 bfd_elf_generic_reloc,	/* special_function */
1298 	 "R_ARM_MOVT_BREL",	/* name */
1299 	 false,			/* partial_inplace */
1300 	 0x0000ffff,		/* src_mask */
1301 	 0x0000ffff,		/* dst_mask */
1302 	 false),		/* pcrel_offset */
1303 
1304   HOWTO (R_ARM_MOVW_BREL,	/* type */
1305 	 0,			/* rightshift */
1306 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1307 	 16,			/* bitsize */
1308 	 false,			/* pc_relative */
1309 	 0,			/* bitpos */
1310 	 complain_overflow_dont,/* complain_on_overflow */
1311 	 bfd_elf_generic_reloc,	/* special_function */
1312 	 "R_ARM_MOVW_BREL",	/* name */
1313 	 false,			/* partial_inplace */
1314 	 0x0000ffff,		/* src_mask */
1315 	 0x0000ffff,		/* dst_mask */
1316 	 false),		/* pcrel_offset */
1317 
1318   HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 	 0,			/* rightshift */
1320 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1321 	 16,			/* bitsize */
1322 	 false,			/* pc_relative */
1323 	 0,			/* bitpos */
1324 	 complain_overflow_dont,/* complain_on_overflow */
1325 	 bfd_elf_generic_reloc,	/* special_function */
1326 	 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 	 false,			/* partial_inplace */
1328 	 0x040f70ff,		/* src_mask */
1329 	 0x040f70ff,		/* dst_mask */
1330 	 false),		/* pcrel_offset */
1331 
1332   HOWTO (R_ARM_THM_MOVT_BREL,	/* type */
1333 	 0,			/* rightshift */
1334 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1335 	 16,			/* bitsize */
1336 	 false,			/* pc_relative */
1337 	 0,			/* bitpos */
1338 	 complain_overflow_bitfield,/* complain_on_overflow */
1339 	 bfd_elf_generic_reloc,	/* special_function */
1340 	 "R_ARM_THM_MOVT_BREL",	/* name */
1341 	 false,			/* partial_inplace */
1342 	 0x040f70ff,		/* src_mask */
1343 	 0x040f70ff,		/* dst_mask */
1344 	 false),		/* pcrel_offset */
1345 
1346   HOWTO (R_ARM_THM_MOVW_BREL,	/* type */
1347 	 0,			/* rightshift */
1348 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1349 	 16,			/* bitsize */
1350 	 false,			/* pc_relative */
1351 	 0,			/* bitpos */
1352 	 complain_overflow_dont,/* complain_on_overflow */
1353 	 bfd_elf_generic_reloc,	/* special_function */
1354 	 "R_ARM_THM_MOVW_BREL",	/* name */
1355 	 false,			/* partial_inplace */
1356 	 0x040f70ff,		/* src_mask */
1357 	 0x040f70ff,		/* dst_mask */
1358 	 false),		/* pcrel_offset */
1359 
1360   HOWTO (R_ARM_TLS_GOTDESC,	/* type */
1361 	 0,			/* rightshift */
1362 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1363 	 32,			/* bitsize */
1364 	 false,			/* pc_relative */
1365 	 0,			/* bitpos */
1366 	 complain_overflow_bitfield,/* complain_on_overflow */
1367 	 NULL,			/* special_function */
1368 	 "R_ARM_TLS_GOTDESC",	/* name */
1369 	 true,			/* partial_inplace */
1370 	 0xffffffff,		/* src_mask */
1371 	 0xffffffff,		/* dst_mask */
1372 	 false),		/* pcrel_offset */
1373 
1374   HOWTO (R_ARM_TLS_CALL,	/* type */
1375 	 0,			/* rightshift */
1376 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1377 	 24,			/* bitsize */
1378 	 false,			/* pc_relative */
1379 	 0,			/* bitpos */
1380 	 complain_overflow_dont,/* complain_on_overflow */
1381 	 bfd_elf_generic_reloc,	/* special_function */
1382 	 "R_ARM_TLS_CALL",	/* name */
1383 	 false,			/* partial_inplace */
1384 	 0x00ffffff,		/* src_mask */
1385 	 0x00ffffff,		/* dst_mask */
1386 	 false),		/* pcrel_offset */
1387 
1388   HOWTO (R_ARM_TLS_DESCSEQ,	/* type */
1389 	 0,			/* rightshift */
1390 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1391 	 0,			/* bitsize */
1392 	 false,			/* pc_relative */
1393 	 0,			/* bitpos */
1394 	 complain_overflow_dont,/* complain_on_overflow */
1395 	 bfd_elf_generic_reloc,	/* special_function */
1396 	 "R_ARM_TLS_DESCSEQ",	/* name */
1397 	 false,			/* partial_inplace */
1398 	 0x00000000,		/* src_mask */
1399 	 0x00000000,		/* dst_mask */
1400 	 false),		/* pcrel_offset */
1401 
1402   HOWTO (R_ARM_THM_TLS_CALL,	/* type */
1403 	 0,			/* rightshift */
1404 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1405 	 24,			/* bitsize */
1406 	 false,			/* pc_relative */
1407 	 0,			/* bitpos */
1408 	 complain_overflow_dont,/* complain_on_overflow */
1409 	 bfd_elf_generic_reloc,	/* special_function */
1410 	 "R_ARM_THM_TLS_CALL",	/* name */
1411 	 false,			/* partial_inplace */
1412 	 0x07ff07ff,		/* src_mask */
1413 	 0x07ff07ff,		/* dst_mask */
1414 	 false),		/* pcrel_offset */
1415 
1416   HOWTO (R_ARM_PLT32_ABS,	/* type */
1417 	 0,			/* rightshift */
1418 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1419 	 32,			/* bitsize */
1420 	 false,			/* pc_relative */
1421 	 0,			/* bitpos */
1422 	 complain_overflow_dont,/* complain_on_overflow */
1423 	 bfd_elf_generic_reloc,	/* special_function */
1424 	 "R_ARM_PLT32_ABS",	/* name */
1425 	 false,			/* partial_inplace */
1426 	 0xffffffff,		/* src_mask */
1427 	 0xffffffff,		/* dst_mask */
1428 	 false),		/* pcrel_offset */
1429 
1430   HOWTO (R_ARM_GOT_ABS,		/* type */
1431 	 0,			/* rightshift */
1432 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1433 	 32,			/* bitsize */
1434 	 false,			/* pc_relative */
1435 	 0,			/* bitpos */
1436 	 complain_overflow_dont,/* complain_on_overflow */
1437 	 bfd_elf_generic_reloc,	/* special_function */
1438 	 "R_ARM_GOT_ABS",	/* name */
1439 	 false,			/* partial_inplace */
1440 	 0xffffffff,		/* src_mask */
1441 	 0xffffffff,		/* dst_mask */
1442 	 false),			/* pcrel_offset */
1443 
1444   HOWTO (R_ARM_GOT_PREL,	/* type */
1445 	 0,			/* rightshift */
1446 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1447 	 32,			/* bitsize */
1448 	 true,			/* pc_relative */
1449 	 0,			/* bitpos */
1450 	 complain_overflow_dont,	/* complain_on_overflow */
1451 	 bfd_elf_generic_reloc,	/* special_function */
1452 	 "R_ARM_GOT_PREL",	/* name */
1453 	 false,			/* partial_inplace */
1454 	 0xffffffff,		/* src_mask */
1455 	 0xffffffff,		/* dst_mask */
1456 	 true),			/* pcrel_offset */
1457 
1458   HOWTO (R_ARM_GOT_BREL12,	/* type */
1459 	 0,			/* rightshift */
1460 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1461 	 12,			/* bitsize */
1462 	 false,			/* pc_relative */
1463 	 0,			/* bitpos */
1464 	 complain_overflow_bitfield,/* complain_on_overflow */
1465 	 bfd_elf_generic_reloc,	/* special_function */
1466 	 "R_ARM_GOT_BREL12",	/* name */
1467 	 false,			/* partial_inplace */
1468 	 0x00000fff,		/* src_mask */
1469 	 0x00000fff,		/* dst_mask */
1470 	 false),		/* pcrel_offset */
1471 
1472   HOWTO (R_ARM_GOTOFF12,	/* type */
1473 	 0,			/* rightshift */
1474 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1475 	 12,			/* bitsize */
1476 	 false,			/* pc_relative */
1477 	 0,			/* bitpos */
1478 	 complain_overflow_bitfield,/* complain_on_overflow */
1479 	 bfd_elf_generic_reloc,	/* special_function */
1480 	 "R_ARM_GOTOFF12",	/* name */
1481 	 false,			/* partial_inplace */
1482 	 0x00000fff,		/* src_mask */
1483 	 0x00000fff,		/* dst_mask */
1484 	 false),		/* pcrel_offset */
1485 
1486   EMPTY_HOWTO (R_ARM_GOTRELAX),	 /* reserved for future GOT-load optimizations */
1487 
1488   /* GNU extension to record C++ vtable member usage */
1489   HOWTO (R_ARM_GNU_VTENTRY,	/* type */
1490 	 0,			/* rightshift */
1491 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1492 	 0,			/* bitsize */
1493 	 false,			/* pc_relative */
1494 	 0,			/* bitpos */
1495 	 complain_overflow_dont, /* complain_on_overflow */
1496 	 _bfd_elf_rel_vtable_reloc_fn,	/* special_function */
1497 	 "R_ARM_GNU_VTENTRY",	/* name */
1498 	 false,			/* partial_inplace */
1499 	 0,			/* src_mask */
1500 	 0,			/* dst_mask */
1501 	 false),		/* pcrel_offset */
1502 
1503   /* GNU extension to record C++ vtable hierarchy */
1504   HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 	 0,			/* rightshift */
1506 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1507 	 0,			/* bitsize */
1508 	 false,			/* pc_relative */
1509 	 0,			/* bitpos */
1510 	 complain_overflow_dont, /* complain_on_overflow */
1511 	 NULL,			/* special_function */
1512 	 "R_ARM_GNU_VTINHERIT", /* name */
1513 	 false,			/* partial_inplace */
1514 	 0,			/* src_mask */
1515 	 0,			/* dst_mask */
1516 	 false),		/* pcrel_offset */
1517 
1518   HOWTO (R_ARM_THM_JUMP11,	/* type */
1519 	 1,			/* rightshift */
1520 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1521 	 11,			/* bitsize */
1522 	 true,			/* pc_relative */
1523 	 0,			/* bitpos */
1524 	 complain_overflow_signed,	/* complain_on_overflow */
1525 	 bfd_elf_generic_reloc,	/* special_function */
1526 	 "R_ARM_THM_JUMP11",	/* name */
1527 	 false,			/* partial_inplace */
1528 	 0x000007ff,		/* src_mask */
1529 	 0x000007ff,		/* dst_mask */
1530 	 true),			/* pcrel_offset */
1531 
1532   HOWTO (R_ARM_THM_JUMP8,	/* type */
1533 	 1,			/* rightshift */
1534 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1535 	 8,			/* bitsize */
1536 	 true,			/* pc_relative */
1537 	 0,			/* bitpos */
1538 	 complain_overflow_signed,	/* complain_on_overflow */
1539 	 bfd_elf_generic_reloc,	/* special_function */
1540 	 "R_ARM_THM_JUMP8",	/* name */
1541 	 false,			/* partial_inplace */
1542 	 0x000000ff,		/* src_mask */
1543 	 0x000000ff,		/* dst_mask */
1544 	 true),			/* pcrel_offset */
1545 
1546   /* TLS relocations */
1547   HOWTO (R_ARM_TLS_GD32,	/* type */
1548 	 0,			/* rightshift */
1549 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1550 	 32,			/* bitsize */
1551 	 false,			/* pc_relative */
1552 	 0,			/* bitpos */
1553 	 complain_overflow_bitfield,/* complain_on_overflow */
1554 	 NULL,			/* special_function */
1555 	 "R_ARM_TLS_GD32",	/* name */
1556 	 true,			/* partial_inplace */
1557 	 0xffffffff,		/* src_mask */
1558 	 0xffffffff,		/* dst_mask */
1559 	 false),		/* pcrel_offset */
1560 
1561   HOWTO (R_ARM_TLS_LDM32,	/* type */
1562 	 0,			/* rightshift */
1563 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1564 	 32,			/* bitsize */
1565 	 false,			/* pc_relative */
1566 	 0,			/* bitpos */
1567 	 complain_overflow_bitfield,/* complain_on_overflow */
1568 	 bfd_elf_generic_reloc, /* special_function */
1569 	 "R_ARM_TLS_LDM32",	/* name */
1570 	 true,			/* partial_inplace */
1571 	 0xffffffff,		/* src_mask */
1572 	 0xffffffff,		/* dst_mask */
1573 	 false),		/* pcrel_offset */
1574 
1575   HOWTO (R_ARM_TLS_LDO32,	/* type */
1576 	 0,			/* rightshift */
1577 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1578 	 32,			/* bitsize */
1579 	 false,			/* pc_relative */
1580 	 0,			/* bitpos */
1581 	 complain_overflow_bitfield,/* complain_on_overflow */
1582 	 bfd_elf_generic_reloc, /* special_function */
1583 	 "R_ARM_TLS_LDO32",	/* name */
1584 	 true,			/* partial_inplace */
1585 	 0xffffffff,		/* src_mask */
1586 	 0xffffffff,		/* dst_mask */
1587 	 false),		/* pcrel_offset */
1588 
1589   HOWTO (R_ARM_TLS_IE32,	/* type */
1590 	 0,			/* rightshift */
1591 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1592 	 32,			/* bitsize */
1593 	 false,			 /* pc_relative */
1594 	 0,			/* bitpos */
1595 	 complain_overflow_bitfield,/* complain_on_overflow */
1596 	 NULL,			/* special_function */
1597 	 "R_ARM_TLS_IE32",	/* name */
1598 	 true,			/* partial_inplace */
1599 	 0xffffffff,		/* src_mask */
1600 	 0xffffffff,		/* dst_mask */
1601 	 false),		/* pcrel_offset */
1602 
1603   HOWTO (R_ARM_TLS_LE32,	/* type */
1604 	 0,			/* rightshift */
1605 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1606 	 32,			/* bitsize */
1607 	 false,			/* pc_relative */
1608 	 0,			/* bitpos */
1609 	 complain_overflow_bitfield,/* complain_on_overflow */
1610 	 NULL,			/* special_function */
1611 	 "R_ARM_TLS_LE32",	/* name */
1612 	 true,			/* partial_inplace */
1613 	 0xffffffff,		/* src_mask */
1614 	 0xffffffff,		/* dst_mask */
1615 	 false),		/* pcrel_offset */
1616 
1617   HOWTO (R_ARM_TLS_LDO12,	/* type */
1618 	 0,			/* rightshift */
1619 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1620 	 12,			/* bitsize */
1621 	 false,			/* pc_relative */
1622 	 0,			/* bitpos */
1623 	 complain_overflow_bitfield,/* complain_on_overflow */
1624 	 bfd_elf_generic_reloc,	/* special_function */
1625 	 "R_ARM_TLS_LDO12",	/* name */
1626 	 false,			/* partial_inplace */
1627 	 0x00000fff,		/* src_mask */
1628 	 0x00000fff,		/* dst_mask */
1629 	 false),		/* pcrel_offset */
1630 
1631   HOWTO (R_ARM_TLS_LE12,	/* type */
1632 	 0,			/* rightshift */
1633 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1634 	 12,			/* bitsize */
1635 	 false,			/* pc_relative */
1636 	 0,			/* bitpos */
1637 	 complain_overflow_bitfield,/* complain_on_overflow */
1638 	 bfd_elf_generic_reloc,	/* special_function */
1639 	 "R_ARM_TLS_LE12",	/* name */
1640 	 false,			/* partial_inplace */
1641 	 0x00000fff,		/* src_mask */
1642 	 0x00000fff,		/* dst_mask */
1643 	 false),		/* pcrel_offset */
1644 
1645   HOWTO (R_ARM_TLS_IE12GP,	/* type */
1646 	 0,			/* rightshift */
1647 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1648 	 12,			/* bitsize */
1649 	 false,			/* pc_relative */
1650 	 0,			/* bitpos */
1651 	 complain_overflow_bitfield,/* complain_on_overflow */
1652 	 bfd_elf_generic_reloc,	/* special_function */
1653 	 "R_ARM_TLS_IE12GP",	/* name */
1654 	 false,			/* partial_inplace */
1655 	 0x00000fff,		/* src_mask */
1656 	 0x00000fff,		/* dst_mask */
1657 	 false),		/* pcrel_offset */
1658 
1659   /* 112-127 private relocations.  */
1660   EMPTY_HOWTO (112),
1661   EMPTY_HOWTO (113),
1662   EMPTY_HOWTO (114),
1663   EMPTY_HOWTO (115),
1664   EMPTY_HOWTO (116),
1665   EMPTY_HOWTO (117),
1666   EMPTY_HOWTO (118),
1667   EMPTY_HOWTO (119),
1668   EMPTY_HOWTO (120),
1669   EMPTY_HOWTO (121),
1670   EMPTY_HOWTO (122),
1671   EMPTY_HOWTO (123),
1672   EMPTY_HOWTO (124),
1673   EMPTY_HOWTO (125),
1674   EMPTY_HOWTO (126),
1675   EMPTY_HOWTO (127),
1676 
1677   /* R_ARM_ME_TOO, obsolete.  */
1678   EMPTY_HOWTO (128),
1679 
1680   HOWTO (R_ARM_THM_TLS_DESCSEQ,	/* type */
1681 	 0,			/* rightshift */
1682 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1683 	 0,			/* bitsize */
1684 	 false,			/* pc_relative */
1685 	 0,			/* bitpos */
1686 	 complain_overflow_dont,/* complain_on_overflow */
1687 	 bfd_elf_generic_reloc,	/* special_function */
1688 	 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 	 false,			/* partial_inplace */
1690 	 0x00000000,		/* src_mask */
1691 	 0x00000000,		/* dst_mask */
1692 	 false),		/* pcrel_offset */
1693   EMPTY_HOWTO (130),
1694   EMPTY_HOWTO (131),
1695   HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type.  */
1696 	 0,			/* rightshift.  */
1697 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1698 	 16,			/* bitsize.  */
1699 	 false,			/* pc_relative.  */
1700 	 0,			/* bitpos.  */
1701 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1702 	 bfd_elf_generic_reloc,	/* special_function.  */
1703 	 "R_ARM_THM_ALU_ABS_G0_NC",/* name.  */
1704 	 false,			/* partial_inplace.  */
1705 	 0x00000000,		/* src_mask.  */
1706 	 0x00000000,		/* dst_mask.  */
1707 	 false),		/* pcrel_offset.  */
1708   HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type.  */
1709 	 0,			/* rightshift.  */
1710 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1711 	 16,			/* bitsize.  */
1712 	 false,			/* pc_relative.  */
1713 	 0,			/* bitpos.  */
1714 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1715 	 bfd_elf_generic_reloc,	/* special_function.  */
1716 	 "R_ARM_THM_ALU_ABS_G1_NC",/* name.  */
1717 	 false,			/* partial_inplace.  */
1718 	 0x00000000,		/* src_mask.  */
1719 	 0x00000000,		/* dst_mask.  */
1720 	 false),		/* pcrel_offset.  */
1721   HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type.  */
1722 	 0,			/* rightshift.  */
1723 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1724 	 16,			/* bitsize.  */
1725 	 false,			/* pc_relative.  */
1726 	 0,			/* bitpos.  */
1727 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1728 	 bfd_elf_generic_reloc,	/* special_function.  */
1729 	 "R_ARM_THM_ALU_ABS_G2_NC",/* name.  */
1730 	 false,			/* partial_inplace.  */
1731 	 0x00000000,		/* src_mask.  */
1732 	 0x00000000,		/* dst_mask.  */
1733 	 false),		/* pcrel_offset.  */
1734   HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type.  */
1735 	 0,			/* rightshift.  */
1736 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1737 	 16,			/* bitsize.  */
1738 	 false,			/* pc_relative.  */
1739 	 0,			/* bitpos.  */
1740 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1741 	 bfd_elf_generic_reloc,	/* special_function.  */
1742 	 "R_ARM_THM_ALU_ABS_G3_NC",/* name.  */
1743 	 false,			/* partial_inplace.  */
1744 	 0x00000000,		/* src_mask.  */
1745 	 0x00000000,		/* dst_mask.  */
1746 	 false),		/* pcrel_offset.  */
1747   /* Relocations for Armv8.1-M Mainline.  */
1748   HOWTO (R_ARM_THM_BF16,	/* type.  */
1749 	 0,			/* rightshift.  */
1750 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1751 	 16,			/* bitsize.  */
1752 	 true,			/* pc_relative.  */
1753 	 0,			/* bitpos.  */
1754 	 complain_overflow_dont,/* do not complain_on_overflow.  */
1755 	 bfd_elf_generic_reloc,	/* special_function.  */
1756 	 "R_ARM_THM_BF16",	/* name.  */
1757 	 false,			/* partial_inplace.  */
1758 	 0x001f0ffe,		/* src_mask.  */
1759 	 0x001f0ffe,		/* dst_mask.  */
1760 	 true),			/* pcrel_offset.  */
1761   HOWTO (R_ARM_THM_BF12,	/* type.  */
1762 	 0,			/* rightshift.  */
1763 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1764 	 12,			/* bitsize.  */
1765 	 true,			/* pc_relative.  */
1766 	 0,			/* bitpos.  */
1767 	 complain_overflow_dont,/* do not complain_on_overflow.  */
1768 	 bfd_elf_generic_reloc,	/* special_function.  */
1769 	 "R_ARM_THM_BF12",	/* name.  */
1770 	 false,			/* partial_inplace.  */
1771 	 0x00010ffe,		/* src_mask.  */
1772 	 0x00010ffe,		/* dst_mask.  */
1773 	 true),			/* pcrel_offset.  */
1774   HOWTO (R_ARM_THM_BF18,	/* type.  */
1775 	 0,			/* rightshift.  */
1776 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1777 	 18,			/* bitsize.  */
1778 	 true,			/* pc_relative.  */
1779 	 0,			/* bitpos.  */
1780 	 complain_overflow_dont,/* do not complain_on_overflow.  */
1781 	 bfd_elf_generic_reloc,	/* special_function.  */
1782 	 "R_ARM_THM_BF18",	/* name.  */
1783 	 false,			/* partial_inplace.  */
1784 	 0x007f0ffe,		/* src_mask.  */
1785 	 0x007f0ffe,		/* dst_mask.  */
1786 	 true),			/* pcrel_offset.  */
1787 };
1788 
1789 /* 160 onwards: */
1790 static reloc_howto_type elf32_arm_howto_table_2[8] =
1791 {
1792   HOWTO (R_ARM_IRELATIVE,	/* type */
1793 	 0,			/* rightshift */
1794 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1795 	 32,			/* bitsize */
1796 	 false,			/* pc_relative */
1797 	 0,			/* bitpos */
1798 	 complain_overflow_bitfield,/* complain_on_overflow */
1799 	 bfd_elf_generic_reloc, /* special_function */
1800 	 "R_ARM_IRELATIVE",	/* name */
1801 	 true,			/* partial_inplace */
1802 	 0xffffffff,		/* src_mask */
1803 	 0xffffffff,		/* dst_mask */
1804 	 false),		/* pcrel_offset */
1805   HOWTO (R_ARM_GOTFUNCDESC,	/* type */
1806 	 0,			/* rightshift */
1807 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1808 	 32,			/* bitsize */
1809 	 false,			/* pc_relative */
1810 	 0,			/* bitpos */
1811 	 complain_overflow_bitfield,/* complain_on_overflow */
1812 	 bfd_elf_generic_reloc,	/* special_function */
1813 	 "R_ARM_GOTFUNCDESC",	/* name */
1814 	 false,			/* partial_inplace */
1815 	 0,			/* src_mask */
1816 	 0xffffffff,		/* dst_mask */
1817 	 false),		/* pcrel_offset */
1818   HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1819 	 0,			/* rightshift */
1820 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1821 	 32,			/* bitsize */
1822 	 false,			/* pc_relative */
1823 	 0,			/* bitpos */
1824 	 complain_overflow_bitfield,/* complain_on_overflow */
1825 	 bfd_elf_generic_reloc,	/* special_function */
1826 	 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 	 false,			/* partial_inplace */
1828 	 0,			/* src_mask */
1829 	 0xffffffff,		/* dst_mask */
1830 	 false),		/* pcrel_offset */
1831   HOWTO (R_ARM_FUNCDESC,	/* type */
1832 	 0,			/* rightshift */
1833 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1834 	 32,			/* bitsize */
1835 	 false,			/* pc_relative */
1836 	 0,			/* bitpos */
1837 	 complain_overflow_bitfield,/* complain_on_overflow */
1838 	 bfd_elf_generic_reloc,	/* special_function */
1839 	 "R_ARM_FUNCDESC",	/* name */
1840 	 false,			/* partial_inplace */
1841 	 0,			/* src_mask */
1842 	 0xffffffff,		/* dst_mask */
1843 	 false),		/* pcrel_offset */
1844   HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
1845 	 0,			/* rightshift */
1846 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1847 	 64,			/* bitsize */
1848 	 false,			/* pc_relative */
1849 	 0,			/* bitpos */
1850 	 complain_overflow_bitfield,/* complain_on_overflow */
1851 	 bfd_elf_generic_reloc,	/* special_function */
1852 	 "R_ARM_FUNCDESC_VALUE",/* name */
1853 	 false,			/* partial_inplace */
1854 	 0,			/* src_mask */
1855 	 0xffffffff,		/* dst_mask */
1856 	 false),		/* pcrel_offset */
1857   HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
1858 	 0,			/* rightshift */
1859 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1860 	 32,			/* bitsize */
1861 	 false,			/* pc_relative */
1862 	 0,			/* bitpos */
1863 	 complain_overflow_bitfield,/* complain_on_overflow */
1864 	 bfd_elf_generic_reloc,	/* special_function */
1865 	 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 	 false,			/* partial_inplace */
1867 	 0,			/* src_mask */
1868 	 0xffffffff,		/* dst_mask */
1869 	 false),		/* pcrel_offset */
1870   HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
1871 	 0,			/* rightshift */
1872 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1873 	 32,			/* bitsize */
1874 	 false,			/* pc_relative */
1875 	 0,			/* bitpos */
1876 	 complain_overflow_bitfield,/* complain_on_overflow */
1877 	 bfd_elf_generic_reloc,	/* special_function */
1878 	 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 	 false,			/* partial_inplace */
1880 	 0,			/* src_mask */
1881 	 0xffffffff,		/* dst_mask */
1882 	 false),		/* pcrel_offset */
1883   HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
1884 	 0,			/* rightshift */
1885 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1886 	 32,			/* bitsize */
1887 	 false,			/* pc_relative */
1888 	 0,			/* bitpos */
1889 	 complain_overflow_bitfield,/* complain_on_overflow */
1890 	 bfd_elf_generic_reloc,	/* special_function */
1891 	 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 	 false,			/* partial_inplace */
1893 	 0,			/* src_mask */
1894 	 0xffffffff,		/* dst_mask */
1895 	 false),		/* pcrel_offset */
1896 };
1897 
1898 /* 249-255 extended, currently unused, relocations:  */
1899 static reloc_howto_type elf32_arm_howto_table_3[4] =
1900 {
1901   HOWTO (R_ARM_RREL32,		/* type */
1902 	 0,			/* rightshift */
1903 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1904 	 0,			/* bitsize */
1905 	 false,			/* pc_relative */
1906 	 0,			/* bitpos */
1907 	 complain_overflow_dont,/* complain_on_overflow */
1908 	 bfd_elf_generic_reloc,	/* special_function */
1909 	 "R_ARM_RREL32",	/* name */
1910 	 false,			/* partial_inplace */
1911 	 0,			/* src_mask */
1912 	 0,			/* dst_mask */
1913 	 false),		/* pcrel_offset */
1914 
1915   HOWTO (R_ARM_RABS32,		/* type */
1916 	 0,			/* rightshift */
1917 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1918 	 0,			/* bitsize */
1919 	 false,			/* pc_relative */
1920 	 0,			/* bitpos */
1921 	 complain_overflow_dont,/* complain_on_overflow */
1922 	 bfd_elf_generic_reloc,	/* special_function */
1923 	 "R_ARM_RABS32",	/* name */
1924 	 false,			/* partial_inplace */
1925 	 0,			/* src_mask */
1926 	 0,			/* dst_mask */
1927 	 false),		/* pcrel_offset */
1928 
1929   HOWTO (R_ARM_RPC24,		/* type */
1930 	 0,			/* rightshift */
1931 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1932 	 0,			/* bitsize */
1933 	 false,			/* pc_relative */
1934 	 0,			/* bitpos */
1935 	 complain_overflow_dont,/* complain_on_overflow */
1936 	 bfd_elf_generic_reloc,	/* special_function */
1937 	 "R_ARM_RPC24",		/* name */
1938 	 false,			/* partial_inplace */
1939 	 0,			/* src_mask */
1940 	 0,			/* dst_mask */
1941 	 false),		/* pcrel_offset */
1942 
1943   HOWTO (R_ARM_RBASE,		/* type */
1944 	 0,			/* rightshift */
1945 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1946 	 0,			/* bitsize */
1947 	 false,			/* pc_relative */
1948 	 0,			/* bitpos */
1949 	 complain_overflow_dont,/* complain_on_overflow */
1950 	 bfd_elf_generic_reloc,	/* special_function */
1951 	 "R_ARM_RBASE",		/* name */
1952 	 false,			/* partial_inplace */
1953 	 0,			/* src_mask */
1954 	 0,			/* dst_mask */
1955 	 false)			/* pcrel_offset */
1956 };
1957 
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1960 {
1961   if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962     return &elf32_arm_howto_table_1[r_type];
1963 
1964   if (r_type >= R_ARM_IRELATIVE
1965       && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966     return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1967 
1968   if (r_type >= R_ARM_RREL32
1969       && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970     return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1971 
1972   return NULL;
1973 }
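
/* For illustration only (not used by the code above): the three tables are
   indexed by raw ELF relocation type, so a lookup behaves roughly like

     elf32_arm_howto_from_type (R_ARM_PC24)
       => &elf32_arm_howto_table_1[R_ARM_PC24]
     elf32_arm_howto_from_type (R_ARM_IRELATIVE)
       => &elf32_arm_howto_table_2[0]
     elf32_arm_howto_from_type (R_ARM_RREL32)
       => &elf32_arm_howto_table_3[0]

   and any type outside those ranges yields NULL.  */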
1974 
1975 static bool
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 			 Elf_Internal_Rela * elf_reloc)
1978 {
1979   unsigned int r_type;
1980 
1981   r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982   if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1983     {
1984       /* xgettext:c-format */
1985       _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 			  abfd, r_type);
1987       bfd_set_error (bfd_error_bad_value);
1988       return false;
1989     }
1990   return true;
1991 }
1992 
1993 struct elf32_arm_reloc_map
1994   {
1995     bfd_reloc_code_real_type  bfd_reloc_val;
1996     unsigned char	      elf_reloc_val;
1997   };
1998 
1999 /* All entries in this list must also be present in elf32_arm_howto_table.  */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2001   {
2002     {BFD_RELOC_NONE,		     R_ARM_NONE},
2003     {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
2004     {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
2005     {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
2006     {BFD_RELOC_ARM_PCREL_BLX,	     R_ARM_XPC25},
2007     {BFD_RELOC_THUMB_PCREL_BLX,	     R_ARM_THM_XPC22},
2008     {BFD_RELOC_32,		     R_ARM_ABS32},
2009     {BFD_RELOC_32_PCREL,	     R_ARM_REL32},
2010     {BFD_RELOC_8,		     R_ARM_ABS8},
2011     {BFD_RELOC_16,		     R_ARM_ABS16},
2012     {BFD_RELOC_ARM_OFFSET_IMM,	     R_ARM_ABS12},
2013     {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
2014     {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015     {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016     {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017     {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018     {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
2019     {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
2020     {BFD_RELOC_ARM_GLOB_DAT,	     R_ARM_GLOB_DAT},
2021     {BFD_RELOC_ARM_JUMP_SLOT,	     R_ARM_JUMP_SLOT},
2022     {BFD_RELOC_ARM_RELATIVE,	     R_ARM_RELATIVE},
2023     {BFD_RELOC_ARM_GOTOFF,	     R_ARM_GOTOFF32},
2024     {BFD_RELOC_ARM_GOTPC,	     R_ARM_GOTPC},
2025     {BFD_RELOC_ARM_GOT_PREL,	     R_ARM_GOT_PREL},
2026     {BFD_RELOC_ARM_GOT32,	     R_ARM_GOT32},
2027     {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
2028     {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
2029     {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
2030     {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
2031     {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
2032     {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
2033     {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
2034     {BFD_RELOC_ARM_TLS_GOTDESC,	     R_ARM_TLS_GOTDESC},
2035     {BFD_RELOC_ARM_TLS_CALL,	     R_ARM_TLS_CALL},
2036     {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
2037     {BFD_RELOC_ARM_TLS_DESCSEQ,	     R_ARM_TLS_DESCSEQ},
2038     {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
2039     {BFD_RELOC_ARM_TLS_DESC,	     R_ARM_TLS_DESC},
2040     {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
2041     {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
2042     {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
2043     {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
2044     {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
2045     {BFD_RELOC_ARM_TLS_TPOFF32,	     R_ARM_TLS_TPOFF32},
2046     {BFD_RELOC_ARM_TLS_IE32,	     R_ARM_TLS_IE32},
2047     {BFD_RELOC_ARM_TLS_LE32,	     R_ARM_TLS_LE32},
2048     {BFD_RELOC_ARM_IRELATIVE,	     R_ARM_IRELATIVE},
2049     {BFD_RELOC_ARM_GOTFUNCDESC,      R_ARM_GOTFUNCDESC},
2050     {BFD_RELOC_ARM_GOTOFFFUNCDESC,   R_ARM_GOTOFFFUNCDESC},
2051     {BFD_RELOC_ARM_FUNCDESC,         R_ARM_FUNCDESC},
2052     {BFD_RELOC_ARM_FUNCDESC_VALUE,   R_ARM_FUNCDESC_VALUE},
2053     {BFD_RELOC_ARM_TLS_GD32_FDPIC,   R_ARM_TLS_GD32_FDPIC},
2054     {BFD_RELOC_ARM_TLS_LDM32_FDPIC,  R_ARM_TLS_LDM32_FDPIC},
2055     {BFD_RELOC_ARM_TLS_IE32_FDPIC,   R_ARM_TLS_IE32_FDPIC},
2056     {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
2057     {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
2058     {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
2059     {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
2060     {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
2061     {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
2062     {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
2063     {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
2064     {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065     {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066     {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067     {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068     {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069     {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070     {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071     {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072     {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073     {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074     {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075     {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076     {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077     {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078     {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079     {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080     {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081     {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082     {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083     {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084     {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085     {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086     {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087     {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088     {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089     {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090     {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091     {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092     {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093     {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094     {BFD_RELOC_ARM_V4BX,	     R_ARM_V4BX},
2095     {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096     {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097     {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098     {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099     {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100     {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101     {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102   };
2103 
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 			     bfd_reloc_code_real_type code)
2107 {
2108   unsigned int i;
2109 
2110   for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111     if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112       return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2113 
2114   return NULL;
2115 }
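
/* For example, a lookup of BFD_RELOC_32 lands on the R_ARM_ABS32 entry in
   the map above and returns the R_ARM_ABS32 howto; an unmapped code simply
   returns NULL, which callers can treat as an unsupported relocation.  */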
2116 
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 			     const char *r_name)
2120 {
2121   unsigned int i;
2122 
2123   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124     if (elf32_arm_howto_table_1[i].name != NULL
2125 	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126       return &elf32_arm_howto_table_1[i];
2127 
2128   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129     if (elf32_arm_howto_table_2[i].name != NULL
2130 	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131       return &elf32_arm_howto_table_2[i];
2132 
2133   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134     if (elf32_arm_howto_table_3[i].name != NULL
2135 	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136       return &elf32_arm_howto_table_3[i];
2137 
2138   return NULL;
2139 }
2140 
2141 /* Support for core dump NOTE sections.  */
2142 
2143 static bool
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2145 {
2146   int offset;
2147   size_t size;
2148 
2149   switch (note->descsz)
2150     {
2151       default:
2152 	return false;
2153 
2154       case 148:		/* Linux/ARM 32-bit.  */
2155 	/* pr_cursig */
2156 	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2157 
2158 	/* pr_pid */
2159 	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2160 
2161 	/* pr_reg */
2162 	offset = 72;
2163 	size = 72;
2164 
2165 	break;
2166     }
2167 
2168   /* Make a ".reg/999" section.  */
2169   return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 					  size, note->descpos + offset);
2171 }
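
/* For reference, the 148-byte Linux/ARM elf_prstatus note handled above is
   assumed to be laid out roughly as follows:

     offset 12:  pr_cursig  (2 bytes)
     offset 24:  pr_pid     (4 bytes)
     offset 72:  pr_reg     (18 registers * 4 bytes = 72 bytes)

   which is why the ".reg" pseudosection is created with size 72 at
   note->descpos + 72.  */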
2172 
2173 static bool
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2175 {
2176   switch (note->descsz)
2177     {
2178       default:
2179 	return false;
2180 
2181       case 124:		/* Linux/ARM elf_prpsinfo.  */
2182 	elf_tdata (abfd)->core->pid
2183 	 = bfd_get_32 (abfd, note->descdata + 12);
2184 	elf_tdata (abfd)->core->program
2185 	 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 	elf_tdata (abfd)->core->command
2187 	 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188     }
2189 
2190   /* Note that for some reason a spurious space is tacked
2191      onto the end of the args in some implementations (at least
2192      one, anyway), so strip it off if it exists.  */
2193   {
2194     char *command = elf_tdata (abfd)->core->command;
2195     int n = strlen (command);
2196 
2197     if (0 < n && command[n - 1] == ' ')
2198       command[n - 1] = '\0';
2199   }
2200 
2201   return true;
2202 }
2203 
2204 static char *
2205 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2206 				int note_type, ...)
2207 {
2208   switch (note_type)
2209     {
2210     default:
2211       return NULL;
2212 
2213     case NT_PRPSINFO:
2214       {
2215 	char data[124] ATTRIBUTE_NONSTRING;
2216 	va_list ap;
2217 
2218 	va_start (ap, note_type);
2219 	memset (data, 0, sizeof (data));
2220 	strncpy (data + 28, va_arg (ap, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2222 	DIAGNOSTIC_PUSH;
2223 	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224 	   -Wstringop-truncation:
2225 	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2226 	 */
2227 	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2228 #endif
2229 	strncpy (data + 44, va_arg (ap, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2231 	DIAGNOSTIC_POP;
2232 #endif
2233 	va_end (ap);
2234 
2235 	return elfcore_write_note (abfd, buf, bufsiz,
2236 				   "CORE", note_type, data, sizeof (data));
2237       }
2238 
2239     case NT_PRSTATUS:
2240       {
2241 	char data[148];
2242 	va_list ap;
2243 	long pid;
2244 	int cursig;
2245 	const void *greg;
2246 
2247 	va_start (ap, note_type);
2248 	memset (data, 0, sizeof (data));
2249 	pid = va_arg (ap, long);
2250 	bfd_put_32 (abfd, pid, data + 24);
2251 	cursig = va_arg (ap, int);
2252 	bfd_put_16 (abfd, cursig, data + 12);
2253 	greg = va_arg (ap, const void *);
2254 	memcpy (data + 72, greg, 72);
2255 	va_end (ap);
2256 
2257 	return elfcore_write_note (abfd, buf, bufsiz,
2258 				   "CORE", note_type, data, sizeof (data));
2259       }
2260     }
2261 }
2262 
2263 #define TARGET_LITTLE_SYM		arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME		"elf32-littlearm"
2265 #define TARGET_BIG_SYM			arm_elf32_be_vec
2266 #define TARGET_BIG_NAME			"elf32-bigarm"
2267 
2268 #define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note	elf32_arm_nabi_write_core_note
2271 
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2274 
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
2276    interworkable.  */
2277 #define INTERWORK_FLAG(abfd)  \
2278   (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280   || ((abfd)->flags & BFD_LINKER_CREATED))
2281 
2282 /* The linker script knows the section names for placement.
2283    The entry_names are used to do simple name mangling on the stubs.
2284    Given a function name and its type, the stub can be found.  The
2285    name can be changed; the only requirement is that the %s be present.  */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"
2288 
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"
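
/* As an example of the mangling described above, a Thumb-mode call to an ARM
   function "foo" that needs interworking glue would go through an entry named
   something like "__foo_from_thumb", formed by substituting the function name
   into THUMB2ARM_GLUE_ENTRY_NAME.  */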
2291 
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"
2294 
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"
2297 
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"
2300 
2301 #define STUB_ENTRY_NAME   "__%s_veneer"
2302 
2303 #define CMSE_PREFIX "__acle_se_"
2304 
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2306 
2307 /* The name of the dynamic interpreter.  This is put in the .interp
2308    section.  */
2309 #define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2310 
2311 /* FDPIC default stack size.  */
2312 #define DEFAULT_STACK_SIZE 0x8000
2313 
2314 static const unsigned long tls_trampoline [] =
2315 {
2316   0xe08e0000,		/* add r0, lr, r0 */
2317   0xe5901004,		/* ldr r1, [r0,#4] */
2318   0xe12fff11,		/* bx  r1 */
2319 };
2320 
2321 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2322 {
2323   0xe52d2004, /*	push    {r2}			*/
2324   0xe59f200c, /*      ldr     r2, [pc, #3f - . - 8]	*/
2325   0xe59f100c, /*      ldr     r1, [pc, #4f - . - 8]	*/
2326   0xe79f2002, /* 1:   ldr     r2, [pc, r2]		*/
2327   0xe081100f, /* 2:   add     r1, pc			*/
2328   0xe12fff12, /*      bx      r2			*/
2329   0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 				+ dl_tlsdesc_lazy_resolver(GOT)   */
2331   0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2332 };
2333 
2334 /* NOTE: [Thumb nop sequence]
2335    When adding code that transitions from Thumb to Arm, the instruction used
2336    for the alignment padding should be 0xe7fd (b .-2) instead of a nop, for
2337    performance reasons.  */
2338 
2339 /* ARM FDPIC PLT entry.  */
2340 /* The last 5 words contain PLT lazy fragment code and data.  */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2342   {
2343     0xe59fc008,    /* ldr     r12, .L1 */
2344     0xe08cc009,    /* add     r12, r12, r9 */
2345     0xe59c9004,    /* ldr     r9, [r12, #4] */
2346     0xe59cf000,    /* ldr     pc, [r12] */
2347     0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
2348     0x00000000,    /* L2.     .word   foo(funcdesc_value_reloc_offset) */
2349     0xe51fc00c,    /* ldr     r12, [pc, #-12] */
2350     0xe92d1000,    /* push    {r12} */
2351     0xe599c004,    /* ldr     r12, [r9, #4] */
2352     0xe599f000,    /* ldr     pc, [r9] */
2353   };
2354 
2355 /* Thumb FDPIC PLT entry.  */
2356 /* The last 5 words contain PLT lazy fragment code and data.  */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2358   {
2359     0xc00cf8df,    /* ldr.w   r12, .L1 */
2360     0x0c09eb0c,    /* add.w   r12, r12, r9 */
2361     0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
2362     0xf000f8dc,    /* ldr.w   pc, [r12] */
2363     0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
2364     0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
2365     0xc008f85f,    /* ldr.w   r12, .L2 */
2366     0xcd04f84d,    /* push    {r12} */
2367     0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
2368     0xf000f8d9,    /* ldr.w   pc, [r9] */
2369   };
2370 
2371 #ifdef FOUR_WORD_PLT
2372 
2373 /* The first entry in a procedure linkage table looks like
2374    this.  It is set up so that any shared library function that is
2375    called before the relocation has been set up calls the dynamic
2376    linker first.  */
2377 static const bfd_vma elf32_arm_plt0_entry [] =
2378 {
2379   0xe52de004,		/* str   lr, [sp, #-4]! */
2380   0xe59fe010,		/* ldr   lr, [pc, #16]  */
2381   0xe08fe00e,		/* add   lr, pc, lr     */
2382   0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2383 };
2384 
2385 /* Subsequent entries in a procedure linkage table look like
2386    this.  */
2387 static const bfd_vma elf32_arm_plt_entry [] =
2388 {
2389   0xe28fc600,		/* add   ip, pc, #NN	*/
2390   0xe28cca00,		/* add	 ip, ip, #NN	*/
2391   0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
2392   0x00000000,		/* unused		*/
2393 };
2394 
2395 #else /* not FOUR_WORD_PLT */
2396 
2397 /* The first entry in a procedure linkage table looks like
2398    this.  It is set up so that any shared library function that is
2399    called before the relocation has been set up calls the dynamic
2400    linker first.  */
2401 static const bfd_vma elf32_arm_plt0_entry [] =
2402 {
2403   0xe52de004,		/* str	 lr, [sp, #-4]! */
2404   0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
2405   0xe08fe00e,		/* add	 lr, pc, lr	*/
2406   0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
2407   0x00000000,		/* &GOT[0] - .		*/
2408 };
2409 
2410 /* By default subsequent entries in a procedure linkage table look like
2411    this.  Offsets that don't fit into 28 bits will cause a link error.  */
2412 static const bfd_vma elf32_arm_plt_entry_short [] =
2413 {
2414   0xe28fc600,		/* add   ip, pc, #0xNN00000 */
2415   0xe28cca00,		/* add	 ip, ip, #0xNN000   */
2416   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
2417 };
2418 
2419 /* When explicitly asked, we'll use this "long" entry format
2420    which can cope with arbitrary displacements.  */
2421 static const bfd_vma elf32_arm_plt_entry_long [] =
2422 {
2423   0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
2424   0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
2425   0xe28cca00,		/* add	 ip, ip, #0xNN000    */
2426   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
2427 };
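
/* Illustrative only: the "long" form above can reach any 32-bit displacement
   because each add uses an 8-bit immediate with an even rotation; e.g. a
   displacement of 0x12345678 could be built up roughly as
   0x10000000 + 0x02300000 + 0x00045000, followed by a 12-bit offset of 0x678
   in the final ldr.  The "short" form only covers displacements that fit in
   its 28 bits.  */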
2428 
2429 static bool elf32_arm_use_long_plt_entry = false;
2430 
2431 #endif /* not FOUR_WORD_PLT */
2432 
2433 /* The first entry in a procedure linkage table looks like this.
2434    It is set up so that any shared library function that is called before the
2435    relocation has been set up calls the dynamic linker first.  */
2436 static const bfd_vma elf32_thumb2_plt0_entry [] =
2437 {
2438   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439      an instruction may be encoded as one or two array elements.  */
2440   0xf8dfb500,		/* push	   {lr}		 */
2441   0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
2442 			/* add	   lr, pc	 */
2443   0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
2444   0x00000000,		/* &GOT[0] - .		 */
2445 };
2446 
2447 /* Subsequent entries in a procedure linkage table for a thumb-only target
2448    look like this.  */
2449 static const bfd_vma elf32_thumb2_plt_entry [] =
2450 {
2451   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452      an instruction may be encoded as one or two array elements.  */
2453   0x0c00f240,		/* movw	   ip, #0xNNNN	  */
2454   0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
2455   0xf8dc44fc,		/* add	   ip, pc	  */
2456   0xe7fcf000		/* ldr.w   pc, [ip]	  */
2457 			/* b      .-4		  */
2458 };
2459 
2460 /* The format of the first entry in the procedure linkage table
2461    for a VxWorks executable.  */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2463 {
2464   0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
2465   0xe59fc000,		/* ldr	  ip,[pc]			*/
2466   0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
2467   0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
2468 };
2469 
2470 /* The format of subsequent entries in a VxWorks executable.  */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2472 {
2473   0xe59fc000,	      /* ldr	ip,[pc]			*/
2474   0xe59cf000,	      /* ldr	pc,[ip]			*/
2475   0x00000000,	      /* .long	@got				*/
2476   0xe59fc000,	      /* ldr	ip,[pc]			*/
2477   0xea000000,	      /* b	_PLT				*/
2478   0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2479 };
2480 
2481 /* The format of entries in a VxWorks shared library.  */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2483 {
2484   0xe59fc000,	      /* ldr	ip,[pc]			*/
2485   0xe79cf009,	      /* ldr	pc,[ip,r9]			*/
2486   0x00000000,	      /* .long	@got				*/
2487   0xe59fc000,	      /* ldr	ip,[pc]			*/
2488   0xe599f008,	      /* ldr	pc,[r9,#8]			*/
2489   0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2490 };
2491 
2492 /* An initial stub used if the PLT entry is referenced from Thumb code.  */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2495 {
2496   0x4778,		/* bx pc */
2497   0xe7fd		/* b .-2 */
2498 };
2499 
2500 /* The first entry in a procedure linkage table looks like
2501    this.  It is set up so that any shared library function that is
2502    called before the relocation has been set up calls the dynamic
2503    linker first.  */
2504 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2505 {
2506   /* First bundle: */
2507   0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
2508   0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
2509   0xe08cc00f,		/* add	ip, ip, pc			*/
2510   0xe52dc008,		/* str	ip, [sp, #-8]!			*/
2511   /* Second bundle: */
2512   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2513   0xe59cc000,		/* ldr	ip, [ip]			*/
2514   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2515   0xe12fff1c,		/* bx	ip				*/
2516   /* Third bundle: */
2517   0xe320f000,		/* nop					*/
2518   0xe320f000,		/* nop					*/
2519   0xe320f000,		/* nop					*/
2520   /* .Lplt_tail: */
2521   0xe50dc004,		/* str	ip, [sp, #-4]			*/
2522   /* Fourth bundle: */
2523   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2524   0xe59cc000,		/* ldr	ip, [ip]			*/
2525   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2526   0xe12fff1c,		/* bx	ip				*/
2527 };
2528 #define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)
2529 
2530 /* Subsequent entries in a procedure linkage table look like this.  */
2531 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2532 {
2533   0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
2534   0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
2535   0xe08cc00f,		/* add	ip, ip, pc			*/
2536   0xea000000,		/* b	.Lplt_tail			*/
2537 };
2538 
2539 #define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
2540 #define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
2541 #define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
2542 #define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
2543 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2544 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2545 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2546 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
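
/* Rough sanity check on the figures above, for illustration: an ARM B/BL
   instruction carries a signed 24-bit immediate that is shifted left by two
   and applied relative to the instruction address plus 8, so the maximum
   forward reach is ((1 << 23) - 1) * 4 + 8 = 0x02000004 bytes and the maximum
   backward reach is -(1 << 23) * 4 + 8 = -0x01fffff8 bytes, which is what
   ARM_MAX_FWD_BRANCH_OFFSET and ARM_MAX_BWD_BRANCH_OFFSET encode.  */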
2547 
2548 enum stub_insn_type
2549 {
2550   THUMB16_TYPE = 1,
2551   THUMB32_TYPE,
2552   ARM_TYPE,
2553   DATA_TYPE
2554 };
2555 
2556 #define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
2557 /* A bit of a hack: a Thumb conditional branch in which the proper condition
2558    is inserted by arm_build_one_stub().  */
2559 #define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
2560 #define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
2561 #define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2562 #define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2563 #define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2564 #define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
2565 #define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2566 #define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}
2567 
2568 typedef struct
2569 {
2570   bfd_vma	       data;
2571   enum stub_insn_type  type;
2572   unsigned int	       r_type;
2573   int		       reloc_addend;
2574 }  insn_sequence;
2575 
2576 /* See note [Thumb nop sequence] when adding a veneer.  */
2577 
2578 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2579    to reach the stub if necessary.  */
2580 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2581 {
2582   ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
2583   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2584 };
2585 
2586 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2587    available.  */
2588 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2589 {
2590   ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
2591   ARM_INSN (0xe12fff1c),	    /* bx    ip */
2592   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2593 };
2594 
2595 /* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
2596 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2597 {
2598   THUMB16_INSN (0xb401),	     /* push {r0} */
2599   THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2600   THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
2601   THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2602   THUMB16_INSN (0x4760),	     /* bx   ip */
2603   THUMB16_INSN (0xbf00),	     /* nop */
2604   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2605 };
2606 
2607 /* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
2608 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2609 {
2610   THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
2611   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
2612 };
2613 
2614 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2615    M-profile architectures.  */
2616 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2617 {
2618   THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
2619   THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
2620   THUMB16_INSN (0x4760),	     /* bx   ip */
2621 };
2622 
2623 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2624    allowed.  */
2625 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2626 {
2627   THUMB16_INSN (0x4778),	     /* bx   pc */
2628   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2629   ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2630   ARM_INSN (0xe12fff1c),	     /* bx   ip */
2631   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2632 };
2633 
2634 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2635    available.  */
2636 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2637 {
2638   THUMB16_INSN (0x4778),	     /* bx   pc */
2639   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2640   ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
2641   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
2642 };
2643 
2644 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2645    one, when the destination is close enough.  */
2646 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2647 {
2648   THUMB16_INSN (0x4778),	     /* bx   pc */
2649   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2650   ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
2651 };
2652 
2653 /* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
2654    blx to reach the stub if necessary.  */
2655 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2656 {
2657   ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
2658   ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
2659   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2660 };
2661 
2662 /* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
2663    blx to reach the stub if necessary.  We cannot add into pc;
2664    it is not guaranteed to mode switch (the behaviour differs between
2665    ARMv6 and ARMv7).  */
2666 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2667 {
2668   ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2669   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2670   ARM_INSN (0xe12fff1c),	     /* bx    ip */
2671   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2672 };
2673 
2674 /* V4T ARM -> ARM long branch stub, PIC.  */
2675 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2676 {
2677   ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2678   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2679   ARM_INSN (0xe12fff1c),	     /* bx    ip */
2680   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2681 };
2682 
2683 /* V4T Thumb -> ARM long branch stub, PIC.  */
2684 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2685 {
2686   THUMB16_INSN (0x4778),	     /* bx   pc */
2687   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2688   ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2689   ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
2690   DATA_WORD (0, R_ARM_REL32, -4),     /* dcd  R_ARM_REL32(X-4) */
2691 };
2692 
2693 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2694    architectures.  */
2695 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2696 {
2697   THUMB16_INSN (0xb401),	     /* push {r0} */
2698   THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2699   THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
2700   THUMB16_INSN (0x4484),	     /* add  ip, r0 */
2701   THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2702   THUMB16_INSN (0x4760),	     /* bx   ip */
2703   DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X) */
2704 };
2705 
2706 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2707    allowed.  */
2708 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2709 {
2710   THUMB16_INSN (0x4778),	     /* bx   pc */
2711   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2712   ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
2713   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2714   ARM_INSN (0xe12fff1c),	     /* bx   ip */
2715   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
2716 };
2717 
2718 /* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
2719    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2720 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2721 {
2722   ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
2723   ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
2724   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2725 };
2726 
2727 /* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
2728    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2729 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2730 {
2731   THUMB16_INSN (0x4778),	     /* bx   pc */
2732   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2733   ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
2734   ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
2735   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
2736 };
2737 
2738 /* NaCl ARM -> ARM long branch stub.  */
2739 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2740 {
2741   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2742   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2743   ARM_INSN (0xe12fff1c),		/* bx	ip */
2744   ARM_INSN (0xe320f000),		/* nop */
2745   ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2746   DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
2747   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2748   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2749 };
2750 
2751 /* NaCl ARM -> ARM long branch stub, PIC.  */
2752 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2753 {
2754   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2755   ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
2756   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2757   ARM_INSN (0xe12fff1c),		/* bx	ip */
2758   ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2759   DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
2760   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2761   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2762 };
2763 
2764 /* Stub used for transition to secure state (aka SG veneer).  */
2765 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2766 {
2767   THUMB32_INSN (0xe97fe97f),		/* sg.  */
2768   THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
2769 };
2770 
2771 
2772 /* Cortex-A8 erratum-workaround stubs.  */
2773 
2774 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2775    can't use a conditional branch to reach this stub).  */
2776 
2777 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2778 {
2779   THUMB16_BCOND_INSN (0xd001),	       /* b<cond>.n true.  */
2780   THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
2781   THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
2782 };
2783 
2784 /* Stub used for b.w and bl.w instructions.  */
2785 
2786 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2787 {
2788   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2789 };
2790 
2791 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2792 {
2793   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2794 };
2795 
2796 /* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
2797    instruction (which switches to ARM mode) to point to this stub.  Jump to the
2798    real destination using an ARM-mode branch.  */
2799 
2800 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2801 {
2802   ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
2803 };
2804 
2805 /* For each section group there can be a specially created linker section
2806    to hold the stubs for that group.  The name of the stub section is based
2807    upon the name of another section within that group with the suffix below
2808    applied.
2809 
2810    PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2811    create what appeared to be a linker stub section when it actually
2812    contained user code/data.  For example, consider this fragment:
2813 
2814      const char * stubborn_problems[] = { "np" };
2815 
2816    If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2817    section called:
2818 
2819      .data.rel.local.stubborn_problems
2820 
2821    This then causes problems in elf32_arm_build_stubs() as it triggers:
2822 
2823       // Ignore non-stub sections.
2824       if (!strstr (stub_sec->name, STUB_SUFFIX))
2825 	continue;
2826 
2827    And so the section would be ignored instead of being processed.  Hence
2828    the change in definition of STUB_SUFFIX to a name that cannot be a valid
2829    C identifier.  */
2830 #define STUB_SUFFIX ".__stub"
2831 
2832 /* One entry per long/short branch stub defined above.  */
2833 #define DEF_STUBS \
2834   DEF_STUB (long_branch_any_any)	\
2835   DEF_STUB (long_branch_v4t_arm_thumb) \
2836   DEF_STUB (long_branch_thumb_only) \
2837   DEF_STUB (long_branch_v4t_thumb_thumb)	\
2838   DEF_STUB (long_branch_v4t_thumb_arm) \
2839   DEF_STUB (short_branch_v4t_thumb_arm) \
2840   DEF_STUB (long_branch_any_arm_pic) \
2841   DEF_STUB (long_branch_any_thumb_pic) \
2842   DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2843   DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2844   DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2845   DEF_STUB (long_branch_thumb_only_pic) \
2846   DEF_STUB (long_branch_any_tls_pic) \
2847   DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2848   DEF_STUB (long_branch_arm_nacl) \
2849   DEF_STUB (long_branch_arm_nacl_pic) \
2850   DEF_STUB (cmse_branch_thumb_only) \
2851   DEF_STUB (a8_veneer_b_cond) \
2852   DEF_STUB (a8_veneer_b) \
2853   DEF_STUB (a8_veneer_bl) \
2854   DEF_STUB (a8_veneer_blx) \
2855   DEF_STUB (long_branch_thumb2_only) \
2856   DEF_STUB (long_branch_thumb2_only_pure)
2857 
2858 #define DEF_STUB(x) arm_stub_##x,
2859 enum elf32_arm_stub_type
2860 {
2861   arm_stub_none,
2862   DEF_STUBS
2863   max_stub_type
2864 };
2865 #undef DEF_STUB
2866 
2867 /* Note the first a8_veneer type.  */
2868 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2869 
2870 typedef struct
2871 {
2872   const insn_sequence* template_sequence;
2873   int template_size;
2874 } stub_def;
2875 
2876 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2877 static const stub_def stub_definitions[] =
2878 {
2879   {NULL, 0},
2880   DEF_STUBS
2881 };
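
/* For illustration, DEF_STUBS expands twice: once above to give each stub an
   enum value, and once here to pair that value with its template, so that,
   roughly,

     stub_definitions[arm_stub_long_branch_any_any]
       == { elf32_arm_stub_long_branch_any_any,
            ARRAY_SIZE (elf32_arm_stub_long_branch_any_any) }

   with slot 0 reserved for arm_stub_none.  */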
2882 
2883 struct elf32_arm_stub_hash_entry
2884 {
2885   /* Base hash table entry structure.  */
2886   struct bfd_hash_entry root;
2887 
2888   /* The stub section.  */
2889   asection *stub_sec;
2890 
2891   /* Offset within stub_sec of the beginning of this stub.  */
2892   bfd_vma stub_offset;
2893 
2894   /* Given the symbol's value and its section we can determine its final
2895      value when building the stubs (so the stub knows where to jump).  */
2896   bfd_vma target_value;
2897   asection *target_section;
2898 
2899   /* Same as above but for the source of the branch to the stub.  Used for
2900      Cortex-A8 erratum workaround to patch it to branch to the stub.  As
2901      such, source section does not need to be recorded since Cortex-A8 erratum
2902      such, the source section does not need to be recorded since Cortex-A8 erratum
2903      same section.  */
2904   bfd_vma source_value;
2905 
2906   /* The instruction which caused this stub to be generated (only valid for
2907      Cortex-A8 erratum workaround stubs at present).  */
2908   unsigned long orig_insn;
2909 
2910   /* The stub type.  */
2911   enum elf32_arm_stub_type stub_type;
2912   /* Its encoding size in bytes.  */
2913   int stub_size;
2914   /* Its template.  */
2915   const insn_sequence *stub_template;
2916   /* The size of the template (number of entries).  */
2917   int stub_template_size;
2918 
2919   /* The symbol table entry, if any, that this was derived from.  */
2920   struct elf32_arm_link_hash_entry *h;
2921 
2922   /* Type of branch.  */
2923   enum arm_st_branch_type branch_type;
2924 
2925   /* Where this stub is being called from, or, in the case of combined
2926      stub sections, the first input section in the group.  */
2927   asection *id_sec;
2928 
2929   /* The name for the local symbol at the start of this stub.  The
2930      stub name in the hash table has to be unique; this does not, so
2931      it can be friendlier.  */
2932   char *output_name;
2933 };
2934 
2935 /* Used to build a map of a section.  This is required for mixed-endian
2936    code/data.  */
2937 
2938 typedef struct elf32_elf_section_map
2939 {
2940   bfd_vma vma;
2941   char type;
2942 }
2943 elf32_arm_section_map;
2944 
2945 /* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */
2946 
2947 typedef enum
2948 {
2949   VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2950   VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2951   VFP11_ERRATUM_ARM_VENEER,
2952   VFP11_ERRATUM_THUMB_VENEER
2953 }
2954 elf32_vfp11_erratum_type;
2955 
2956 typedef struct elf32_vfp11_erratum_list
2957 {
2958   struct elf32_vfp11_erratum_list *next;
2959   bfd_vma vma;
2960   union
2961   {
2962     struct
2963     {
2964       struct elf32_vfp11_erratum_list *veneer;
2965       unsigned int vfp_insn;
2966     } b;
2967     struct
2968     {
2969       struct elf32_vfp11_erratum_list *branch;
2970       unsigned int id;
2971     } v;
2972   } u;
2973   elf32_vfp11_erratum_type type;
2974 }
2975 elf32_vfp11_erratum_list;
2976 
2977 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2978    veneer.  */
2979 typedef enum
2980 {
2981   STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2982   STM32L4XX_ERRATUM_VENEER
2983 }
2984 elf32_stm32l4xx_erratum_type;
2985 
2986 typedef struct elf32_stm32l4xx_erratum_list
2987 {
2988   struct elf32_stm32l4xx_erratum_list *next;
2989   bfd_vma vma;
2990   union
2991   {
2992     struct
2993     {
2994       struct elf32_stm32l4xx_erratum_list *veneer;
2995       unsigned int insn;
2996     } b;
2997     struct
2998     {
2999       struct elf32_stm32l4xx_erratum_list *branch;
3000       unsigned int id;
3001     } v;
3002   } u;
3003   elf32_stm32l4xx_erratum_type type;
3004 }
3005 elf32_stm32l4xx_erratum_list;
3006 
3007 typedef enum
3008 {
3009   DELETE_EXIDX_ENTRY,
3010   INSERT_EXIDX_CANTUNWIND_AT_END
3011 }
3012 arm_unwind_edit_type;
3013 
3014 /* A (sorted) list of edits to apply to an unwind table.  */
3015 typedef struct arm_unwind_table_edit
3016 {
3017   arm_unwind_edit_type type;
3018   /* Note: we sometimes want to insert an unwind entry corresponding to a
3019      section different from the one we're currently writing out, so record the
3020      (text) section this edit relates to here.  */
3021   asection *linked_section;
3022   unsigned int index;
3023   struct arm_unwind_table_edit *next;
3024 }
3025 arm_unwind_table_edit;
3026 
3027 typedef struct _arm_elf_section_data
3028 {
3029   /* Information about mapping symbols.  */
3030   struct bfd_elf_section_data elf;
3031   unsigned int mapcount;
3032   unsigned int mapsize;
3033   elf32_arm_section_map *map;
3034   /* Information about CPU errata.  */
3035   unsigned int erratumcount;
3036   elf32_vfp11_erratum_list *erratumlist;
3037   unsigned int stm32l4xx_erratumcount;
3038   elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3039   unsigned int additional_reloc_count;
3040   /* Information about unwind tables.  */
3041   union
3042   {
3043     /* Unwind info attached to a text section.  */
3044     struct
3045     {
3046       asection *arm_exidx_sec;
3047     } text;
3048 
3049     /* Unwind info attached to an .ARM.exidx section.  */
3050     struct
3051     {
3052       arm_unwind_table_edit *unwind_edit_list;
3053       arm_unwind_table_edit *unwind_edit_tail;
3054     } exidx;
3055   } u;
3056 }
3057 _arm_elf_section_data;
3058 
3059 #define elf32_arm_section_data(sec) \
3060   ((_arm_elf_section_data *) elf_section_data (sec))
3061 
3062 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3063    These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3064    so may be created multiple times: we use an array of these entries whilst
3065    relaxing which we can refresh easily, then create stubs for each potentially
3066    erratum-triggering instruction once we've settled on a solution.  */
3067 
3068 struct a8_erratum_fix
3069 {
3070   bfd *input_bfd;
3071   asection *section;
3072   bfd_vma offset;
3073   bfd_vma target_offset;
3074   unsigned long orig_insn;
3075   char *stub_name;
3076   enum elf32_arm_stub_type stub_type;
3077   enum arm_st_branch_type branch_type;
3078 };
3079 
3080 /* A table of relocs applied to branches which might trigger Cortex-A8
3081    erratum.  */
3082 
3083 struct a8_erratum_reloc
3084 {
3085   bfd_vma from;
3086   bfd_vma destination;
3087   struct elf32_arm_link_hash_entry *hash;
3088   const char *sym_name;
3089   unsigned int r_type;
3090   enum arm_st_branch_type branch_type;
3091   bool non_a8_stub;
3092 };
3093 
3094 /* The size of the thread control block.  */
3095 #define TCB_SIZE	8
3096 
3097 /* ARM-specific information about a PLT entry, over and above the usual
3098    gotplt_union.  */
3099 struct arm_plt_info
3100 {
3101   /* We reference count Thumb references to a PLT entry separately,
3102      so that we can emit the Thumb trampoline only if needed.  */
3103   bfd_signed_vma thumb_refcount;
3104 
3105   /* Some references from Thumb code may be eliminated by BL->BLX
3106      conversion, so record them separately.  */
3107   bfd_signed_vma maybe_thumb_refcount;
3108 
3109   /* How many of the recorded PLT accesses were from non-call relocations.
3110      This information is useful when deciding whether anything takes the
3111      address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
3112      non-call references to the function should resolve directly to the
3113      real runtime target.  */
3114   unsigned int noncall_refcount;
3115 
3116   /* Since PLT entries have variable size if the Thumb prologue is
3117      used, we need to record the index into .got.plt instead of
3118      recomputing it from the PLT offset.  */
3119   bfd_signed_vma got_offset;
3120 };
3121 
3122 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
3123 struct arm_local_iplt_info
3124 {
3125   /* The information that is usually found in the generic ELF part of
3126      the hash table entry.  */
3127   union gotplt_union root;
3128 
3129   /* The information that is usually found in the ARM-specific part of
3130      the hash table entry.  */
3131   struct arm_plt_info arm;
3132 
3133   /* A list of all potential dynamic relocations against this symbol.  */
3134   struct elf_dyn_relocs *dyn_relocs;
3135 };
3136 
3137 /* Structure to handle FDPIC support for local functions.  */
3138 struct fdpic_local
3139 {
3140   unsigned int funcdesc_cnt;
3141   unsigned int gotofffuncdesc_cnt;
3142   int funcdesc_offset;
3143 };
3144 
3145 struct elf_arm_obj_tdata
3146 {
3147   struct elf_obj_tdata root;
3148 
3149   /* Zero to warn when linking objects with incompatible enum sizes.  */
3150   int no_enum_size_warning;
3151 
3152   /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
3153   int no_wchar_size_warning;
3154 
3155   /* The number of entries in each of the arrays in this structure.
3156      Used to avoid buffer overruns.  */
3157   bfd_size_type num_entries;
3158 
3159   /* tls_type for each local got entry.  */
3160   char *local_got_tls_type;
3161 
3162   /* GOTPLT entries for TLS descriptors.  */
3163   bfd_vma *local_tlsdesc_gotent;
3164 
3165   /* Information for local symbols that need entries in .iplt.  */
3166   struct arm_local_iplt_info **local_iplt;
3167 
3168   /* Maintains FDPIC counters and funcdesc info.  */
3169   struct fdpic_local *local_fdpic_cnts;
3170 };
3171 
3172 #define elf_arm_tdata(bfd) \
3173   ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3174 
3175 #define elf32_arm_num_entries(bfd) \
3176   (elf_arm_tdata (bfd)->num_entries)
3177 
3178 #define elf32_arm_local_got_tls_type(bfd) \
3179   (elf_arm_tdata (bfd)->local_got_tls_type)
3180 
3181 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3182   (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3183 
3184 #define elf32_arm_local_iplt(bfd) \
3185   (elf_arm_tdata (bfd)->local_iplt)
3186 
3187 #define elf32_arm_local_fdpic_cnts(bfd) \
3188   (elf_arm_tdata (bfd)->local_fdpic_cnts)
3189 
3190 #define is_arm_elf(bfd) \
3191   (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3192    && elf_tdata (bfd) != NULL \
3193    && elf_object_id (bfd) == ARM_ELF_DATA)
3194 
3195 static bool
3196 elf32_arm_mkobject (bfd *abfd)
3197 {
3198   return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3199 				  ARM_ELF_DATA);
3200 }
3201 
3202 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3203 
3204 /* Structure to handle FDPIC support for extern functions.  */
3205 struct fdpic_global {
3206   unsigned int gotofffuncdesc_cnt;
3207   unsigned int gotfuncdesc_cnt;
3208   unsigned int funcdesc_cnt;
3209   int funcdesc_offset;
3210   int gotfuncdesc_offset;
3211 };
3212 
3213 /* Arm ELF linker hash entry.  */
3214 struct elf32_arm_link_hash_entry
3215 {
3216   struct elf_link_hash_entry root;
3217 
3218   /* ARM-specific PLT information.  */
3219   struct arm_plt_info plt;
3220 
3221 #define GOT_UNKNOWN	0
3222 #define GOT_NORMAL	1
3223 #define GOT_TLS_GD	2
3224 #define GOT_TLS_IE	4
3225 #define GOT_TLS_GDESC	8
3226 #define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3227   unsigned int tls_type : 8;
3228 
3229   /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
3230   unsigned int is_iplt : 1;
3231 
3232   unsigned int unused : 23;
3233 
3234   /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3235      starting at the end of the jump table.  */
3236   bfd_vma tlsdesc_got;
3237 
3238   /* The symbol marking the real symbol location for exported thumb
3239      symbols with Arm stubs.  */
3240   struct elf_link_hash_entry *export_glue;
3241 
3242   /* A pointer to the most recently used stub hash entry against this
3243      symbol.  */
3244   struct elf32_arm_stub_hash_entry *stub_cache;
3245 
3246   /* Counter for FDPIC relocations against this symbol.  */
3247   struct fdpic_global fdpic_cnts;
3248 };
3249 
3250 /* Traverse an arm ELF linker hash table.  */
3251 #define elf32_arm_link_hash_traverse(table, func, info)			\
3252   (elf_link_hash_traverse						\
3253    (&(table)->root,							\
3254     (bool (*) (struct elf_link_hash_entry *, void *)) (func),		\
3255     (info)))
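
/* A minimal usage sketch (hypothetical, not taken from this file): the
   callback is invoked once for each hash entry, together with the opaque
   INFO pointer, and returning false stops the traversal early.

     static bool
     count_iplt_entries (struct elf32_arm_link_hash_entry *eh, void *inf)
     {
       unsigned int *count = (unsigned int *) inf;
       if (eh->is_iplt)
         (*count)++;
       return true;
     }

     unsigned int count = 0;
     elf32_arm_link_hash_traverse (arm_htab, count_iplt_entries, &count);

   Here ARM_HTAB and COUNT_IPLT_ENTRIES are illustrative names only.  */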
3256 
3257 /* Get the ARM elf linker hash table from a link_info structure.  */
3258 #define elf32_arm_hash_table(p) \
3259   ((is_elf_hash_table ((p)->hash)					\
3260     && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA)		\
3261    ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
3262 
3263 #define arm_stub_hash_lookup(table, string, create, copy) \
3264   ((struct elf32_arm_stub_hash_entry *) \
3265    bfd_hash_lookup ((table), (string), (create), (copy)))
3266 
3267 /* Array to keep track of which stub sections have been created, and
3268    information on stub grouping.  */
3269 struct map_stub
3270 {
3271   /* This is the section to which stubs in the group will be
3272      attached.  */
3273   asection *link_sec;
3274   /* The stub section.  */
3275   asection *stub_sec;
3276 };
3277 
3278 #define elf32_arm_compute_jump_table_size(htab) \
3279   ((htab)->next_tls_desc_index * 4)
3280 
3281 /* ARM ELF linker hash table.  */
3282 struct elf32_arm_link_hash_table
3283 {
3284   /* The main hash table.  */
3285   struct elf_link_hash_table root;
3286 
3287   /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
3288   bfd_size_type thumb_glue_size;
3289 
3290   /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
3291   bfd_size_type arm_glue_size;
3292 
3293   /* The size in bytes of section containing the ARMv4 BX veneers.  */
3294   bfd_size_type bx_glue_size;
3295 
3296   /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
3297      veneer has been populated.  */
3298   bfd_vma bx_glue_offset[15];
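  /* Illustrative example, assuming the array is indexed by register number:
     once a veneer for "BX r3" has been recorded,
     (bx_glue_offset[3] & ~(bfd_vma) 3) is the veneer's offset within the
     glue section, bit 1 marks it as present, and bit 0 is set once the
     veneer code has actually been written out.  */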
3299 
3300   /* The size in bytes of the section containing glue for VFP11 erratum
3301      veneers.  */
3302   bfd_size_type vfp11_erratum_glue_size;
3303 
3304   /* The size in bytes of the section containing glue for STM32L4XX erratum
3305      veneers.  */
3306   bfd_size_type stm32l4xx_erratum_glue_size;
3307 
3308   /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
3309      holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3310      elf32_arm_write_section().  */
3311   struct a8_erratum_fix *a8_erratum_fixes;
3312   unsigned int num_a8_erratum_fixes;
3313 
3314   /* An arbitrary input BFD chosen to hold the glue sections.  */
3315   bfd * bfd_of_glue_owner;
3316 
3317   /* Nonzero to output a BE8 image.  */
3318   int byteswap_code;
3319 
3320   /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3321      Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
3322   int target1_is_rel;
3323 
3324   /* The relocation to use for R_ARM_TARGET2 relocations.  */
3325   int target2_reloc;
3326 
3327   /* 0 = Ignore R_ARM_V4BX.
3328      1 = Convert BX to MOV PC.
3329      2 = Generate v4 interworking stubs.  */
3330   int fix_v4bx;
3331 
3332   /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
3333   int fix_cortex_a8;
3334 
3335   /* Whether we should fix the ARM1176 BLX immediate issue.  */
3336   int fix_arm1176;
3337 
3338   /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
3339   int use_blx;
3340 
3341   /* What sort of code sequences we should look for which may trigger the
3342      VFP11 denorm erratum.  */
3343   bfd_arm_vfp11_fix vfp11_fix;
3344 
3345   /* Global counter for the number of fixes we have emitted.  */
3346   int num_vfp11_fixes;
3347 
3348   /* What sort of code sequences we should look for which may trigger the
3349      STM32L4XX erratum.  */
3350   bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3351 
3352   /* Global counter for the number of fixes we have emitted.  */
3353   int num_stm32l4xx_fixes;
3354 
3355   /* Nonzero to force PIC branch veneers.  */
3356   int pic_veneer;
3357 
3358   /* The number of bytes in the initial entry in the PLT.  */
3359   bfd_size_type plt_header_size;
3360 
3361   /* The number of bytes in the subsequent PLT entries.  */
3362   bfd_size_type plt_entry_size;
3363 
3364   /* True if the target uses REL relocations.  */
3365   bool use_rel;
3366 
3367   /* Nonzero if import library must be a secure gateway import library
3368      as per ARMv8-M Security Extensions.  */
3369   int cmse_implib;
3370 
3371   /* The import library whose symbols' addresses must remain stable in
3372      the generated import library.  */
3373   bfd *in_implib_bfd;
3374 
3375   /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
3376   bfd_vma next_tls_desc_index;
3377 
3378   /* How many R_ARM_TLS_DESC relocations were generated so far.  */
3379   bfd_vma num_tls_desc;
3380 
3381   /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
3382   asection *srelplt2;
3383 
3384   /* Offset in .plt section of tls_arm_trampoline.  */
3385   bfd_vma tls_trampoline;
3386 
3387   /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
3388   union
3389   {
3390     bfd_signed_vma refcount;
3391     bfd_vma offset;
3392   } tls_ldm_got;
3393 
3394   /* For convenience in allocate_dynrelocs.  */
3395   bfd * obfd;
3396 
3397   /* The amount of space used by the reserved portion of the sgotplt
3398      section, plus whatever space is used by the jump slots.  */
3399   bfd_vma sgotplt_jump_table_size;
3400 
3401   /* The stub hash table.  */
3402   struct bfd_hash_table stub_hash_table;
3403 
3404   /* Linker stub bfd.  */
3405   bfd *stub_bfd;
3406 
3407   /* Linker call-backs.  */
3408   asection * (*add_stub_section) (const char *, asection *, asection *,
3409 				  unsigned int);
3410   void (*layout_sections_again) (void);
3411 
3412   /* Array to keep track of which stub sections have been created, and
3413      information on stub grouping.  */
3414   struct map_stub *stub_group;
3415 
3416   /* Input stub section holding secure gateway veneers.  */
3417   asection *cmse_stub_sec;
3418 
3419   /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3420      start to be allocated.  */
3421   bfd_vma new_cmse_stub_offset;
3422 
3423   /* Number of elements in stub_group.  */
3424   unsigned int top_id;
3425 
3426   /* Assorted information used by elf32_arm_size_stubs.  */
3427   unsigned int bfd_count;
3428   unsigned int top_index;
3429   asection **input_list;
3430 
3431   /* True if the target system uses FDPIC. */
3432   int fdpic_p;
3433 
3434   /* Fixup section. Used for FDPIC.  */
3435   asection *srofixup;
3436 };
3437 
3438 /* Add an FDPIC read-only fixup.  */
3439 static void
3440 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3441 {
3442   bfd_vma fixup_offset;
3443 
3444   fixup_offset = srofixup->reloc_count++ * 4;
3445   BFD_ASSERT (fixup_offset < srofixup->size);
3446   bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3447 }
3448 
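/* Return the index of the least significant set bit in MASK; for example,
   ctz (0x28) == 3.  The result is undefined for MASK == 0 when the GCC
   builtin is used.  */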
3449 static inline int
3450 ctz (unsigned int mask)
3451 {
3452 #if GCC_VERSION >= 3004
3453   return __builtin_ctz (mask);
3454 #else
3455   unsigned int i;
3456 
3457   for (i = 0; i < 8 * sizeof (mask); i++)
3458     {
3459       if (mask & 0x1)
3460 	break;
3461       mask = (mask >> 1);
3462     }
3463   return i;
3464 #endif
3465 }
3466 
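/* Return the number of bits set in MASK; for example,
   elf32_arm_popcount (0x28) == 2.  */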
3467 static inline int
3468 elf32_arm_popcount (unsigned int mask)
3469 {
3470 #if GCC_VERSION >= 3004
3471   return __builtin_popcount (mask);
3472 #else
3473   unsigned int i;
3474   int sum = 0;
3475 
3476   for (i = 0; i < 8 * sizeof (mask); i++)
3477     {
3478       if (mask & 0x1)
3479 	sum++;
3480       mask = (mask >> 1);
3481     }
3482   return sum;
3483 #endif
3484 }
3485 
3486 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3487 				    asection *sreloc, Elf_Internal_Rela *rel);
3488 
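/* Fill in the two-word FDPIC function descriptor at offset OFFSET in the
   .got section, unless it has already been written (the low bit of
   *FUNCDESC_OFFSET records this).  When linking PIC, emit an
   R_ARM_FUNCDESC_VALUE dynamic relocation against DYNINDX and store ADDR
   and SEG; otherwise store DYNRELOC_VALUE and the GOT pointer value
   directly and emit two read-only fixups for the FDPIC loader.  Finally
   set the low bit of *FUNCDESC_OFFSET to mark the descriptor as done.  */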
3489 static void
3490 arm_elf_fill_funcdesc (bfd *output_bfd,
3491 		       struct bfd_link_info *info,
3492 		       int *funcdesc_offset,
3493 		       int dynindx,
3494 		       int offset,
3495 		       bfd_vma addr,
3496 		       bfd_vma dynreloc_value,
3497 		       bfd_vma seg)
3498 {
3499   if ((*funcdesc_offset & 1) == 0)
3500     {
3501       struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3502       asection *sgot = globals->root.sgot;
3503 
3504       if (bfd_link_pic (info))
3505 	{
3506 	  asection *srelgot = globals->root.srelgot;
3507 	  Elf_Internal_Rela outrel;
3508 
3509 	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3510 	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3511 	  outrel.r_addend = 0;
3512 
3513 	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3514 	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3515 	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3516 	}
3517       else
3518 	{
3519 	  struct elf_link_hash_entry *hgot = globals->root.hgot;
3520 	  bfd_vma got_value = hgot->root.u.def.value
3521 	    + hgot->root.u.def.section->output_section->vma
3522 	    + hgot->root.u.def.section->output_offset;
3523 
3524 	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
3525 			       sgot->output_section->vma + sgot->output_offset
3526 			       + offset);
3527 	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
3528 			       sgot->output_section->vma + sgot->output_offset
3529 			       + offset + 4);
3530 	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3531 	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3532 	}
3533       *funcdesc_offset |= 1;
3534     }
3535 }
3536 
3537 /* Create an entry in an ARM ELF linker hash table.  */
3538 
3539 static struct bfd_hash_entry *
3540 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3541 			     struct bfd_hash_table * table,
3542 			     const char * string)
3543 {
3544   struct elf32_arm_link_hash_entry * ret =
3545     (struct elf32_arm_link_hash_entry *) entry;
3546 
3547   /* Allocate the structure if it has not already been allocated by a
3548      subclass.  */
3549   if (ret == NULL)
3550     ret = (struct elf32_arm_link_hash_entry *)
3551 	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3552   if (ret == NULL)
3553     return (struct bfd_hash_entry *) ret;
3554 
3555   /* Call the allocation method of the superclass.  */
3556   ret = ((struct elf32_arm_link_hash_entry *)
3557 	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3558 				     table, string));
3559   if (ret != NULL)
3560     {
3561       ret->tls_type = GOT_UNKNOWN;
3562       ret->tlsdesc_got = (bfd_vma) -1;
3563       ret->plt.thumb_refcount = 0;
3564       ret->plt.maybe_thumb_refcount = 0;
3565       ret->plt.noncall_refcount = 0;
3566       ret->plt.got_offset = -1;
3567       ret->is_iplt = false;
3568       ret->export_glue = NULL;
3569 
3570       ret->stub_cache = NULL;
3571 
3572       ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3573       ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3574       ret->fdpic_cnts.funcdesc_cnt = 0;
3575       ret->fdpic_cnts.funcdesc_offset = -1;
3576       ret->fdpic_cnts.gotfuncdesc_offset = -1;
3577     }
3578 
3579   return (struct bfd_hash_entry *) ret;
3580 }
3581 
3582 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3583    symbols.  */
3584 
3585 static bool
3586 elf32_arm_allocate_local_sym_info (bfd *abfd)
3587 {
3588   if (elf_local_got_refcounts (abfd) == NULL)
3589     {
3590       bfd_size_type num_syms;
3591 
3592       elf32_arm_num_entries (abfd) = 0;
3593 
3594       /* Whilst it might be tempting to allocate a single block of memory and
3595 	 then divide it up amongst the arrays in the elf_arm_obj_tdata
3596 	 structure, this interferes with the work of memory checkers looking
3597 	 for buffer overruns.  So allocate each array individually.  */
3598 
3599       num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3600 
3601       elf_local_got_refcounts (abfd) = bfd_zalloc
3602 	(abfd, num_syms * sizeof (* elf_local_got_refcounts (abfd)));
3603 
3604       if (elf_local_got_refcounts (abfd) == NULL)
3605 	return false;
3606 
3607       elf32_arm_local_tlsdesc_gotent (abfd) = bfd_zalloc
3608 	(abfd, num_syms * sizeof (* elf32_arm_local_tlsdesc_gotent (abfd)));
3609 
3610       if (elf32_arm_local_tlsdesc_gotent (abfd) == NULL)
3611 	return false;
3612 
3613       elf32_arm_local_iplt (abfd) = bfd_zalloc
3614 	(abfd, num_syms * sizeof (* elf32_arm_local_iplt (abfd)));
3615 
3616       if (elf32_arm_local_iplt (abfd) == NULL)
3617 	return false;
3618 
3619       elf32_arm_local_fdpic_cnts (abfd) = bfd_zalloc
3620 	(abfd, num_syms * sizeof (* elf32_arm_local_fdpic_cnts (abfd)));
3621 
3622       if (elf32_arm_local_fdpic_cnts (abfd) == NULL)
3623 	return false;
3624 
3625       elf32_arm_local_got_tls_type (abfd) = bfd_zalloc
3626 	(abfd, num_syms * sizeof (* elf32_arm_local_got_tls_type (abfd)));
3627 
3628       if (elf32_arm_local_got_tls_type (abfd) == NULL)
3629 	return false;
3630 
3631       elf32_arm_num_entries (abfd) = num_syms;
3632 
3633 #if GCC_VERSION >= 3000
3634       BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
3635 		  <= __alignof__ (*elf_local_got_refcounts (abfd)));
3636       BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
3637 		  <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
3638       BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
3639 		  <= __alignof__ (*elf32_arm_local_iplt (abfd)));
3640       BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
3641 		  <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
3642 #endif
3643     }
3644   return true;
3645 }
3646 
3647 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3648    to input bfd ABFD.  Create the information if it doesn't already exist.
3649    Return null if an allocation fails.  */
3650 
3651 static struct arm_local_iplt_info *
3652 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3653 {
3654   struct arm_local_iplt_info **ptr;
3655 
3656   if (!elf32_arm_allocate_local_sym_info (abfd))
3657     return NULL;
3658 
3659   BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3660   BFD_ASSERT (r_symndx < elf32_arm_num_entries (abfd));
3661   ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3662   if (*ptr == NULL)
3663     *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3664   return *ptr;
3665 }
3666 
3667 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3668    in ABFD's symbol table.  If the symbol is global, H points to its
3669    hash table entry, otherwise H is null.
3670 
3671    Return true if the symbol does have PLT information.  When returning
3672    true, point *ROOT_PLT at the target-independent reference count/offset
3673    union and *ARM_PLT at the ARM-specific information.  */
3674 
3675 static bool
3676 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3677 			struct elf32_arm_link_hash_entry *h,
3678 			unsigned long r_symndx, union gotplt_union **root_plt,
3679 			struct arm_plt_info **arm_plt)
3680 {
3681   struct arm_local_iplt_info *local_iplt;
3682 
3683   if (globals->root.splt == NULL && globals->root.iplt == NULL)
3684     return false;
3685 
3686   if (h != NULL)
3687     {
3688       *root_plt = &h->root.plt;
3689       *arm_plt = &h->plt;
3690       return true;
3691     }
3692 
3693   if (elf32_arm_local_iplt (abfd) == NULL)
3694     return false;
3695 
3696   if (r_symndx >= elf32_arm_num_entries (abfd))
3697     return false;
3698 
3699   local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3700   if (local_iplt == NULL)
3701     return false;
3702 
3703   *root_plt = &local_iplt->root;
3704   *arm_plt = &local_iplt->arm;
3705   return true;
3706 }
3707 
3708 static bool using_thumb_only (struct elf32_arm_link_hash_table *globals);
3709 
3710 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3711    before it.  */
3712 
3713 static bool
3714 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3715 				  struct arm_plt_info *arm_plt)
3716 {
3717   struct elf32_arm_link_hash_table *htab;
3718 
3719   htab = elf32_arm_hash_table (info);
3720 
3721   return (!using_thumb_only (htab) && (arm_plt->thumb_refcount != 0
3722 	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3723 }
3724 
3725 /* Return a pointer to the head of the dynamic reloc list that should
3726    be used for local symbol ISYM, which is symbol number R_SYMNDX in
3727    ABFD's symbol table.  Return null if an error occurs.  */
3728 
3729 static struct elf_dyn_relocs **
3730 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3731 				   Elf_Internal_Sym *isym)
3732 {
3733   if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3734     {
3735       struct arm_local_iplt_info *local_iplt;
3736 
3737       local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3738       if (local_iplt == NULL)
3739 	return NULL;
3740       return &local_iplt->dyn_relocs;
3741     }
3742   else
3743     {
3744       /* Track dynamic relocs needed for local syms too.
3745 	 We really need local syms available to do this
3746 	 easily.  Oh well.  */
3747       asection *s;
3748       void *vpp;
3749 
3750       s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3751       if (s == NULL)
3752 	return NULL;
3753 
3754       vpp = &elf_section_data (s)->local_dynrel;
3755       return (struct elf_dyn_relocs **) vpp;
3756     }
3757 }
3758 
3759 /* Initialize an entry in the stub hash table.  */
3760 
3761 static struct bfd_hash_entry *
3762 stub_hash_newfunc (struct bfd_hash_entry *entry,
3763 		   struct bfd_hash_table *table,
3764 		   const char *string)
3765 {
3766   /* Allocate the structure if it has not already been allocated by a
3767      subclass.  */
3768   if (entry == NULL)
3769     {
3770       entry = (struct bfd_hash_entry *)
3771 	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3772       if (entry == NULL)
3773 	return entry;
3774     }
3775 
3776   /* Call the allocation method of the superclass.  */
3777   entry = bfd_hash_newfunc (entry, table, string);
3778   if (entry != NULL)
3779     {
3780       struct elf32_arm_stub_hash_entry *eh;
3781 
3782       /* Initialize the local fields.  */
3783       eh = (struct elf32_arm_stub_hash_entry *) entry;
3784       eh->stub_sec = NULL;
3785       eh->stub_offset = (bfd_vma) -1;
3786       eh->source_value = 0;
3787       eh->target_value = 0;
3788       eh->target_section = NULL;
3789       eh->orig_insn = 0;
3790       eh->stub_type = arm_stub_none;
3791       eh->stub_size = 0;
3792       eh->stub_template = NULL;
3793       eh->stub_template_size = -1;
3794       eh->h = NULL;
3795       eh->id_sec = NULL;
3796       eh->output_name = NULL;
3797     }
3798 
3799   return entry;
3800 }
3801 
3802 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3803    shortcuts to them in our hash table.  */
3804 
3805 static bool
3806 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3807 {
3808   struct elf32_arm_link_hash_table *htab;
3809 
3810   htab = elf32_arm_hash_table (info);
3811   if (htab == NULL)
3812     return false;
3813 
3814   if (! _bfd_elf_create_got_section (dynobj, info))
3815     return false;
3816 
3817   /* Also create .rofixup.  */
3818   if (htab->fdpic_p)
3819     {
3820       htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3821 						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3822 						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3823       if (htab->srofixup == NULL
3824 	  || !bfd_set_section_alignment (htab->srofixup, 2))
3825 	return false;
3826     }
3827 
3828   return true;
3829 }
3830 
3831 /* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */
3832 
3833 static bool
3834 create_ifunc_sections (struct bfd_link_info *info)
3835 {
3836   struct elf32_arm_link_hash_table *htab;
3837   const struct elf_backend_data *bed;
3838   bfd *dynobj;
3839   asection *s;
3840   flagword flags;
3841 
3842   htab = elf32_arm_hash_table (info);
3843   dynobj = htab->root.dynobj;
3844   bed = get_elf_backend_data (dynobj);
3845   flags = bed->dynamic_sec_flags;
3846 
3847   if (htab->root.iplt == NULL)
3848     {
3849       s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3850 					      flags | SEC_READONLY | SEC_CODE);
3851       if (s == NULL
3852 	  || !bfd_set_section_alignment (s, bed->plt_alignment))
3853 	return false;
3854       htab->root.iplt = s;
3855     }
3856 
3857   if (htab->root.irelplt == NULL)
3858     {
3859       s = bfd_make_section_anyway_with_flags (dynobj,
3860 					      RELOC_SECTION (htab, ".iplt"),
3861 					      flags | SEC_READONLY);
3862       if (s == NULL
3863 	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
3864 	return false;
3865       htab->root.irelplt = s;
3866     }
3867 
3868   if (htab->root.igotplt == NULL)
3869     {
3870       s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3871       if (s == NULL
3872 	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
3873 	return false;
3874       htab->root.igotplt = s;
3875     }
3876   return true;
3877 }
3878 
3879 /* Determine if we're dealing with a Thumb only architecture.  */
3880 
3881 static bool
3882 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3883 {
3884   int arch;
3885   int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3886 					  Tag_CPU_arch_profile);
3887 
3888   if (profile)
3889     return profile == 'M';
3890 
3891   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3892 
3893   /* Force return logic to be reviewed for each new architecture.  */
3894   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3895 
3896   if (arch == TAG_CPU_ARCH_V6_M
3897       || arch == TAG_CPU_ARCH_V6S_M
3898       || arch == TAG_CPU_ARCH_V7E_M
3899       || arch == TAG_CPU_ARCH_V8M_BASE
3900       || arch == TAG_CPU_ARCH_V8M_MAIN
3901       || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3902     return true;
3903 
3904   return false;
3905 }
3906 
3907 /* Determine if we're dealing with a Thumb-2 object.  */
3908 
3909 static bool
3910 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3911 {
3912   int arch;
3913   int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3914 					    Tag_THUMB_ISA_use);
3915 
3916   /* No use of thumb permitted, or a legacy thumb-1/2 definition.  */
3917   if (thumb_isa < 3)
3918     return thumb_isa == 2;
3919 
3920   /* Variant of thumb is described by the architecture tag.  */
3921   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3922 
3923   /* Force return logic to be reviewed for each new architecture.  */
3924   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3925 
3926   return (arch == TAG_CPU_ARCH_V6T2
3927 	  || arch == TAG_CPU_ARCH_V7
3928 	  || arch == TAG_CPU_ARCH_V7E_M
3929 	  || arch == TAG_CPU_ARCH_V8
3930 	  || arch == TAG_CPU_ARCH_V8R
3931 	  || arch == TAG_CPU_ARCH_V8M_MAIN
3932 	  || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3933 }
3934 
3935 /* Determine whether Thumb-2 BL instruction is available.  */
3936 
3937 static bool
3938 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3939 {
3940   int arch =
3941     bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3942 
3943   /* Force return logic to be reviewed for each new architecture.  */
3944   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3945 
3946   /* Architecture was introduced after ARMv6T2 (e.g. ARMv6-M).  */
3947   return (arch == TAG_CPU_ARCH_V6T2
3948 	  || arch >= TAG_CPU_ARCH_V7);
3949 }
3950 
3951 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3952    .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3953    hash table.  */
3954 
3955 static bool
3956 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3957 {
3958   struct elf32_arm_link_hash_table *htab;
3959 
3960   htab = elf32_arm_hash_table (info);
3961   if (htab == NULL)
3962     return false;
3963 
3964   if (!htab->root.sgot && !create_got_section (dynobj, info))
3965     return false;
3966 
3967   if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3968     return false;
3969 
3970   if (htab->root.target_os == is_vxworks)
3971     {
3972       if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3973 	return false;
3974 
3975       if (bfd_link_pic (info))
3976 	{
3977 	  htab->plt_header_size = 0;
3978 	  htab->plt_entry_size
3979 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3980 	}
3981       else
3982 	{
3983 	  htab->plt_header_size
3984 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3985 	  htab->plt_entry_size
3986 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3987 	}
3988 
3989       if (elf_elfheader (dynobj))
3990 	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3991     }
3992   else
3993     {
3994       /* PR ld/16017
3995 	 Test for thumb only architectures.  Note - we cannot just call
3996 	 using_thumb_only() as the attributes in the output bfd have not been
3997 	 initialised at this point, so instead we use the input bfd.  */
3998       bfd * saved_obfd = htab->obfd;
3999 
4000       htab->obfd = dynobj;
4001       if (using_thumb_only (htab))
4002 	{
4003 	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
4004 	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
4005 	}
4006       htab->obfd = saved_obfd;
4007     }
4008 
4009   if (htab->fdpic_p) {
4010     htab->plt_header_size = 0;
4011     if (info->flags & DF_BIND_NOW)
4012       htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
4013     else
4014       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
4015   }
4016 
4017   if (!htab->root.splt
4018       || !htab->root.srelplt
4019       || !htab->root.sdynbss
4020       || (!bfd_link_pic (info) && !htab->root.srelbss))
4021     abort ();
4022 
4023   return true;
4024 }
4025 
4026 /* Copy the extra info we tack onto an elf_link_hash_entry.  */
4027 
4028 static void
4029 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4030 				struct elf_link_hash_entry *dir,
4031 				struct elf_link_hash_entry *ind)
4032 {
4033   struct elf32_arm_link_hash_entry *edir, *eind;
4034 
4035   edir = (struct elf32_arm_link_hash_entry *) dir;
4036   eind = (struct elf32_arm_link_hash_entry *) ind;
4037 
4038   if (ind->root.type == bfd_link_hash_indirect)
4039     {
4040       /* Copy over PLT info.  */
4041       edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4042       eind->plt.thumb_refcount = 0;
4043       edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4044       eind->plt.maybe_thumb_refcount = 0;
4045       edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4046       eind->plt.noncall_refcount = 0;
4047 
4048       /* Copy FDPIC counters.  */
4049       edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4050       edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4051       edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4052 
4053       /* We should only allocate a function to .iplt once the final
4054 	 symbol information is known.  */
4055       BFD_ASSERT (!eind->is_iplt);
4056 
4057       if (dir->got.refcount <= 0)
4058 	{
4059 	  edir->tls_type = eind->tls_type;
4060 	  eind->tls_type = GOT_UNKNOWN;
4061 	}
4062     }
4063 
4064   _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4065 }
4066 
4067 /* Destroy an ARM elf linker hash table.  */
4068 
4069 static void
4070 elf32_arm_link_hash_table_free (bfd *obfd)
4071 {
4072   struct elf32_arm_link_hash_table *ret
4073     = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4074 
4075   bfd_hash_table_free (&ret->stub_hash_table);
4076   _bfd_elf_link_hash_table_free (obfd);
4077 }
4078 
4079 /* Create an ARM elf linker hash table.  */
4080 
4081 static struct bfd_link_hash_table *
4082 elf32_arm_link_hash_table_create (bfd *abfd)
4083 {
4084   struct elf32_arm_link_hash_table *ret;
4085   size_t amt = sizeof (struct elf32_arm_link_hash_table);
4086 
4087   ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4088   if (ret == NULL)
4089     return NULL;
4090 
4091   if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4092 				      elf32_arm_link_hash_newfunc,
4093 				      sizeof (struct elf32_arm_link_hash_entry),
4094 				      ARM_ELF_DATA))
4095     {
4096       free (ret);
4097       return NULL;
4098     }
4099 
4100   ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4101   ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4102 #ifdef FOUR_WORD_PLT
4103   ret->plt_header_size = 16;
4104   ret->plt_entry_size = 16;
4105 #else
4106   ret->plt_header_size = 20;
4107   ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4108 #endif
4109   ret->use_rel = true;
4110   ret->obfd = abfd;
4111   ret->fdpic_p = 0;
4112 
4113   if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4114 			    sizeof (struct elf32_arm_stub_hash_entry)))
4115     {
4116       _bfd_elf_link_hash_table_free (abfd);
4117       return NULL;
4118     }
4119   ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4120 
4121   return &ret->root.root;
4122 }
4123 
4124 /* Determine what kind of NOPs are available.  */
4125 
4126 static bool
4127 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4128 {
4129   const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4130 					     Tag_CPU_arch);
4131 
4132   /* Force return logic to be reviewed for each new architecture.  */
4133   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4134 
4135   return (arch == TAG_CPU_ARCH_V6T2
4136 	  || arch == TAG_CPU_ARCH_V6K
4137 	  || arch == TAG_CPU_ARCH_V7
4138 	  || arch == TAG_CPU_ARCH_V8
4139 	  || arch == TAG_CPU_ARCH_V8R);
4140 }
4141 
4142 static bool
4143 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4144 {
4145   switch (stub_type)
4146     {
4147     case arm_stub_long_branch_thumb_only:
4148     case arm_stub_long_branch_thumb2_only:
4149     case arm_stub_long_branch_thumb2_only_pure:
4150     case arm_stub_long_branch_v4t_thumb_arm:
4151     case arm_stub_short_branch_v4t_thumb_arm:
4152     case arm_stub_long_branch_v4t_thumb_arm_pic:
4153     case arm_stub_long_branch_v4t_thumb_tls_pic:
4154     case arm_stub_long_branch_thumb_only_pic:
4155     case arm_stub_cmse_branch_thumb_only:
4156       return true;
4157     case arm_stub_none:
4158       BFD_FAIL ();
4159       return false;
4160       break;
4161     default:
4162       return false;
4163     }
4164 }
4165 
4166 /* Determine the type of stub needed, if any, for a call.  */
4167 
4168 static enum elf32_arm_stub_type
4169 arm_type_of_stub (struct bfd_link_info *info,
4170 		  asection *input_sec,
4171 		  const Elf_Internal_Rela *rel,
4172 		  unsigned char st_type,
4173 		  enum arm_st_branch_type *actual_branch_type,
4174 		  struct elf32_arm_link_hash_entry *hash,
4175 		  bfd_vma destination,
4176 		  asection *sym_sec,
4177 		  bfd *input_bfd,
4178 		  const char *name)
4179 {
4180   bfd_vma location;
4181   bfd_signed_vma branch_offset;
4182   unsigned int r_type;
4183   struct elf32_arm_link_hash_table * globals;
4184   bool thumb2, thumb2_bl, thumb_only;
4185   enum elf32_arm_stub_type stub_type = arm_stub_none;
4186   int use_plt = 0;
4187   enum arm_st_branch_type branch_type = *actual_branch_type;
4188   union gotplt_union *root_plt;
4189   struct arm_plt_info *arm_plt;
4190   int arch;
4191   int thumb2_movw;
4192 
4193   if (branch_type == ST_BRANCH_LONG)
4194     return stub_type;
4195 
4196   globals = elf32_arm_hash_table (info);
4197   if (globals == NULL)
4198     return stub_type;
4199 
4200   thumb_only = using_thumb_only (globals);
4201   thumb2 = using_thumb2 (globals);
4202   thumb2_bl = using_thumb2_bl (globals);
4203 
4204   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4205 
4206   /* True for architectures that implement the thumb2 movw instruction.  */
4207   thumb2_movw = thumb2 || (arch  == TAG_CPU_ARCH_V8M_BASE);
4208 
4209   /* Determine where the call point is.  */
4210   location = (input_sec->output_offset
4211 	      + input_sec->output_section->vma
4212 	      + rel->r_offset);
4213 
4214   r_type = ELF32_R_TYPE (rel->r_info);
4215 
4216   /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4217      are considering a function call relocation.  */
4218   if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4219 		     || r_type == R_ARM_THM_JUMP19)
4220       && branch_type == ST_BRANCH_TO_ARM)
4221     branch_type = ST_BRANCH_TO_THUMB;
4222 
4223   /* For TLS call relocs, it is the caller's responsibility to provide
4224      the address of the appropriate trampoline.  */
4225   if (r_type != R_ARM_TLS_CALL
4226       && r_type != R_ARM_THM_TLS_CALL
4227       && elf32_arm_get_plt_info (input_bfd, globals, hash,
4228 				 ELF32_R_SYM (rel->r_info), &root_plt,
4229 				 &arm_plt)
4230       && root_plt->offset != (bfd_vma) -1)
4231     {
4232       asection *splt;
4233 
4234       if (hash == NULL || hash->is_iplt)
4235 	splt = globals->root.iplt;
4236       else
4237 	splt = globals->root.splt;
4238       if (splt != NULL)
4239 	{
4240 	  use_plt = 1;
4241 
4242 	  /* Note when dealing with PLT entries: the main PLT stub is in
4243 	     ARM mode, so if the branch is in Thumb mode, another
4244 	     Thumb->ARM stub will be inserted later just before the ARM
4245 	     PLT stub. If a long branch stub is needed, we'll add a
4246 	     Thumb->Arm one and branch directly to the ARM PLT entry.
4247 	     Here, we have to check if a pre-PLT Thumb->ARM stub
4248 	     is needed and if it will be close enough.  */
4249 
4250 	  destination = (splt->output_section->vma
4251 			 + splt->output_offset
4252 			 + root_plt->offset);
4253 	  st_type = STT_FUNC;
4254 
4255 	  /* Thumb branch/call to PLT: it can become a branch to ARM
4256 	     or to Thumb. We must perform the same checks and
4257 	     corrections as in elf32_arm_final_link_relocate.  */
4258 	  if ((r_type == R_ARM_THM_CALL)
4259 	      || (r_type == R_ARM_THM_JUMP24))
4260 	    {
4261 	      if (globals->use_blx
4262 		  && r_type == R_ARM_THM_CALL
4263 		  && !thumb_only)
4264 		{
4265 		  /* If the Thumb BLX instruction is available, convert
4266 		     the BL to a BLX instruction to call the ARM-mode
4267 		     PLT entry.  */
4268 		  branch_type = ST_BRANCH_TO_ARM;
4269 		}
4270 	      else
4271 		{
4272 		  if (!thumb_only)
4273 		    /* Target the Thumb stub before the ARM PLT entry.  */
4274 		    destination -= PLT_THUMB_STUB_SIZE;
4275 		  branch_type = ST_BRANCH_TO_THUMB;
4276 		}
4277 	    }
4278 	  else
4279 	    {
4280 	      branch_type = ST_BRANCH_TO_ARM;
4281 	    }
4282 	}
4283     }
4284   /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
4285   BFD_ASSERT (st_type != STT_GNU_IFUNC);
4286 
4287   branch_offset = (bfd_signed_vma)(destination - location);
4288 
4289   if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4290       || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4291     {
4292       /* Handle cases where:
4293 	 - this call goes too far (different Thumb/Thumb2 max
4294 	   distance)
4295 	 - it's a Thumb->Arm call and blx is not available, or it's a
4296 	   Thumb->Arm branch (not bl). A stub is needed in this case,
4297 	   but only if this call is not through a PLT entry. Indeed,
4298 	   PLT stubs handle mode switching already.  */
4299       if ((!thumb2_bl
4300 	    && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4301 		|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4302 	  || (thumb2_bl
4303 	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4304 		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4305 	  || (thumb2
4306 	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4307 		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4308 	      && (r_type == R_ARM_THM_JUMP19))
4309 	  || (branch_type == ST_BRANCH_TO_ARM
4310 	      && (((r_type == R_ARM_THM_CALL
4311 		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4312 		  || (r_type == R_ARM_THM_JUMP24)
4313 		  || (r_type == R_ARM_THM_JUMP19))
4314 	      && !use_plt))
4315 	{
4316 	  /* If we need to insert a Thumb-Thumb long branch stub to a
4317 	     PLT, use one that branches directly to the ARM PLT
4318 	     stub. If we pretended we'd use the pre-PLT Thumb->ARM
4319 	     stub, undo this now.  */
4320 	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4321 	    {
4322 	      branch_type = ST_BRANCH_TO_ARM;
4323 	      branch_offset += PLT_THUMB_STUB_SIZE;
4324 	    }
4325 
4326 	  if (branch_type == ST_BRANCH_TO_THUMB)
4327 	    {
4328 	      /* Thumb to thumb.  */
4329 	      if (!thumb_only)
4330 		{
4331 		  if (input_sec->flags & SEC_ELF_PURECODE)
4332 		    _bfd_error_handler
4333 		      (_("%pB(%pA): warning: long branch veneers used in"
4334 			 " section with SHF_ARM_PURECODE section"
4335 			 " attribute is only supported for M-profile"
4336 			 " targets that implement the movw instruction"),
4337 		       input_bfd, input_sec);
4338 
4339 		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4340 		    /* PIC stubs.  */
4341 		    ? ((globals->use_blx
4342 			&& (r_type == R_ARM_THM_CALL))
4343 		       /* V5T and above. Stub starts with ARM code, so
4344 			  we must be able to switch mode before
4345 			  reaching it, which is only possible for 'bl'
4346 			  (ie R_ARM_THM_CALL relocation).  */
4347 		       ? arm_stub_long_branch_any_thumb_pic
4348 		       /* On V4T, use Thumb code only.  */
4349 		       : arm_stub_long_branch_v4t_thumb_thumb_pic)
4350 
4351 		    /* non-PIC stubs.  */
4352 		    : ((globals->use_blx
4353 			&& (r_type == R_ARM_THM_CALL))
4354 		       /* V5T and above.  */
4355 		       ? arm_stub_long_branch_any_any
4356 		       /* V4T.  */
4357 		       : arm_stub_long_branch_v4t_thumb_thumb);
4358 		}
4359 	      else
4360 		{
4361 		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4362 		      stub_type = arm_stub_long_branch_thumb2_only_pure;
4363 		  else
4364 		    {
4365 		      if (input_sec->flags & SEC_ELF_PURECODE)
4366 			_bfd_error_handler
4367 			  (_("%pB(%pA): warning: long branch veneers used in"
4368 			     " section with SHF_ARM_PURECODE section"
4369 			     " attribute is only supported for M-profile"
4370 			     " targets that implement the movw instruction"),
4371 			   input_bfd, input_sec);
4372 
4373 		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4374 			/* PIC stub.  */
4375 			? arm_stub_long_branch_thumb_only_pic
4376 			/* non-PIC stub.  */
4377 			: (thumb2 ? arm_stub_long_branch_thumb2_only
4378 				  : arm_stub_long_branch_thumb_only);
4379 		    }
4380 		}
4381 	    }
4382 	  else
4383 	    {
4384 	      if (input_sec->flags & SEC_ELF_PURECODE)
4385 		_bfd_error_handler
4386 		  (_("%pB(%pA): warning: long branch veneers used in"
4387 		     " section with SHF_ARM_PURECODE section"
4388 		     " attribute is only supported for M-profile"
4389 		     " targets that implement the movw instruction"),
4390 		   input_bfd, input_sec);
4391 
4392 	      /* Thumb to arm.  */
4393 	      if (sym_sec != NULL
4394 		  && sym_sec->owner != NULL
4395 		  && !INTERWORK_FLAG (sym_sec->owner))
4396 		{
4397 		  _bfd_error_handler
4398 		    (_("%pB(%s): warning: interworking not enabled;"
4399 		       " first occurrence: %pB: %s call to %s"),
4400 		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4401 		}
4402 
4403 	      stub_type =
4404 		(bfd_link_pic (info) | globals->pic_veneer)
4405 		/* PIC stubs.  */
4406 		? (r_type == R_ARM_THM_TLS_CALL
4407 		   /* TLS PIC stubs.  */
4408 		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4409 		      : arm_stub_long_branch_v4t_thumb_tls_pic)
4410 		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4411 		      /* V5T PIC and above.  */
4412 		      ? arm_stub_long_branch_any_arm_pic
4413 		      /* V4T PIC stub.  */
4414 		      : arm_stub_long_branch_v4t_thumb_arm_pic))
4415 
4416 		/* non-PIC stubs.  */
4417 		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
4418 		   /* V5T and above.  */
4419 		   ? arm_stub_long_branch_any_any
4420 		   /* V4T.  */
4421 		   : arm_stub_long_branch_v4t_thumb_arm);
4422 
4423 	      /* Handle v4t short branches.  */
4424 	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4425 		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4426 		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4427 		stub_type = arm_stub_short_branch_v4t_thumb_arm;
4428 	    }
4429 	}
4430     }
4431   else if (r_type == R_ARM_CALL
4432 	   || r_type == R_ARM_JUMP24
4433 	   || r_type == R_ARM_PLT32
4434 	   || r_type == R_ARM_TLS_CALL)
4435     {
4436       if (input_sec->flags & SEC_ELF_PURECODE)
4437 	_bfd_error_handler
4438 	  (_("%pB(%pA): warning: long branch veneers used in"
4439 	     " section with SHF_ARM_PURECODE section"
4440 	     " attribute is only supported for M-profile"
4441 	     " targets that implement the movw instruction"),
4442 	   input_bfd, input_sec);
4443       if (branch_type == ST_BRANCH_TO_THUMB)
4444 	{
4445 	  /* Arm to thumb.  */
4446 
4447 	  if (sym_sec != NULL
4448 	      && sym_sec->owner != NULL
4449 	      && !INTERWORK_FLAG (sym_sec->owner))
4450 	    {
4451 	      _bfd_error_handler
4452 		(_("%pB(%s): warning: interworking not enabled;"
4453 		   " first occurrence: %pB: %s call to %s"),
4454 		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4455 	    }
4456 
4457 	  /* We have an extra 2-bytes reach because of
4458 	     the mode change (bit 24 (H) of BLX encoding).  */
4459 	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4460 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4461 	      || (r_type == R_ARM_CALL && !globals->use_blx)
4462 	      || (r_type == R_ARM_JUMP24)
4463 	      || (r_type == R_ARM_PLT32))
4464 	    {
4465 	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4466 		/* PIC stubs.  */
4467 		? ((globals->use_blx)
4468 		   /* V5T and above.  */
4469 		   ? arm_stub_long_branch_any_thumb_pic
4470 		   /* V4T stub.  */
4471 		   : arm_stub_long_branch_v4t_arm_thumb_pic)
4472 
4473 		/* non-PIC stubs.  */
4474 		: ((globals->use_blx)
4475 		   /* V5T and above.  */
4476 		   ? arm_stub_long_branch_any_any
4477 		   /* V4T.  */
4478 		   : arm_stub_long_branch_v4t_arm_thumb);
4479 	    }
4480 	}
4481       else
4482 	{
4483 	  /* Arm to arm.  */
4484 	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4485 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4486 	    {
4487 	      stub_type =
4488 		(bfd_link_pic (info) | globals->pic_veneer)
4489 		/* PIC stubs.  */
4490 		? (r_type == R_ARM_TLS_CALL
4491 		   /* TLS PIC Stub.  */
4492 		   ? arm_stub_long_branch_any_tls_pic
4493 		   : (globals->root.target_os == is_nacl
4494 		      ? arm_stub_long_branch_arm_nacl_pic
4495 		      : arm_stub_long_branch_any_arm_pic))
4496 		/* non-PIC stubs.  */
4497 		: (globals->root.target_os == is_nacl
4498 		   ? arm_stub_long_branch_arm_nacl
4499 		   : arm_stub_long_branch_any_any);
4500 	    }
4501 	}
4502     }
4503 
4504   /* If a stub is needed, record the actual destination type.  */
4505   if (stub_type != arm_stub_none)
4506     *actual_branch_type = branch_type;
4507 
4508   return stub_type;
4509 }
4510 
4511 /* Build a name for an entry in the stub hash table.  */
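/* For instance (values purely illustrative): a stub for a call to the
   global symbol "printf" from the section group with id 0x2a, addend 0 and
   stub type 3 is named "0000002a_printf+0_3", while a stub for local symbol
   number 5 defined in the section with id 0x1f gets a name of the form
   "0000002a_1f:5+0_3".  */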
4512 
4513 static char *
4514 elf32_arm_stub_name (const asection *input_section,
4515 		     const asection *sym_sec,
4516 		     const struct elf32_arm_link_hash_entry *hash,
4517 		     const Elf_Internal_Rela *rel,
4518 		     enum elf32_arm_stub_type stub_type)
4519 {
4520   char *stub_name;
4521   bfd_size_type len;
4522 
4523   if (hash)
4524     {
4525       len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4526       stub_name = (char *) bfd_malloc (len);
4527       if (stub_name != NULL)
4528 	sprintf (stub_name, "%08x_%s+%x_%d",
4529 		 input_section->id & 0xffffffff,
4530 		 hash->root.root.root.string,
4531 		 (int) rel->r_addend & 0xffffffff,
4532 		 (int) stub_type);
4533     }
4534   else
4535     {
4536       len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4537       stub_name = (char *) bfd_malloc (len);
4538       if (stub_name != NULL)
4539 	sprintf (stub_name, "%08x_%x:%x+%x_%d",
4540 		 input_section->id & 0xffffffff,
4541 		 sym_sec->id & 0xffffffff,
4542 		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4543 		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4544 		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4545 		 (int) rel->r_addend & 0xffffffff,
4546 		 (int) stub_type);
4547     }
4548 
4549   return stub_name;
4550 }
4551 
4552 /* Look up an entry in the stub hash.  Stub entries are cached because
4553    creating the stub name takes a bit of time.  */
4554 
4555 static struct elf32_arm_stub_hash_entry *
4556 elf32_arm_get_stub_entry (const asection *input_section,
4557 			  const asection *sym_sec,
4558 			  struct elf_link_hash_entry *hash,
4559 			  const Elf_Internal_Rela *rel,
4560 			  struct elf32_arm_link_hash_table *htab,
4561 			  enum elf32_arm_stub_type stub_type)
4562 {
4563   struct elf32_arm_stub_hash_entry *stub_entry;
4564   struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4565   const asection *id_sec;
4566 
4567   if ((input_section->flags & SEC_CODE) == 0)
4568     return NULL;
4569 
4570   /* If the input section is the CMSE stub section and it needs a long
4571      branch stub to reach its final destination, give up with an
4572      error message: this is not supported.  See PR ld/24709.  */
4573   if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
4574     {
4575       bfd *output_bfd = htab->obfd;
4576       asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);
4577 
4578       _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4579 			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
4580 			  CMSE_STUB_NAME,
4581 			  (uint64_t)out_sec->output_section->vma
4582 			    + out_sec->output_offset,
4583 			  (uint64_t)sym_sec->output_section->vma
4584 			    + sym_sec->output_offset
4585 			    + h->root.root.u.def.value);
4586       /* Exit, rather than leave incompletely processed
4587 	 relocations.  */
4588       xexit (1);
4589     }
4590 
4591   /* If this input section is part of a group of sections sharing one
4592      stub section, then use the id of the first section in the group.
4593      Stub names need to include a section id, as there may well be
4594      more than one stub used to reach say, printf, and we need to
4595      distinguish between them.  */
4596   BFD_ASSERT (input_section->id <= htab->top_id);
4597   id_sec = htab->stub_group[input_section->id].link_sec;
4598 
4599   if (h != NULL && h->stub_cache != NULL
4600       && h->stub_cache->h == h
4601       && h->stub_cache->id_sec == id_sec
4602       && h->stub_cache->stub_type == stub_type)
4603     {
4604       stub_entry = h->stub_cache;
4605     }
4606   else
4607     {
4608       char *stub_name;
4609 
4610       stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4611       if (stub_name == NULL)
4612 	return NULL;
4613 
4614       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4615 					stub_name, false, false);
4616       if (h != NULL)
4617 	h->stub_cache = stub_entry;
4618 
4619       free (stub_name);
4620     }
4621 
4622   return stub_entry;
4623 }
4624 
4625 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4626    section.  */
4627 
4628 static bool
4629 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4630 {
4631   if (stub_type >= max_stub_type)
4632     abort ();  /* Should be unreachable.  */
4633 
4634   switch (stub_type)
4635     {
4636     case arm_stub_cmse_branch_thumb_only:
4637       return true;
4638 
4639     default:
4640       return false;
4641     }
4642 
4643   abort ();  /* Should be unreachable.  */
4644 }
4645 
4646 /* Required alignment (as a power of 2) for the dedicated section holding
4647    veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4648    with input sections.  */
4649 
4650 static int
4651 arm_dedicated_stub_output_section_required_alignment
4652   (enum elf32_arm_stub_type stub_type)
4653 {
4654   if (stub_type >= max_stub_type)
4655     abort ();  /* Should be unreachable.  */
4656 
4657   switch (stub_type)
4658     {
4659     /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
4660        boundary, hence the power of two returned here is 5.  */
4661     case arm_stub_cmse_branch_thumb_only:
4662       return 5;
4663 
4664     default:
4665       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4666       return 0;
4667     }
4668 
4669   abort ();  /* Should be unreachable.  */
4670 }
4671 
4672 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4673    NULL if veneers of this type are interspersed with input sections.  */
4674 
4675 static const char *
4676 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4677 {
4678   if (stub_type >= max_stub_type)
4679     abort ();  /* Should be unreachable.  */
4680 
4681   switch (stub_type)
4682     {
4683     case arm_stub_cmse_branch_thumb_only:
4684       return CMSE_STUB_NAME;
4685 
4686     default:
4687       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4688       return NULL;
4689     }
4690 
4691   abort ();  /* Should be unreachable.  */
4692 }
4693 
4694 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4695    returns the address of the hash table field in HTAB holding a pointer to the
4696    corresponding input section.  Otherwise, returns NULL.  */
4697 
4698 static asection **
4699 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4700 				      enum elf32_arm_stub_type stub_type)
4701 {
4702   if (stub_type >= max_stub_type)
4703     abort ();  /* Should be unreachable.  */
4704 
4705   switch (stub_type)
4706     {
4707     case arm_stub_cmse_branch_thumb_only:
4708       return &htab->cmse_stub_sec;
4709 
4710     default:
4711       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4712       return NULL;
4713     }
4714 
4715   abort ();  /* Should be unreachable.  */
4716 }
4717 
4718 /* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
4719    is the section that branches into the veneer; it can be NULL if the stub
4720    should go in a dedicated output section.  Returns a pointer to the stub
4721    section, and the section to which the stub section will be attached (in
4722    *LINK_SEC_P).  LINK_SEC_P may be NULL.  */
4723 
4724 static asection *
4725 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4726 				   struct elf32_arm_link_hash_table *htab,
4727 				   enum elf32_arm_stub_type stub_type)
4728 {
4729   asection *link_sec, *out_sec, **stub_sec_p;
4730   const char *stub_sec_prefix;
4731   bool dedicated_output_section =
4732     arm_dedicated_stub_output_section_required (stub_type);
4733   int align;
4734 
4735   if (dedicated_output_section)
4736     {
4737       bfd *output_bfd = htab->obfd;
4738       const char *out_sec_name =
4739 	arm_dedicated_stub_output_section_name (stub_type);
4740       link_sec = NULL;
4741       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4742       stub_sec_prefix = out_sec_name;
4743       align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4744       out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4745       if (out_sec == NULL)
4746 	{
4747 	  _bfd_error_handler (_("no address assigned to the veneers output "
4748 				"section %s"), out_sec_name);
4749 	  return NULL;
4750 	}
4751     }
4752   else
4753     {
4754       BFD_ASSERT (section->id <= htab->top_id);
4755       link_sec = htab->stub_group[section->id].link_sec;
4756       BFD_ASSERT (link_sec != NULL);
4757       stub_sec_p = &htab->stub_group[section->id].stub_sec;
4758       if (*stub_sec_p == NULL)
4759 	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4760       stub_sec_prefix = link_sec->name;
4761       out_sec = link_sec->output_section;
4762       align = htab->root.target_os == is_nacl ? 4 : 3;
4763     }
4764 
4765   if (*stub_sec_p == NULL)
4766     {
4767       size_t namelen;
4768       bfd_size_type len;
4769       char *s_name;
4770 
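      /* The stub section is named after the prefix chosen above (the link
	 section's name, or the dedicated output section's name) with
	 STUB_SUFFIX appended.  */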
4771       namelen = strlen (stub_sec_prefix);
4772       len = namelen + sizeof (STUB_SUFFIX);
4773       s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4774       if (s_name == NULL)
4775 	return NULL;
4776 
4777       memcpy (s_name, stub_sec_prefix, namelen);
4778       memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4779       *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4780 					       align);
4781       if (*stub_sec_p == NULL)
4782 	return NULL;
4783 
4784       out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4785 			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4786 			| SEC_KEEP;
4787     }
4788 
4789   if (!dedicated_output_section)
4790     htab->stub_group[section->id].stub_sec = *stub_sec_p;
4791 
4792   if (link_sec_p)
4793     *link_sec_p = link_sec;
4794 
4795   return *stub_sec_p;
4796 }
4797 
4798 /* Add a new stub entry to the stub hash.  Not all fields of the new
4799    stub entry are initialised.  */
4800 
4801 static struct elf32_arm_stub_hash_entry *
4802 elf32_arm_add_stub (const char *stub_name, asection *section,
4803 		    struct elf32_arm_link_hash_table *htab,
4804 		    enum elf32_arm_stub_type stub_type)
4805 {
4806   asection *link_sec;
4807   asection *stub_sec;
4808   struct elf32_arm_stub_hash_entry *stub_entry;
4809 
4810   stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4811 						stub_type);
4812   if (stub_sec == NULL)
4813     return NULL;
4814 
4815   /* Enter this entry into the linker stub hash table.  */
4816   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4817 				     true, false);
4818   if (stub_entry == NULL)
4819     {
4820       if (section == NULL)
4821 	section = stub_sec;
4822       _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4823 			  section->owner, stub_name);
4824       return NULL;
4825     }
4826 
4827   stub_entry->stub_sec = stub_sec;
4828   stub_entry->stub_offset = (bfd_vma) -1;
4829   stub_entry->id_sec = link_sec;
4830 
4831   return stub_entry;
4832 }
4833 
4834 /* Store an Arm insn into an output section not processed by
4835    elf32_arm_write_section.  */
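/* Note: when the hash table's byteswap_code flag is set (as it can be for
   BE8-style images, where code is stored little-endian in an otherwise
   big-endian output), the helpers below deliberately emit instructions in
   the opposite byte order from the output BFD's data endianness.  */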
4836 
4837 static void
4838 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4839 	      bfd * output_bfd, bfd_vma val, void * ptr)
4840 {
4841   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4842     bfd_putl32 (val, ptr);
4843   else
4844     bfd_putb32 (val, ptr);
4845 }
4846 
4847 /* Store a 16-bit Thumb insn into an output section not processed by
4848    elf32_arm_write_section.  */
4849 
4850 static void
4851 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4852 		bfd * output_bfd, bfd_vma val, void * ptr)
4853 {
4854   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4855     bfd_putl16 (val, ptr);
4856   else
4857     bfd_putb16 (val, ptr);
4858 }
4859 
4860 /* Store a Thumb2 insn into an output section not processed by
4861    elf32_arm_write_section.  */
4862 
4863 static void
4864 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4865 		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4866 {
4867   /* T2 instructions are 16-bit streamed.  */
4868   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4869     {
4870       bfd_putl16 ((val >> 16) & 0xffff, ptr);
4871       bfd_putl16 ((val & 0xffff), ptr + 2);
4872     }
4873   else
4874     {
4875       bfd_putb16 ((val >> 16) & 0xffff, ptr);
4876       bfd_putb16 ((val & 0xffff), ptr + 2);
4877     }
4878 }
4879 
4880 /* If it's possible to change R_TYPE to a more efficient access
4881    model, return the new reloc type.  */
4882 
4883 static unsigned
4884 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4885 			  struct elf_link_hash_entry *h)
4886 {
4887   int is_local = (h == NULL);
4888 
4889   if (bfd_link_dll (info)
4890       || (h && h->root.type == bfd_link_hash_undefweak))
4891     return r_type;
4892 
4893   /* We do not support relaxations for Old TLS models.  */
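  /* Illustration of the relaxation below: when not producing a shared
     object, TLS descriptor style accesses (GOTDESC/CALL/DESCSEQ) are
     rewritten to Initial Exec (R_ARM_TLS_IE32) for global symbols and to
     Local Exec (R_ARM_TLS_LE32) for local ones.  */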
4894   switch (r_type)
4895     {
4896     case R_ARM_TLS_GOTDESC:
4897     case R_ARM_TLS_CALL:
4898     case R_ARM_THM_TLS_CALL:
4899     case R_ARM_TLS_DESCSEQ:
4900     case R_ARM_THM_TLS_DESCSEQ:
4901       return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4902     }
4903 
4904   return r_type;
4905 }
4906 
4907 static bfd_reloc_status_type elf32_arm_final_link_relocate
4908   (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4909    Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4910    const char *, unsigned char, enum arm_st_branch_type,
4911    struct elf_link_hash_entry *, bool *, char **);
4912 
4913 static unsigned int
4914 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4915 {
4916   switch (stub_type)
4917     {
4918     case arm_stub_a8_veneer_b_cond:
4919     case arm_stub_a8_veneer_b:
4920     case arm_stub_a8_veneer_bl:
4921       return 2;
4922 
4923     case arm_stub_long_branch_any_any:
4924     case arm_stub_long_branch_v4t_arm_thumb:
4925     case arm_stub_long_branch_thumb_only:
4926     case arm_stub_long_branch_thumb2_only:
4927     case arm_stub_long_branch_thumb2_only_pure:
4928     case arm_stub_long_branch_v4t_thumb_thumb:
4929     case arm_stub_long_branch_v4t_thumb_arm:
4930     case arm_stub_short_branch_v4t_thumb_arm:
4931     case arm_stub_long_branch_any_arm_pic:
4932     case arm_stub_long_branch_any_thumb_pic:
4933     case arm_stub_long_branch_v4t_thumb_thumb_pic:
4934     case arm_stub_long_branch_v4t_arm_thumb_pic:
4935     case arm_stub_long_branch_v4t_thumb_arm_pic:
4936     case arm_stub_long_branch_thumb_only_pic:
4937     case arm_stub_long_branch_any_tls_pic:
4938     case arm_stub_long_branch_v4t_thumb_tls_pic:
4939     case arm_stub_cmse_branch_thumb_only:
4940     case arm_stub_a8_veneer_blx:
4941       return 4;
4942 
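    /* Native Client groups ARM instructions into 16-byte bundles, which is
       presumably why these stubs ask for 16-byte alignment.  */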
4943     case arm_stub_long_branch_arm_nacl:
4944     case arm_stub_long_branch_arm_nacl_pic:
4945       return 16;
4946 
4947     default:
4948       abort ();  /* Should be unreachable.  */
4949     }
4950 }
4951 
4952 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4953    veneering (TRUE) or have their own symbol (FALSE).  */
4954 
4955 static bool
4956 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4957 {
4958   if (stub_type >= max_stub_type)
4959     abort ();  /* Should be unreachable.  */
4960 
4961   switch (stub_type)
4962     {
4963     case arm_stub_cmse_branch_thumb_only:
4964       return true;
4965 
4966     default:
4967       return false;
4968     }
4969 
4970   abort ();  /* Should be unreachable.  */
4971 }
4972 
4973 /* Returns the padding needed for the dedicated section used by stubs of type
4974    STUB_TYPE.  */
4975 
4976 static int
4977 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4978 {
4979   if (stub_type >= max_stub_type)
4980     abort ();  /* Should be unreachable.  */
4981 
4982   switch (stub_type)
4983     {
4984     case arm_stub_cmse_branch_thumb_only:
4985       return 32;
4986 
4987     default:
4988       return 0;
4989     }
4990 
4991   abort ();  /* Should be unreachable.  */
4992 }
4993 
4994 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4995    returns the address of the hash table field in HTAB holding the offset at
4996    which new veneers should be laid out in the stub section.  */
4997 
4998 static bfd_vma*
4999 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5000 				enum elf32_arm_stub_type stub_type)
5001 {
5002   switch (stub_type)
5003     {
5004     case arm_stub_cmse_branch_thumb_only:
5005       return &htab->new_cmse_stub_offset;
5006 
5007     default:
5008       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5009       return NULL;
5010     }
5011 }
5012 
5013 static bool
5014 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5015 		    void * in_arg)
5016 {
5017 #define MAXRELOCS 3
5018   bool removed_sg_veneer;
5019   struct elf32_arm_stub_hash_entry *stub_entry;
5020   struct elf32_arm_link_hash_table *globals;
5021   struct bfd_link_info *info;
5022   asection *stub_sec;
5023   bfd *stub_bfd;
5024   bfd_byte *loc;
5025   bfd_vma sym_value;
5026   int template_size;
5027   int size;
5028   const insn_sequence *template_sequence;
5029   int i;
5030   int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5031   int stub_reloc_offset[MAXRELOCS] = {0, 0};
5032   int nrelocs = 0;
5033   int just_allocated = 0;
5034 
5035   /* Massage our args to the form they really have.  */
5036   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5037   info = (struct bfd_link_info *) in_arg;
5038 
5039   /* Fail if the target section could not be assigned to an output
5040      section.  The user should fix his linker script.  */
5041   if (stub_entry->target_section->output_section == NULL
5042       && info->non_contiguous_regions)
5043     info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
5044 			      "Retry without --enable-non-contiguous-regions.\n"),
5045 			    stub_entry->target_section);
5046 
5047   globals = elf32_arm_hash_table (info);
5048   if (globals == NULL)
5049     return false;
5050 
5051   stub_sec = stub_entry->stub_sec;
5052 
5053   if ((globals->fix_cortex_a8 < 0)
5054       != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5055     /* We have to do less-strictly-aligned fixes last.  */
5056     return true;
5057 
5058   /* Assign a slot at the end of section if none assigned yet.  */
5059   if (stub_entry->stub_offset == (bfd_vma) -1)
5060     {
5061       stub_entry->stub_offset = stub_sec->size;
5062       just_allocated = 1;
5063     }
5064   loc = stub_sec->contents + stub_entry->stub_offset;
5065 
5066   stub_bfd = stub_sec->owner;
5067 
5068   /* This is the address of the stub destination.  */
5069   sym_value = (stub_entry->target_value
5070 	       + stub_entry->target_section->output_offset
5071 	       + stub_entry->target_section->output_section->vma);
5072 
5073   template_sequence = stub_entry->stub_template;
5074   template_size = stub_entry->stub_template_size;
5075 
5076   size = 0;
5077   for (i = 0; i < template_size; i++)
5078     {
5079       switch (template_sequence[i].type)
5080 	{
5081 	case THUMB16_TYPE:
5082 	  {
5083 	    bfd_vma data = (bfd_vma) template_sequence[i].data;
5084 	    if (template_sequence[i].reloc_addend != 0)
5085 	      {
5086 		/* We've borrowed the reloc_addend field to mean we should
5087 		   insert a condition code into this (Thumb-1 branch)
5088 		   instruction.  See THUMB16_BCOND_INSN.  */
5089 		BFD_ASSERT ((data & 0xff00) == 0xd000);
5090 		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5091 	      }
5092 	    bfd_put_16 (stub_bfd, data, loc + size);
5093 	    size += 2;
5094 	  }
5095 	  break;
5096 
5097 	case THUMB32_TYPE:
5098 	  bfd_put_16 (stub_bfd,
5099 		      (template_sequence[i].data >> 16) & 0xffff,
5100 		      loc + size);
5101 	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5102 		      loc + size + 2);
5103 	  if (template_sequence[i].r_type != R_ARM_NONE)
5104 	    {
5105 	      stub_reloc_idx[nrelocs] = i;
5106 	      stub_reloc_offset[nrelocs++] = size;
5107 	    }
5108 	  size += 4;
5109 	  break;
5110 
5111 	case ARM_TYPE:
5112 	  bfd_put_32 (stub_bfd, template_sequence[i].data,
5113 		      loc + size);
5114 	  /* Handle cases where the target is encoded within the
5115 	     instruction.  */
5116 	  if (template_sequence[i].r_type == R_ARM_JUMP24)
5117 	    {
5118 	      stub_reloc_idx[nrelocs] = i;
5119 	      stub_reloc_offset[nrelocs++] = size;
5120 	    }
5121 	  size += 4;
5122 	  break;
5123 
5124 	case DATA_TYPE:
5125 	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5126 	  stub_reloc_idx[nrelocs] = i;
5127 	  stub_reloc_offset[nrelocs++] = size;
5128 	  size += 4;
5129 	  break;
5130 
5131 	default:
5132 	  BFD_FAIL ();
5133 	  return false;
5134 	}
5135     }
5136 
5137   if (just_allocated)
5138     stub_sec->size += size;
5139 
5140   /* Stub size has already been computed in arm_size_one_stub. Check
5141      consistency.  */
5142   BFD_ASSERT (size == stub_entry->stub_size);
5143 
5144   /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
5145   if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5146     sym_value |= 1;
5147 
5148   /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5149      to relocate in each stub.  */
5150   removed_sg_veneer =
5151     (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5152   BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5153 
5154   for (i = 0; i < nrelocs; i++)
5155     {
5156       Elf_Internal_Rela rel;
5157       bool unresolved_reloc;
5158       char *error_message;
5159       bfd_vma points_to =
5160 	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5161 
5162       rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5163       rel.r_info = ELF32_R_INFO (0,
5164 				 template_sequence[stub_reloc_idx[i]].r_type);
5165       rel.r_addend = 0;
5166 
5167       if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5168 	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5169 	   template should refer back to the instruction after the original
5170 	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
5171 	   are only generated when both source and target are in the same
5172 	   section.  */
5173 	points_to = stub_entry->target_section->output_section->vma
5174 		    + stub_entry->target_section->output_offset
5175 		    + stub_entry->source_value;
5176 
5177       elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5178 	  (template_sequence[stub_reloc_idx[i]].r_type),
5179 	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5180 	   points_to, info, stub_entry->target_section, "", STT_FUNC,
5181 	   stub_entry->branch_type,
5182 	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5183 	   &error_message);
5184     }
5185 
5186   return true;
5187 #undef MAXRELOCS
5188 }
5189 
5190 /* Calculate the template, template size and instruction size for a stub.
5191    Return value is the instruction size.  */
5192 
5193 static unsigned int
5194 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5195 			     const insn_sequence **stub_template,
5196 			     int *stub_template_size)
5197 {
5198   const insn_sequence *template_sequence = NULL;
5199   int template_size = 0, i;
5200   unsigned int size;
5201 
5202   template_sequence = stub_definitions[stub_type].template_sequence;
5203   if (stub_template)
5204     *stub_template = template_sequence;
5205 
5206   template_size = stub_definitions[stub_type].template_size;
5207   if (stub_template_size)
5208     *stub_template_size = template_size;
5209 
5210   size = 0;
5211   for (i = 0; i < template_size; i++)
5212     {
5213       switch (template_sequence[i].type)
5214 	{
5215 	case THUMB16_TYPE:
5216 	  size += 2;
5217 	  break;
5218 
5219 	case ARM_TYPE:
5220 	case THUMB32_TYPE:
5221 	case DATA_TYPE:
5222 	  size += 4;
5223 	  break;
5224 
5225 	default:
5226 	  BFD_FAIL ();
5227 	  return 0;
5228 	}
5229     }
5230 
5231   return size;
5232 }
5233 
5234 /* As above, but don't actually build the stub.  Just bump offset so
5235    we know stub section sizes.  */
5236 
5237 static bool
5238 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5239 		   void *in_arg ATTRIBUTE_UNUSED)
5240 {
5241   struct elf32_arm_stub_hash_entry *stub_entry;
5242   const insn_sequence *template_sequence;
5243   int template_size, size;
5244 
5245   /* Massage our args to the form they really have.  */
5246   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5247 
5248   BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
5249 	      && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
5250 
5251   size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5252 				      &template_size);
5253 
5254   /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
5255   if (stub_entry->stub_template_size)
5256     {
5257       stub_entry->stub_size = size;
5258       stub_entry->stub_template = template_sequence;
5259       stub_entry->stub_template_size = template_size;
5260     }
5261 
5262   /* Already accounted for.  */
5263   if (stub_entry->stub_offset != (bfd_vma) -1)
5264     return true;
5265 
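  /* The rounding below pads each stub's size up to a multiple of 8 bytes,
     presumably to keep individual stubs doubleword-aligned within the stub
     section.  */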
5266   size = (size + 7) & ~7;
5267   stub_entry->stub_sec->size += size;
5268 
5269   return true;
5270 }
5271 
5272 /* External entry points for sizing and building linker stubs.  */
5273 
5274 /* Set up various things so that we can make a list of input sections
5275    for each output section included in the link.  Returns -1 on error,
5276    0 when no stubs will be needed, and 1 on success.  */
5277 
5278 int
5279 elf32_arm_setup_section_lists (bfd *output_bfd,
5280 			       struct bfd_link_info *info)
5281 {
5282   bfd *input_bfd;
5283   unsigned int bfd_count;
5284   unsigned int top_id, top_index;
5285   asection *section;
5286   asection **input_list, **list;
5287   size_t amt;
5288   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5289 
5290   if (htab == NULL)
5291     return 0;
5292 
5293   /* Count the number of input BFDs and find the top input section id.  */
5294   for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5295        input_bfd != NULL;
5296        input_bfd = input_bfd->link.next)
5297     {
5298       bfd_count += 1;
5299       for (section = input_bfd->sections;
5300 	   section != NULL;
5301 	   section = section->next)
5302 	{
5303 	  if (top_id < section->id)
5304 	    top_id = section->id;
5305 	}
5306     }
5307   htab->bfd_count = bfd_count;
5308 
5309   amt = sizeof (struct map_stub) * (top_id + 1);
5310   htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5311   if (htab->stub_group == NULL)
5312     return -1;
5313   htab->top_id = top_id;
5314 
5315   /* We can't use output_bfd->section_count here to find the top output
5316      section index as some sections may have been removed, and
5317      _bfd_strip_section_from_output doesn't renumber the indices.  */
5318   for (section = output_bfd->sections, top_index = 0;
5319        section != NULL;
5320        section = section->next)
5321     {
5322       if (top_index < section->index)
5323 	top_index = section->index;
5324     }
5325 
5326   htab->top_index = top_index;
5327   amt = sizeof (asection *) * (top_index + 1);
5328   input_list = (asection **) bfd_malloc (amt);
5329   htab->input_list = input_list;
5330   if (input_list == NULL)
5331     return -1;
5332 
5333   /* For sections we aren't interested in, mark their entries with a
5334      value we can check later.  */
5335   list = input_list + top_index;
5336   do
5337     *list = bfd_abs_section_ptr;
5338   while (list-- != input_list);
5339 
5340   for (section = output_bfd->sections;
5341        section != NULL;
5342        section = section->next)
5343     {
5344       if ((section->flags & SEC_CODE) != 0)
5345 	input_list[section->index] = NULL;
5346     }
5347 
5348   return 1;
5349 }
5350 
5351 /* The linker repeatedly calls this function for each input section,
5352    in the order that input sections are linked into output sections.
5353    Build lists of input sections to determine groupings between which
5354    we may insert linker stubs.  */
5355 
5356 void
5357 elf32_arm_next_input_section (struct bfd_link_info *info,
5358 			      asection *isec)
5359 {
5360   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5361 
5362   if (htab == NULL)
5363     return;
5364 
5365   if (isec->output_section->index <= htab->top_index)
5366     {
5367       asection **list = htab->input_list + isec->output_section->index;
5368 
5369       if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5370 	{
5371 	  /* Steal the link_sec pointer for our list.  */
5372 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5373 	  /* This happens to make the list in reverse order,
5374 	     which we reverse later.  */
5375 	  PREV_SEC (isec) = *list;
5376 	  *list = isec;
5377 	}
5378     }
5379 }
5380 
5381 /* See whether we can group stub sections together.  Grouping stub
5382    sections may result in fewer stubs.  More importantly, we need to
5383    put all .init* and .fini* stubs at the end of the .init or
5384    .fini output sections respectively, because glibc splits the
5385    _init and _fini functions into multiple parts.  Putting a stub in
5386    the middle of a function is not a good idea.  */
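/* A sketch of the grouping below: walk each output section's input-section
   list in link order; starting from the current head, keep adding sections
   while the distance from the group's start to the candidate's end stays
   under STUB_GROUP_SIZE.  Every member then records the last member as its
   link_sec, i.e. the section after which the shared stub section will be
   placed.  Unless stubs must follow the branch, sections lying within
   STUB_GROUP_SIZE after that point are also attached to the same stub
   section.  */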
5387 
5388 static void
5389 group_sections (struct elf32_arm_link_hash_table *htab,
5390 		bfd_size_type stub_group_size,
5391 		bool stubs_always_after_branch)
5392 {
5393   asection **list = htab->input_list;
5394 
5395   do
5396     {
5397       asection *tail = *list;
5398       asection *head;
5399 
5400       if (tail == bfd_abs_section_ptr)
5401 	continue;
5402 
5403       /* Reverse the list: we must avoid placing stubs at the
5404 	 beginning of the section because the beginning of the text
5405 	 section may be required for an interrupt vector in bare metal
5406 	 code.  */
5407 #define NEXT_SEC PREV_SEC
5408       head = NULL;
5409       while (tail != NULL)
5410 	{
5411 	  /* Pop from tail.  */
5412 	  asection *item = tail;
5413 	  tail = PREV_SEC (item);
5414 
5415 	  /* Push on head.  */
5416 	  NEXT_SEC (item) = head;
5417 	  head = item;
5418 	}
5419 
5420       while (head != NULL)
5421 	{
5422 	  asection *curr;
5423 	  asection *next;
5424 	  bfd_vma stub_group_start = head->output_offset;
5425 	  bfd_vma end_of_next;
5426 
5427 	  curr = head;
5428 	  while (NEXT_SEC (curr) != NULL)
5429 	    {
5430 	      next = NEXT_SEC (curr);
5431 	      end_of_next = next->output_offset + next->size;
5432 	      if (end_of_next - stub_group_start >= stub_group_size)
5433 		/* End of NEXT is too far from start, so stop.  */
5434 		break;
5435 	      /* Add NEXT to the group.  */
5436 	      curr = next;
5437 	    }
5438 
5439 	  /* OK, the size from the start to the start of CURR is less
5440 	     than stub_group_size and thus can be handled by one stub
5441 	     section.  (Or the head section is itself larger than
5442 	     stub_group_size, in which case we may be toast.)
5443 	     We should really be keeping track of the total size of
5444 	     stubs added here, as stubs contribute to the final output
5445 	     section size.  */
5446 	  do
5447 	    {
5448 	      next = NEXT_SEC (head);
5449 	      /* Set up this stub group.  */
5450 	      htab->stub_group[head->id].link_sec = curr;
5451 	    }
5452 	  while (head != curr && (head = next) != NULL);
5453 
5454 	  /* But wait, there's more!  Input sections up to stub_group_size
5455 	     bytes after the stub section can be handled by it too.  */
5456 	  if (!stubs_always_after_branch)
5457 	    {
5458 	      stub_group_start = curr->output_offset + curr->size;
5459 
5460 	      while (next != NULL)
5461 		{
5462 		  end_of_next = next->output_offset + next->size;
5463 		  if (end_of_next - stub_group_start >= stub_group_size)
5464 		    /* End of NEXT is too far from stubs, so stop.  */
5465 		    break;
5466 		  /* Add NEXT to the stub group.  */
5467 		  head = next;
5468 		  next = NEXT_SEC (head);
5469 		  htab->stub_group[head->id].link_sec = curr;
5470 		}
5471 	    }
5472 	  head = next;
5473 	}
5474     }
5475   while (list++ != htab->input_list + htab->top_index);
5476 
5477   free (htab->input_list);
5478 #undef PREV_SEC
5479 #undef NEXT_SEC
5480 }
5481 
5482 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5483    erratum fix.  */
5484 
5485 static int
5486 a8_reloc_compare (const void *a, const void *b)
5487 {
5488   const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5489   const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5490 
5491   if (ra->from < rb->from)
5492     return -1;
5493   else if (ra->from > rb->from)
5494     return 1;
5495   else
5496     return 0;
5497 }
5498 
5499 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5500 						    const char *, char **);
5501 
5502 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5503    branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
5504    NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
5505    otherwise.  */
5506 
5507 static bool
5508 cortex_a8_erratum_scan (bfd *input_bfd,
5509 			struct bfd_link_info *info,
5510 			struct a8_erratum_fix **a8_fixes_p,
5511 			unsigned int *num_a8_fixes_p,
5512 			unsigned int *a8_fix_table_size_p,
5513 			struct a8_erratum_reloc *a8_relocs,
5514 			unsigned int num_a8_relocs,
5515 			unsigned prev_num_a8_fixes,
5516 			bool *stub_changed_p)
5517 {
5518   asection *section;
5519   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5520   struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5521   unsigned int num_a8_fixes = *num_a8_fixes_p;
5522   unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5523 
5524   if (htab == NULL)
5525     return false;
5526 
5527   for (section = input_bfd->sections;
5528        section != NULL;
5529        section = section->next)
5530     {
5531       bfd_byte *contents = NULL;
5532       struct _arm_elf_section_data *sec_data;
5533       unsigned int span;
5534       bfd_vma base_vma;
5535 
5536       if (elf_section_type (section) != SHT_PROGBITS
5537 	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5538 	  || (section->flags & SEC_EXCLUDE) != 0
5539 	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5540 	  || (section->output_section == bfd_abs_section_ptr))
5541 	continue;
5542 
5543       base_vma = section->output_section->vma + section->output_offset;
5544 
5545       if (elf_section_data (section)->this_hdr.contents != NULL)
5546 	contents = elf_section_data (section)->this_hdr.contents;
5547       else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5548 	return true;
5549 
5550       sec_data = elf32_arm_section_data (section);
5551 
5552       for (span = 0; span < sec_data->mapcount; span++)
5553 	{
5554 	  unsigned int span_start = sec_data->map[span].vma;
5555 	  unsigned int span_end = (span == sec_data->mapcount - 1)
5556 	    ? section->size : sec_data->map[span + 1].vma;
5557 	  unsigned int i;
5558 	  char span_type = sec_data->map[span].type;
5559 	  bool last_was_32bit = false, last_was_branch = false;
5560 
5561 	  if (span_type != 't')
5562 	    continue;
5563 
5564 	  /* Span is entirely within a single 4KB region: skip scanning.  */
5565 	  if (((base_vma + span_start) & ~0xfff)
5566 	      == ((base_vma + span_end) & ~0xfff))
5567 	    continue;
5568 
5569 	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5570 
5571 	       * The opcode is BLX.W, BL.W, B.W, Bcc.W
5572 	       * The branch target is in the same 4KB region as the
5573 		 first half of the branch.
5574 	       * The instruction before the branch is a 32-bit
5575 		 length non-branch instruction.  */
5576 	  for (i = span_start; i < span_end;)
5577 	    {
5578 	      unsigned int insn = bfd_getl16 (&contents[i]);
5579 	      bool insn_32bit = false, is_blx = false, is_b = false;
5580 	      bool is_bl = false, is_bcc = false, is_32bit_branch;
5581 
5582 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5583 		insn_32bit = true;
5584 
5585 	      if (insn_32bit)
5586 		{
5587 		  /* Load the rest of the insn (in manual-friendly order).  */
5588 		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5589 
5590 		  /* Encoding T4: B<c>.W.  */
5591 		  is_b = (insn & 0xf800d000) == 0xf0009000;
5592 		  /* Encoding T1: BL<c>.W.  */
5593 		  is_bl = (insn & 0xf800d000) == 0xf000d000;
5594 		  /* Encoding T2: BLX<c>.W.  */
5595 		  is_blx = (insn & 0xf800d000) == 0xf000c000;
5596 		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
5597 		  is_bcc = (insn & 0xf800d000) == 0xf0008000
5598 			   && (insn & 0x07f00000) != 0x03800000;
5599 		}
5600 
5601 	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5602 
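	      /* An address ending in 0xffe means the first halfword of this
		 32-bit branch occupies the last halfword slot of a 4KB
		 region, i.e. the branch straddles two 4KB regions, which is
		 the pattern the erratum workaround is after.  */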
5603 	      if (((base_vma + i) & 0xfff) == 0xffe
5604 		  && insn_32bit
5605 		  && is_32bit_branch
5606 		  && last_was_32bit
5607 		  && ! last_was_branch)
5608 		{
5609 		  bfd_signed_vma offset = 0;
5610 		  bool force_target_arm = false;
5611 		  bool force_target_thumb = false;
5612 		  bfd_vma target;
5613 		  enum elf32_arm_stub_type stub_type = arm_stub_none;
5614 		  struct a8_erratum_reloc key, *found;
5615 		  bool use_plt = false;
5616 
5617 		  key.from = base_vma + i;
5618 		  found = (struct a8_erratum_reloc *)
5619 		      bsearch (&key, a8_relocs, num_a8_relocs,
5620 			       sizeof (struct a8_erratum_reloc),
5621 			       &a8_reloc_compare);
5622 
5623 		  if (found)
5624 		    {
5625 		      char *error_message = NULL;
5626 		      struct elf_link_hash_entry *entry;
5627 
5628 		      /* We don't care about the error returned from this
5629 			 function, only if there is glue or not.  */
5630 		      entry = find_thumb_glue (info, found->sym_name,
5631 					       &error_message);
5632 
5633 		      if (entry)
5634 			found->non_a8_stub = true;
5635 
5636 		      /* Keep a simpler condition, for the sake of clarity.  */
5637 		      if (htab->root.splt != NULL && found->hash != NULL
5638 			  && found->hash->root.plt.offset != (bfd_vma) -1)
5639 			use_plt = true;
5640 
5641 		      if (found->r_type == R_ARM_THM_CALL)
5642 			{
5643 			  if (found->branch_type == ST_BRANCH_TO_ARM
5644 			      || use_plt)
5645 			    force_target_arm = true;
5646 			  else
5647 			    force_target_thumb = true;
5648 			}
5649 		    }
5650 
5651 		  /* Check if we have an offending branch instruction.  */
5652 
5653 		  if (found && found->non_a8_stub)
5654 		    /* We've already made a stub for this instruction, e.g.
5655 		       it's a long branch or a Thumb->ARM stub.  Assume that
5656 		       stub will suffice to work around the A8 erratum (see
5657 		       setting of always_after_branch above).  */
5658 		    ;
5659 		  else if (is_bcc)
5660 		    {
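		      /* Encoding T3 (conditional B<c>.W): the branch offset
			 is SignExtend (S:J2:J1:imm6:imm11:'0'); rebuild it
			 from the instruction fields.  */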
5661 		      offset = (insn & 0x7ff) << 1;
5662 		      offset |= (insn & 0x3f0000) >> 4;
5663 		      offset |= (insn & 0x2000) ? 0x40000 : 0;
5664 		      offset |= (insn & 0x800) ? 0x80000 : 0;
5665 		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
5666 		      if (offset & 0x100000)
5667 			offset |= ~ ((bfd_signed_vma) 0xfffff);
5668 		      stub_type = arm_stub_a8_veneer_b_cond;
5669 		    }
5670 		  else if (is_b || is_bl || is_blx)
5671 		    {
5672 		      int s = (insn & 0x4000000) != 0;
5673 		      int j1 = (insn & 0x2000) != 0;
5674 		      int j2 = (insn & 0x800) != 0;
5675 		      int i1 = !(j1 ^ s);
5676 		      int i2 = !(j2 ^ s);
5677 
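		      /* Encodings T1/T2/T4 (BL, BLX, B.W): the branch offset
			 is SignExtend (S:I1:I2:imm10:imm11:'0') with
			 I1 = NOT (J1 EOR S) and I2 = NOT (J2 EOR S);
			 reassemble it below.  */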
5678 		      offset = (insn & 0x7ff) << 1;
5679 		      offset |= (insn & 0x3ff0000) >> 4;
5680 		      offset |= i2 << 22;
5681 		      offset |= i1 << 23;
5682 		      offset |= s << 24;
5683 		      if (offset & 0x1000000)
5684 			offset |= ~ ((bfd_signed_vma) 0xffffff);
5685 
5686 		      if (is_blx)
5687 			offset &= ~ ((bfd_signed_vma) 3);
5688 
5689 		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
5690 			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5691 		    }
5692 
5693 		  if (stub_type != arm_stub_none)
5694 		    {
5695 		      bfd_vma pc_for_insn = base_vma + i + 4;
5696 
5697 		      /* The original instruction is a BL, but the target is
5698 			 an ARM instruction.  If we were not making a stub,
5699 			 the BL would have been converted to a BLX.  Use the
5700 			 BLX stub instead in that case.  */
5701 		      if (htab->use_blx && force_target_arm
5702 			  && stub_type == arm_stub_a8_veneer_bl)
5703 			{
5704 			  stub_type = arm_stub_a8_veneer_blx;
5705 			  is_blx = true;
5706 			  is_bl = false;
5707 			}
5708 		      /* Conversely, if the original instruction was
5709 			 BLX but the target is Thumb mode, use the BL
5710 			 stub.  */
5711 		      else if (force_target_thumb
5712 			       && stub_type == arm_stub_a8_veneer_blx)
5713 			{
5714 			  stub_type = arm_stub_a8_veneer_bl;
5715 			  is_blx = false;
5716 			  is_bl = true;
5717 			}
5718 
5719 		      if (is_blx)
5720 			pc_for_insn &= ~ ((bfd_vma) 3);
5721 
5722 		      /* If we found a relocation, use the proper destination,
5723 			 not the offset in the (unrelocated) instruction.
5724 			 Note this is always done if we switched the stub type
5725 			 above.  */
5726 		      if (found)
5727 			offset =
5728 			  (bfd_signed_vma) (found->destination - pc_for_insn);
5729 
5730 		      /* If the stub will use a Thumb-mode branch to a
5731 			 PLT target, redirect it to the preceding Thumb
5732 			 entry point.  */
5733 		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5734 			offset -= PLT_THUMB_STUB_SIZE;
5735 
5736 		      target = pc_for_insn + offset;
5737 
5738 		      /* The BLX stub is ARM-mode code.  Adjust the offset to
5739 			 take the different PC value (+8 instead of +4) into
5740 			 account.  */
5741 		      if (stub_type == arm_stub_a8_veneer_blx)
5742 			offset += 4;
5743 
5744 		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5745 			{
5746 			  char *stub_name = NULL;
5747 
5748 			  if (num_a8_fixes == a8_fix_table_size)
5749 			    {
5750 			      a8_fix_table_size *= 2;
5751 			      a8_fixes = (struct a8_erratum_fix *)
5752 				  bfd_realloc (a8_fixes,
5753 					       sizeof (struct a8_erratum_fix)
5754 					       * a8_fix_table_size);
5755 			    }
5756 
5757 			  if (num_a8_fixes < prev_num_a8_fixes)
5758 			    {
5759 			      /* If we're doing a subsequent scan,
5760 				 check if we've found the same fix as
5761 				 before, and try and reuse the stub
5762 				 name.  */
5763 			      stub_name = a8_fixes[num_a8_fixes].stub_name;
5764 			      if ((a8_fixes[num_a8_fixes].section != section)
5765 				  || (a8_fixes[num_a8_fixes].offset != i))
5766 				{
5767 				  free (stub_name);
5768 				  stub_name = NULL;
5769 				  *stub_changed_p = true;
5770 				}
5771 			    }
5772 
5773 			  if (!stub_name)
5774 			    {
5775 			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5776 			      if (stub_name != NULL)
5777 				sprintf (stub_name, "%x:%x", section->id, i);
5778 			    }
5779 
5780 			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5781 			  a8_fixes[num_a8_fixes].section = section;
5782 			  a8_fixes[num_a8_fixes].offset = i;
5783 			  a8_fixes[num_a8_fixes].target_offset =
5784 			    target - base_vma;
5785 			  a8_fixes[num_a8_fixes].orig_insn = insn;
5786 			  a8_fixes[num_a8_fixes].stub_name = stub_name;
5787 			  a8_fixes[num_a8_fixes].stub_type = stub_type;
5788 			  a8_fixes[num_a8_fixes].branch_type =
5789 			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5790 
5791 			  num_a8_fixes++;
5792 			}
5793 		    }
5794 		}
5795 
5796 	      i += insn_32bit ? 4 : 2;
5797 	      last_was_32bit = insn_32bit;
5798 	      last_was_branch = is_32bit_branch;
5799 	    }
5800 	}
5801 
5802       if (elf_section_data (section)->this_hdr.contents == NULL)
5803 	free (contents);
5804     }
5805 
5806   *a8_fixes_p = a8_fixes;
5807   *num_a8_fixes_p = num_a8_fixes;
5808   *a8_fix_table_size_p = a8_fix_table_size;
5809 
5810   return false;
5811 }
5812 
5813 /* Create or update a stub entry depending on whether the stub can already be
5814    found in HTAB.  The stub is identified by:
5815    - its type STUB_TYPE
5816    - its source branch (note that several can share the same stub) whose
5817      section and relocation (if any) are given by SECTION and IRELA
5818      respectively
5819    - its target symbol whose input section, hash, name, value and branch type
5820      are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5821      respectively
5822 
5823    If found, the value of the stub's target symbol is updated from SYM_VALUE
5824    and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
5825    TRUE and the stub entry is initialized.
5826 
5827    Returns the stub that was created or updated, or NULL if an error
5828    occurred.  */
5829 
5830 static struct elf32_arm_stub_hash_entry *
5831 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5832 		       enum elf32_arm_stub_type stub_type, asection *section,
5833 		       Elf_Internal_Rela *irela, asection *sym_sec,
5834 		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
5835 		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
5836 		       bool *new_stub)
5837 {
5838   const asection *id_sec;
5839   char *stub_name;
5840   struct elf32_arm_stub_hash_entry *stub_entry;
5841   unsigned int r_type;
5842   bool sym_claimed = arm_stub_sym_claimed (stub_type);
5843 
5844   BFD_ASSERT (stub_type != arm_stub_none);
5845   *new_stub = false;
5846 
5847   if (sym_claimed)
5848     stub_name = sym_name;
5849   else
5850     {
5851       BFD_ASSERT (irela);
5852       BFD_ASSERT (section);
5853       BFD_ASSERT (section->id <= htab->top_id);
5854 
5855       /* Support for grouping stub sections.  */
5856       id_sec = htab->stub_group[section->id].link_sec;
5857 
5858       /* Get the name of this stub.  */
5859       stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5860 				       stub_type);
5861       if (!stub_name)
5862 	return NULL;
5863     }
5864 
5865   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
5866 				     false);
5867   /* The proper stub has already been created, just update its value.  */
5868   if (stub_entry != NULL)
5869     {
5870       if (!sym_claimed)
5871 	free (stub_name);
5872       stub_entry->target_value = sym_value;
5873       return stub_entry;
5874     }
5875 
5876   stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5877   if (stub_entry == NULL)
5878     {
5879       if (!sym_claimed)
5880 	free (stub_name);
5881       return NULL;
5882     }
5883 
5884   stub_entry->target_value = sym_value;
5885   stub_entry->target_section = sym_sec;
5886   stub_entry->stub_type = stub_type;
5887   stub_entry->h = hash;
5888   stub_entry->branch_type = branch_type;
5889 
5890   if (sym_claimed)
5891     stub_entry->output_name = sym_name;
5892   else
5893     {
5894       if (sym_name == NULL)
5895 	sym_name = "unnamed";
5896       stub_entry->output_name = (char *)
5897 	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5898 				   + strlen (sym_name));
5899       if (stub_entry->output_name == NULL)
5900 	{
5901 	  free (stub_name);
5902 	  return NULL;
5903 	}
5904 
5905       /* For historical reasons, use the existing names for ARM-to-Thumb and
5906 	 Thumb-to-ARM stubs.  */
5907       r_type = ELF32_R_TYPE (irela->r_info);
5908       if ((r_type == (unsigned int) R_ARM_THM_CALL
5909 	   || r_type == (unsigned int) R_ARM_THM_JUMP24
5910 	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
5911 	  && branch_type == ST_BRANCH_TO_ARM)
5912 	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5913       else if ((r_type == (unsigned int) R_ARM_CALL
5914 		|| r_type == (unsigned int) R_ARM_JUMP24)
5915 	       && branch_type == ST_BRANCH_TO_THUMB)
5916 	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5917       else
5918 	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5919     }
5920 
5921   *new_stub = true;
5922   return stub_entry;
5923 }
5924 
5925 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5926    gateway veneer to transition from non-secure to secure state, and create
5927    them accordingly.
5928 
5929    "ARMv8-M Security Extensions: Requirements on Development Tools" document
5930    defines the conditions that govern Secure Gateway veneer creation for a
5931    given symbol <SYM> as follows:
5932    - it has function type
5933    - it has non local binding
5934    - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5935      same type, binding and value as <SYM> (called normal symbol).
5936    An entry function can handle secure state transition itself in which case
5937    its special symbol would have a different value from the normal symbol.
5938 
5939    OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5940    entry mapping while HTAB gives the name to hash entry mapping.
5941    *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
5942    created.
5943 
5944    The return value gives whether a stub failed to be allocated.  */
5945 
5946 static bool
5947 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5948 	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5949 	   int *cmse_stub_created)
5950 {
5951   const struct elf_backend_data *bed;
5952   Elf_Internal_Shdr *symtab_hdr;
5953   unsigned i, j, sym_count, ext_start;
5954   Elf_Internal_Sym *cmse_sym, *local_syms;
5955   struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5956   enum arm_st_branch_type branch_type;
5957   char *sym_name, *lsym_name;
5958   bfd_vma sym_value;
5959   asection *section;
5960   struct elf32_arm_stub_hash_entry *stub_entry;
5961   bool is_v8m, new_stub, cmse_invalid, ret = true;
5962 
5963   bed = get_elf_backend_data (input_bfd);
5964   symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5965   sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5966   ext_start = symtab_hdr->sh_info;
5967   is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5968 	    && out_attr[Tag_CPU_arch_profile].i == 'M');
5969 
5970   local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5971   if (local_syms == NULL)
5972     local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5973 				       symtab_hdr->sh_info, 0, NULL, NULL,
5974 				       NULL);
5975   if (symtab_hdr->sh_info && local_syms == NULL)
5976     return false;
5977 
5978   /* Scan symbols.  */
5979   for (i = 0; i < sym_count; i++)
5980     {
5981       cmse_invalid = false;
5982 
5983       if (i < ext_start)
5984 	{
5985 	  cmse_sym = &local_syms[i];
5986 	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
5987 						      symtab_hdr->sh_link,
5988 						      cmse_sym->st_name);
5989 	  if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
5990 	    continue;
5991 
5992 	  /* Special symbol with local binding.  */
5993 	  cmse_invalid = true;
5994 	}
5995       else
5996 	{
5997 	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
5998 	  sym_name = (char *) cmse_hash->root.root.root.string;
5999 	  if (!startswith (sym_name, CMSE_PREFIX))
6000 	    continue;
6001 
6002 	  /* Special symbol has incorrect binding or type.  */
6003 	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
6004 	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
6005 	      || cmse_hash->root.type != STT_FUNC)
6006 	    cmse_invalid = true;
6007 	}
6008 
6009       if (!is_v8m)
6010 	{
6011 	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6012 				"ARMv8-M architecture or later"),
6013 			      input_bfd, sym_name);
6014 	  is_v8m = true; /* Avoid multiple warnings.  */
6015 	  ret = false;
6016 	}
6017 
6018       if (cmse_invalid)
6019 	{
6020 	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6021 				" a global or weak function symbol"),
6022 			      input_bfd, sym_name);
6023 	  ret = false;
6024 	  if (i < ext_start)
6025 	    continue;
6026 	}
6027 
6028       sym_name += strlen (CMSE_PREFIX);
6029       hash = (struct elf32_arm_link_hash_entry *)
6030 	elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6031 
6032       /* No associated normal symbol or it is neither global nor weak.  */
6033       if (!hash
6034 	  || (hash->root.root.type != bfd_link_hash_defined
6035 	      && hash->root.root.type != bfd_link_hash_defweak)
6036 	  || hash->root.type != STT_FUNC)
6037 	{
6038 	  /* Initialize here to avoid warning about use of possibly
6039 	     uninitialized variable.  */
6040 	  j = 0;
6041 
6042 	  if (!hash)
6043 	    {
6044 	      /* Searching for a normal symbol with local binding.  */
6045 	      for (; j < ext_start; j++)
6046 		{
6047 		  lsym_name =
6048 		    bfd_elf_string_from_elf_section (input_bfd,
6049 						     symtab_hdr->sh_link,
6050 						     local_syms[j].st_name);
6051 		  if (!strcmp (sym_name, lsym_name))
6052 		    break;
6053 		}
6054 	    }
6055 
6056 	  if (hash || j < ext_start)
6057 	    {
6058 	      _bfd_error_handler
6059 		(_("%pB: invalid standard symbol `%s'; it must be "
6060 		   "a global or weak function symbol"),
6061 		 input_bfd, sym_name);
6062 	    }
6063 	  else
6064 	    _bfd_error_handler
6065 	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6066 	  ret = false;
6067 	  if (!hash)
6068 	    continue;
6069 	}
6070 
6071       sym_value = hash->root.root.u.def.value;
6072       section = hash->root.root.u.def.section;
6073 
6074       if (cmse_hash->root.root.u.def.section != section)
6075 	{
6076 	  _bfd_error_handler
6077 	    (_("%pB: `%s' and its special symbol are in different sections"),
6078 	     input_bfd, sym_name);
6079 	  ret = false;
6080 	}
6081       if (cmse_hash->root.root.u.def.value != sym_value)
6082 	continue; /* Ignore: could be an entry function starting with SG.  */
6083 
6084 	/* If this section is a link-once section that will be discarded, then
6085 	   don't create any stubs.  */
6086       if (section->output_section == NULL)
6087 	{
6088 	  _bfd_error_handler
6089 	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6090 	  continue;
6091 	}
6092 
6093       if (hash->root.size == 0)
6094 	{
6095 	  _bfd_error_handler
6096 	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6097 	  ret = false;
6098 	}
6099 
6100       if (!ret)
6101 	continue;
6102       branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6103       stub_entry
6104 	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6105 				 NULL, NULL, section, hash, sym_name,
6106 				 sym_value, branch_type, &new_stub);
6107 
6108       if (stub_entry == NULL)
6109 	 ret = false;
6110       else
6111 	{
6112 	  BFD_ASSERT (new_stub);
6113 	  (*cmse_stub_created)++;
6114 	}
6115     }
6116 
6117   if (!symtab_hdr->contents)
6118     free (local_syms);
6119   return ret;
6120 }
6121 
6122 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6123    code entry function, i.e. can be called from non-secure code without using
6124    a veneer.  */
6125 
6126 static bool
6127 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6128 {
6129   bfd_byte contents[4];
6130   uint32_t first_insn;
6131   asection *section;
6132   file_ptr offset;
6133   bfd *abfd;
6134 
6135   /* Defined symbol of function type.  */
6136   if (hash->root.root.type != bfd_link_hash_defined
6137       && hash->root.root.type != bfd_link_hash_defweak)
6138     return false;
6139   if (hash->root.type != STT_FUNC)
6140     return false;
6141 
6142   /* Read first instruction.  */
6143   section = hash->root.root.u.def.section;
6144   abfd = section->owner;
6145   offset = hash->root.root.u.def.value - section->vma;
6146   if (!bfd_get_section_contents (abfd, section, contents, offset,
6147 				 sizeof (contents)))
6148     return false;
6149 
6150   first_insn = bfd_get_32 (abfd, contents);
6151 
6152   /* Starts with an SG instruction.  */
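  /* The SG instruction is encoded as two identical halfwords (0xe97f 0xe97f),
     so the 32-bit comparison below matches regardless of the order in which
     the halfwords were stored.  */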
6153   return first_insn == 0xe97fe97f;
6154 }
6155 
6156 /* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a new
6157    secure gateway veneer (i.e. the veneer was not in the input import library)
6158    and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
6159 
6160 static bool
6161 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6162 {
6163   struct elf32_arm_stub_hash_entry *stub_entry;
6164   struct bfd_link_info *info;
6165 
6166   /* Massage our args to the form they really have.  */
6167   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6168   info = (struct bfd_link_info *) gen_info;
6169 
6170   if (info->out_implib_bfd)
6171     return true;
6172 
6173   if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6174     return true;
6175 
6176   if (stub_entry->stub_offset == (bfd_vma) -1)
6177     _bfd_error_handler ("  %s", stub_entry->output_name);
6178 
6179   return true;
6180 }
6181 
6182 /* Set the offset of each secure gateway veneer so that its address remains
6183    identical to the one in the input import library referred to by
6184    HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
6185    (present in the input import library but absent from the executable being
6186    linked) or if new veneers appeared and there is no output import library
6187    (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6188    number of secure gateway veneers found in the input import library).
6189 
6190    The function returns TRUE on success and FALSE on error.  On success,
6191    *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6192    and this function, and HTAB->new_cmse_stub_offset is set to the offset just
6193    past the highest veneer observed, so new veneers can be laid out after it.  */
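
/* Worked example (illustrative only; the concrete values are assumed, not
   taken from these sources): if the SG veneer section is output at VMA
   0x20000 and the input import library gives entry function `foo' a veneer
   address of 0x20010, then its stub_offset below is 0x10.  Assuming an
   8-byte SG veneer, the size rounded up to the 8-byte layout granularity is
   still 8, so the next free offset is 0x18; the largest such value is
   recorded in HTAB->new_cmse_stub_offset so that any new veneers are laid
   out after the preexisting ones.  */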
6194 
6195 static bool
6196 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6197 				  struct elf32_arm_link_hash_table *htab,
6198 				  int *cmse_stub_created)
6199 {
6200   long symsize;
6201   char *sym_name;
6202   flagword flags;
6203   long i, symcount;
6204   bfd *in_implib_bfd;
6205   asection *stub_out_sec;
6206   bool ret = true;
6207   Elf_Internal_Sym *intsym;
6208   const char *out_sec_name;
6209   bfd_size_type cmse_stub_size;
6210   asymbol **sympp = NULL, *sym;
6211   struct elf32_arm_link_hash_entry *hash;
6212   const insn_sequence *cmse_stub_template;
6213   struct elf32_arm_stub_hash_entry *stub_entry;
6214   int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6215   bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6216   bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6217 
6218   /* No input secure gateway import library.  */
6219   if (!htab->in_implib_bfd)
6220     return true;
6221 
6222   in_implib_bfd = htab->in_implib_bfd;
6223   if (!htab->cmse_implib)
6224     {
6225       _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6226 			    "Gateway import libraries"), in_implib_bfd);
6227       return false;
6228     }
6229 
6230   /* Get symbol table size.  */
6231   symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6232   if (symsize < 0)
6233     return false;
6234 
6235   /* Read in the input secure gateway import library's symbol table.  */
6236   sympp = (asymbol **) bfd_malloc (symsize);
6237   if (sympp == NULL)
6238     return false;
6239 
6240   symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6241   if (symcount < 0)
6242     {
6243       ret = false;
6244       goto free_sym_buf;
6245     }
6246 
6247   htab->new_cmse_stub_offset = 0;
6248   cmse_stub_size =
6249     find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6250 				 &cmse_stub_template,
6251 				 &cmse_stub_template_size);
6252   out_sec_name =
6253     arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6254   stub_out_sec =
6255     bfd_get_section_by_name (htab->obfd, out_sec_name);
6256   if (stub_out_sec != NULL)
6257     cmse_stub_sec_vma = stub_out_sec->vma;
6258 
6259   /* Set addresses of veneers mentioned in the input secure gateway import
6260      library's symbol table.  */
6261   for (i = 0; i < symcount; i++)
6262     {
6263       sym = sympp[i];
6264       flags = sym->flags;
6265       sym_name = (char *) bfd_asymbol_name (sym);
6266       intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6267 
6268       if (sym->section != bfd_abs_section_ptr
6269 	  || !(flags & (BSF_GLOBAL | BSF_WEAK))
6270 	  || (flags & BSF_FUNCTION) != BSF_FUNCTION
6271 	  || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6272 	      != ST_BRANCH_TO_THUMB))
6273 	{
6274 	  _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6275 				"symbol should be absolute, global and "
6276 				"refer to Thumb functions"),
6277 			      in_implib_bfd, sym_name);
6278 	  ret = false;
6279 	  continue;
6280 	}
6281 
6282       veneer_value = bfd_asymbol_value (sym);
6283       stub_offset = veneer_value - cmse_stub_sec_vma;
6284       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6285 					 false, false);
6286       hash = (struct elf32_arm_link_hash_entry *)
6287 	elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6288 
6289       /* A stub entry should have been created by cmse_scan, or the symbol
6290 	 should be that of a secure function callable from non-secure code.  */
6291       if (!stub_entry && !hash)
6292 	{
6293 	  bool new_stub;
6294 
6295 	  _bfd_error_handler
6296 	    (_("entry function `%s' disappeared from secure code"), sym_name);
6297 	  hash = (struct elf32_arm_link_hash_entry *)
6298 	    elf_link_hash_lookup (&(htab)->root, sym_name, true, true, true);
6299 	  stub_entry
6300 	    = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6301 				     NULL, NULL, bfd_abs_section_ptr, hash,
6302 				     sym_name, veneer_value,
6303 				     ST_BRANCH_TO_THUMB, &new_stub);
6304 	  if (stub_entry == NULL)
6305 	    ret = false;
6306 	  else
6307 	    {
6308 	      BFD_ASSERT (new_stub);
6309 	      new_cmse_stubs_created++;
6310 	      (*cmse_stub_created)++;
6311 	      stub_entry->stub_template_size = stub_entry->stub_size = 0;
6312 	      stub_entry->stub_offset = stub_offset;
6313 	    }
6314 	}
6315       /* The symbol found is not callable from non-secure code.  */
6316       else if (!stub_entry)
6317 	{
6318 	  if (!cmse_entry_fct_p (hash))
6319 	    {
6320 	      _bfd_error_handler (_("`%s' refers to a non entry function"),
6321 				  sym_name);
6322 	      ret = false;
6323 	    }
6324 	  continue;
6325 	}
6326       else
6327 	{
6328 	  /* Only stubs for SG veneers should have been created.  */
6329 	  BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6330 
6331 	  /* Check visibility hasn't changed.  */
6332 	  if (!!(flags & BSF_GLOBAL)
6333 	      != (hash->root.root.type == bfd_link_hash_defined))
6334 	    _bfd_error_handler
6335 	      (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6336 	       sym_name);
6337 
6338 	  stub_entry->stub_offset = stub_offset;
6339 	}
6340 
6341       /* Size should match that of a SG veneer.  */
6342       if (intsym->st_size != cmse_stub_size)
6343 	{
6344 	  _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6345 			      in_implib_bfd, sym_name);
6346 	  ret = false;
6347 	}
6348 
6349       /* Previous veneer address is before current SG veneer section.  */
6350       if (veneer_value < cmse_stub_sec_vma)
6351 	{
6352 	  /* Avoid offset underflow.  */
6353 	  if (stub_entry)
6354 	    stub_entry->stub_offset = 0;
6355 	  stub_offset = 0;
6356 	  ret = false;
6357 	}
6358 
6359       /* Complain if stub offset not a multiple of stub size.  */
6360       if (stub_offset % cmse_stub_size)
6361 	{
6362 	  _bfd_error_handler
6363 	    (_("offset of veneer for entry function `%s' not a multiple of "
6364 	       "its size"), sym_name);
6365 	  ret = false;
6366 	}
6367 
6368       if (!ret)
6369 	continue;
6370 
6371       new_cmse_stubs_created--;
6372       if (veneer_value < cmse_stub_array_start)
6373 	cmse_stub_array_start = veneer_value;
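      /* Veneer offsets are assigned with 8-byte granularity: round the SG
	 veneer size up to a multiple of 8 before computing where the next
	 veneer may start.  */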
6374       next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6375       if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6376 	htab->new_cmse_stub_offset = next_cmse_stub_offset;
6377     }
6378 
6379   if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6380     {
6381       BFD_ASSERT (new_cmse_stubs_created > 0);
6382       _bfd_error_handler
6383 	(_("new entry function(s) introduced but no output import library "
6384 	   "specified:"));
6385       bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6386     }
6387 
6388   if (cmse_stub_array_start != cmse_stub_sec_vma)
6389     {
6390       _bfd_error_handler
6391 	(_("start address of `%s' is different from previous link"),
6392 	 out_sec_name);
6393       ret = false;
6394     }
6395 
6396  free_sym_buf:
6397   free (sympp);
6398   return ret;
6399 }
6400 
6401 /* Determine and set the size of the stub section for a final link.
6402 
6403    The basic idea here is to examine all the relocations looking for
6404    PC-relative calls to a target that is unreachable with a "bl"
6405    instruction.  */
6406 
6407 bool
6408 elf32_arm_size_stubs (bfd *output_bfd,
6409 		      bfd *stub_bfd,
6410 		      struct bfd_link_info *info,
6411 		      bfd_signed_vma group_size,
6412 		      asection * (*add_stub_section) (const char *, asection *,
6413 						      asection *,
6414 						      unsigned int),
6415 		      void (*layout_sections_again) (void))
6416 {
6417   bool ret = true;
6418   obj_attribute *out_attr;
6419   int cmse_stub_created = 0;
6420   bfd_size_type stub_group_size;
6421   bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
6422   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6423   struct a8_erratum_fix *a8_fixes = NULL;
6424   unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6425   struct a8_erratum_reloc *a8_relocs = NULL;
6426   unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6427 
6428   if (htab == NULL)
6429     return false;
6430 
6431   if (htab->fix_cortex_a8)
6432     {
6433       a8_fixes = (struct a8_erratum_fix *)
6434 	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6435       a8_relocs = (struct a8_erratum_reloc *)
6436 	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6437     }
6438 
6439   /* Propagate mach to stub bfd, because it may not have been
6440      finalized when we created stub_bfd.  */
6441   bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6442 		     bfd_get_mach (output_bfd));
6443 
6444   /* Stash our params away.  */
6445   htab->stub_bfd = stub_bfd;
6446   htab->add_stub_section = add_stub_section;
6447   htab->layout_sections_again = layout_sections_again;
6448   stubs_always_after_branch = group_size < 0;
6449 
6450   out_attr = elf_known_obj_attributes_proc (output_bfd);
6451   m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6452 
6453   /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6454      as the first half of a 32-bit branch straddling two 4K pages.  This is a
6455      crude way of enforcing that.  */
6456   if (htab->fix_cortex_a8)
6457     stubs_always_after_branch = 1;
6458 
6459   if (group_size < 0)
6460     stub_group_size = -group_size;
6461   else
6462     stub_group_size = group_size;
6463 
6464   if (stub_group_size == 1)
6465     {
6466       /* Default values.  */
6467       /* The Thumb branch range of +-4MB has to be used as the default
6468 	 maximum group size (a given section can contain both ARM and Thumb
6469 	 code, so the worst case has to be taken into account).
6470 
6471 	 The value used is 24K less than that, which allows room for 2025
6472 	 12-byte stubs.  If we exceed that, then we will fail to link.
6473 	 The user will have to relink with an explicit group size
6474 	 option.  */
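      /* Illustrative arithmetic for the figure above: +/-4MB is 4194304
	 bytes; 4194304 - 4170000 = 24304 bytes of slack, which at 12 bytes
	 per stub leaves room for 2025 stubs.  */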
6475       stub_group_size = 4170000;
6476     }
6477 
6478   group_sections (htab, stub_group_size, stubs_always_after_branch);
6479 
6480   /* If we're applying the cortex A8 fix, we need to determine the
6481      program header size now, because we cannot change it later --
6482      that could alter section placements.  Notice the A8 erratum fix
6483      ends up requiring the section addresses to remain unchanged
6484      modulo the page size.  That's something we cannot represent
6485      inside BFD, and we don't want to force the section alignment to
6486      be the page size.  */
6487   if (htab->fix_cortex_a8)
6488     (*htab->layout_sections_again) ();
6489 
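  /* Stub sizing is iterative: each pass below may create new stubs or
     Cortex-A8 erratum fixes, which can move sections and in turn make further
     stubs necessary, so we keep re-running the scan and the section layout
     until a pass completes without adding anything.  */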
6490   while (1)
6491     {
6492       bfd *input_bfd;
6493       unsigned int bfd_indx;
6494       asection *stub_sec;
6495       enum elf32_arm_stub_type stub_type;
6496       bool stub_changed = false;
6497       unsigned prev_num_a8_fixes = num_a8_fixes;
6498 
6499       num_a8_fixes = 0;
6500       for (input_bfd = info->input_bfds, bfd_indx = 0;
6501 	   input_bfd != NULL;
6502 	   input_bfd = input_bfd->link.next, bfd_indx++)
6503 	{
6504 	  Elf_Internal_Shdr *symtab_hdr;
6505 	  asection *section;
6506 	  Elf_Internal_Sym *local_syms = NULL;
6507 
6508 	  if (!is_arm_elf (input_bfd))
6509 	    continue;
6510 	  if ((input_bfd->flags & DYNAMIC) != 0
6511 	      && (elf_sym_hashes (input_bfd) == NULL
6512 		  || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6513 	    continue;
6514 
6515 	  num_a8_relocs = 0;
6516 
6517 	  /* We'll need the symbol table in a second.  */
6518 	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6519 	  if (symtab_hdr->sh_info == 0)
6520 	    continue;
6521 
6522 	  /* Limit the scan of symbols to object files whose profile is
6523 	     Microcontroller, so as not to hinder performance in the general case.  */
6524 	  if (m_profile && first_veneer_scan)
6525 	    {
6526 	      struct elf_link_hash_entry **sym_hashes;
6527 
6528 	      sym_hashes = elf_sym_hashes (input_bfd);
6529 	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6530 			      &cmse_stub_created))
6531 		goto error_ret_free_local;
6532 
6533 	      if (cmse_stub_created != 0)
6534 		stub_changed = true;
6535 	    }
6536 
6537 	  /* Walk over each section attached to the input bfd.  */
6538 	  for (section = input_bfd->sections;
6539 	       section != NULL;
6540 	       section = section->next)
6541 	    {
6542 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6543 
6544 	      /* If there aren't any relocs, then there's nothing more
6545 		 to do.  */
6546 	      if ((section->flags & SEC_RELOC) == 0
6547 		  || section->reloc_count == 0
6548 		  || (section->flags & SEC_CODE) == 0)
6549 		continue;
6550 
6551 	      /* If this section is a link-once section that will be
6552 		 discarded, then don't create any stubs.  */
6553 	      if (section->output_section == NULL
6554 		  || section->output_section->owner != output_bfd)
6555 		continue;
6556 
6557 	      /* Get the relocs.  */
6558 	      internal_relocs
6559 		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6560 					     NULL, info->keep_memory);
6561 	      if (internal_relocs == NULL)
6562 		goto error_ret_free_local;
6563 
6564 	      /* Now examine each relocation.  */
6565 	      irela = internal_relocs;
6566 	      irelaend = irela + section->reloc_count;
6567 	      for (; irela < irelaend; irela++)
6568 		{
6569 		  unsigned int r_type, r_indx;
6570 		  asection *sym_sec;
6571 		  bfd_vma sym_value;
6572 		  bfd_vma destination;
6573 		  struct elf32_arm_link_hash_entry *hash;
6574 		  const char *sym_name;
6575 		  unsigned char st_type;
6576 		  enum arm_st_branch_type branch_type;
6577 		  bool created_stub = false;
6578 
6579 		  r_type = ELF32_R_TYPE (irela->r_info);
6580 		  r_indx = ELF32_R_SYM (irela->r_info);
6581 
6582 		  if (r_type >= (unsigned int) R_ARM_max)
6583 		    {
6584 		      bfd_set_error (bfd_error_bad_value);
6585 		    error_ret_free_internal:
6586 		      if (elf_section_data (section)->relocs == NULL)
6587 			free (internal_relocs);
6588 		    /* Fall through.  */
6589 		    error_ret_free_local:
6590 		      if (symtab_hdr->contents != (unsigned char *) local_syms)
6591 			free (local_syms);
6592 		      return false;
6593 		    }
6594 
6595 		  hash = NULL;
6596 		  if (r_indx >= symtab_hdr->sh_info)
6597 		    hash = elf32_arm_hash_entry
6598 		      (elf_sym_hashes (input_bfd)
6599 		       [r_indx - symtab_hdr->sh_info]);
6600 
6601 		  /* Only look for stubs on branch instructions, or
6602 		     non-relaxed TLS calls (R_ARM_TLS_CALL/R_ARM_THM_TLS_CALL).  */
6603 		  if ((r_type != (unsigned int) R_ARM_CALL)
6604 		      && (r_type != (unsigned int) R_ARM_THM_CALL)
6605 		      && (r_type != (unsigned int) R_ARM_JUMP24)
6606 		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6607 		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
6608 		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6609 		      && (r_type != (unsigned int) R_ARM_PLT32)
6610 		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
6611 			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6612 			   && r_type == (elf32_arm_tls_transition
6613 					 (info, r_type,
6614 					  (struct elf_link_hash_entry *) hash))
6615 			   && ((hash ? hash->tls_type
6616 				: (elf32_arm_local_got_tls_type
6617 				   (input_bfd)[r_indx]))
6618 			       & GOT_TLS_GDESC) != 0))
6619 		    continue;
6620 
6621 		  /* Now determine the call target, its name, value,
6622 		     section.  */
6623 		  sym_sec = NULL;
6624 		  sym_value = 0;
6625 		  destination = 0;
6626 		  sym_name = NULL;
6627 
6628 		  if (r_type == (unsigned int) R_ARM_TLS_CALL
6629 		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6630 		    {
6631 		      /* A non-relaxed TLS call.  The target is the
6632 			 PLT-resident trampoline and has nothing to do
6633 			 with the symbol.  */
6634 		      BFD_ASSERT (htab->tls_trampoline > 0);
6635 		      sym_sec = htab->root.splt;
6636 		      sym_value = htab->tls_trampoline;
6637 		      hash = 0;
6638 		      st_type = STT_FUNC;
6639 		      branch_type = ST_BRANCH_TO_ARM;
6640 		    }
6641 		  else if (!hash)
6642 		    {
6643 		      /* It's a local symbol.  */
6644 		      Elf_Internal_Sym *sym;
6645 
6646 		      if (local_syms == NULL)
6647 			{
6648 			  local_syms
6649 			    = (Elf_Internal_Sym *) symtab_hdr->contents;
6650 			  if (local_syms == NULL)
6651 			    local_syms
6652 			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6653 						      symtab_hdr->sh_info, 0,
6654 						      NULL, NULL, NULL);
6655 			  if (local_syms == NULL)
6656 			    goto error_ret_free_internal;
6657 			}
6658 
6659 		      sym = local_syms + r_indx;
6660 		      if (sym->st_shndx == SHN_UNDEF)
6661 			sym_sec = bfd_und_section_ptr;
6662 		      else if (sym->st_shndx == SHN_ABS)
6663 			sym_sec = bfd_abs_section_ptr;
6664 		      else if (sym->st_shndx == SHN_COMMON)
6665 			sym_sec = bfd_com_section_ptr;
6666 		      else
6667 			sym_sec =
6668 			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6669 
6670 		      if (!sym_sec)
6671 			/* This is an undefined symbol.  It can never
6672 			   be resolved.  */
6673 			continue;
6674 
6675 		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6676 			sym_value = sym->st_value;
6677 		      destination = (sym_value + irela->r_addend
6678 				     + sym_sec->output_offset
6679 				     + sym_sec->output_section->vma);
6680 		      st_type = ELF_ST_TYPE (sym->st_info);
6681 		      branch_type =
6682 			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6683 		      sym_name
6684 			= bfd_elf_string_from_elf_section (input_bfd,
6685 							   symtab_hdr->sh_link,
6686 							   sym->st_name);
6687 		    }
6688 		  else
6689 		    {
6690 		      /* It's an external symbol.  */
6691 		      while (hash->root.root.type == bfd_link_hash_indirect
6692 			     || hash->root.root.type == bfd_link_hash_warning)
6693 			hash = ((struct elf32_arm_link_hash_entry *)
6694 				hash->root.root.u.i.link);
6695 
6696 		      if (hash->root.root.type == bfd_link_hash_defined
6697 			  || hash->root.root.type == bfd_link_hash_defweak)
6698 			{
6699 			  sym_sec = hash->root.root.u.def.section;
6700 			  sym_value = hash->root.root.u.def.value;
6701 
6702 			  struct elf32_arm_link_hash_table *globals =
6703 						  elf32_arm_hash_table (info);
6704 
6705 			  /* For a destination in a shared library,
6706 			     use the PLT stub as target address to
6707 			     decide whether a branch stub is
6708 			     needed.  */
6709 			  if (globals != NULL
6710 			      && globals->root.splt != NULL
6711 			      && hash != NULL
6712 			      && hash->root.plt.offset != (bfd_vma) -1)
6713 			    {
6714 			      sym_sec = globals->root.splt;
6715 			      sym_value = hash->root.plt.offset;
6716 			      if (sym_sec->output_section != NULL)
6717 				destination = (sym_value
6718 					       + sym_sec->output_offset
6719 					       + sym_sec->output_section->vma);
6720 			    }
6721 			  else if (sym_sec->output_section != NULL)
6722 			    destination = (sym_value + irela->r_addend
6723 					   + sym_sec->output_offset
6724 					   + sym_sec->output_section->vma);
6725 			}
6726 		      else if ((hash->root.root.type == bfd_link_hash_undefined)
6727 			       || (hash->root.root.type == bfd_link_hash_undefweak))
6728 			{
6729 			  /* For a shared library, use the PLT stub as
6730 			     target address to decide whether a long
6731 			     branch stub is needed.
6732 			     For absolute code, they cannot be handled.  */
6733 			  struct elf32_arm_link_hash_table *globals =
6734 			    elf32_arm_hash_table (info);
6735 
6736 			  if (globals != NULL
6737 			      && globals->root.splt != NULL
6738 			      && hash != NULL
6739 			      && hash->root.plt.offset != (bfd_vma) -1)
6740 			    {
6741 			      sym_sec = globals->root.splt;
6742 			      sym_value = hash->root.plt.offset;
6743 			      if (sym_sec->output_section != NULL)
6744 				destination = (sym_value
6745 					       + sym_sec->output_offset
6746 					       + sym_sec->output_section->vma);
6747 			    }
6748 			  else
6749 			    continue;
6750 			}
6751 		      else
6752 			{
6753 			  bfd_set_error (bfd_error_bad_value);
6754 			  goto error_ret_free_internal;
6755 			}
6756 		      st_type = hash->root.type;
6757 		      branch_type =
6758 			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6759 		      sym_name = hash->root.root.root.string;
6760 		    }
6761 
6762 		  do
6763 		    {
6764 		      bool new_stub;
6765 		      struct elf32_arm_stub_hash_entry *stub_entry;
6766 
6767 		      /* Determine what (if any) linker stub is needed.  */
6768 		      stub_type = arm_type_of_stub (info, section, irela,
6769 						    st_type, &branch_type,
6770 						    hash, destination, sym_sec,
6771 						    input_bfd, sym_name);
6772 		      if (stub_type == arm_stub_none)
6773 			break;
6774 
6775 		      /* We've either created a stub for this reloc already,
6776 			 or we are about to.  */
6777 		      stub_entry =
6778 			elf32_arm_create_stub (htab, stub_type, section, irela,
6779 					       sym_sec, hash,
6780 					       (char *) sym_name, sym_value,
6781 					       branch_type, &new_stub);
6782 
6783 		      created_stub = stub_entry != NULL;
6784 		      if (!created_stub)
6785 			goto error_ret_free_internal;
6786 		      else if (!new_stub)
6787 			break;
6788 		      else
6789 			stub_changed = true;
6790 		    }
6791 		  while (0);
6792 
6793 		  /* Look for relocations which might trigger Cortex-A8
6794 		     erratum.  */
6795 		  if (htab->fix_cortex_a8
6796 		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
6797 			  || r_type == (unsigned int) R_ARM_THM_JUMP19
6798 			  || r_type == (unsigned int) R_ARM_THM_CALL
6799 			  || r_type == (unsigned int) R_ARM_THM_XPC22))
6800 		    {
6801 		      bfd_vma from = section->output_section->vma
6802 				     + section->output_offset
6803 				     + irela->r_offset;
6804 
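		      /* The erratum can only be triggered when the first
			 halfword of the 32-bit Thumb branch occupies the
			 last two bytes of a 4KB page, i.e. page offset
			 0xffe.  */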
6805 		      if ((from & 0xfff) == 0xffe)
6806 			{
6807 			  /* Found a candidate.  Note we haven't checked the
6808 			     destination is within 4K here: if we do so (and
6809 			     don't create an entry in a8_relocs) we can't tell
6810 			     that a branch should have been relocated when
6811 			     scanning later.  */
6812 			  if (num_a8_relocs == a8_reloc_table_size)
6813 			    {
6814 			      a8_reloc_table_size *= 2;
6815 			      a8_relocs = (struct a8_erratum_reloc *)
6816 				  bfd_realloc (a8_relocs,
6817 					       sizeof (struct a8_erratum_reloc)
6818 					       * a8_reloc_table_size);
6819 			    }
6820 
6821 			  a8_relocs[num_a8_relocs].from = from;
6822 			  a8_relocs[num_a8_relocs].destination = destination;
6823 			  a8_relocs[num_a8_relocs].r_type = r_type;
6824 			  a8_relocs[num_a8_relocs].branch_type = branch_type;
6825 			  a8_relocs[num_a8_relocs].sym_name = sym_name;
6826 			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6827 			  a8_relocs[num_a8_relocs].hash = hash;
6828 
6829 			  num_a8_relocs++;
6830 			}
6831 		    }
6832 		}
6833 
6834 	      /* We're done with the internal relocs, free them.  */
6835 	      if (elf_section_data (section)->relocs == NULL)
6836 		free (internal_relocs);
6837 	    }
6838 
6839 	  if (htab->fix_cortex_a8)
6840 	    {
6841 	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
6842 	      qsort (a8_relocs, num_a8_relocs,
6843 		     sizeof (struct a8_erratum_reloc),
6844 		     &a8_reloc_compare);
6845 
6846 	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
6847 	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6848 					  &num_a8_fixes, &a8_fix_table_size,
6849 					  a8_relocs, num_a8_relocs,
6850 					  prev_num_a8_fixes, &stub_changed)
6851 		  != 0)
6852 		goto error_ret_free_local;
6853 	    }
6854 
6855 	  if (local_syms != NULL
6856 	      && symtab_hdr->contents != (unsigned char *) local_syms)
6857 	    {
6858 	      if (!info->keep_memory)
6859 		free (local_syms);
6860 	      else
6861 		symtab_hdr->contents = (unsigned char *) local_syms;
6862 	    }
6863 	}
6864 
6865       if (first_veneer_scan
6866 	  && !set_cmse_veneer_addr_from_implib (info, htab,
6867 						&cmse_stub_created))
6868 	ret = false;
6869 
6870       if (prev_num_a8_fixes != num_a8_fixes)
6871 	stub_changed = true;
6872 
6873       if (!stub_changed)
6874 	break;
6875 
6876       /* OK, we've added some stubs.  Find out the new size of the
6877 	 stub sections.  */
6878       for (stub_sec = htab->stub_bfd->sections;
6879 	   stub_sec != NULL;
6880 	   stub_sec = stub_sec->next)
6881 	{
6882 	  /* Ignore non-stub sections.  */
6883 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
6884 	    continue;
6885 
6886 	  stub_sec->size = 0;
6887 	}
6888 
6889       /* Add new SG veneers after those already in the input import
6890 	 library.  */
6891       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6892 	   stub_type++)
6893 	{
6894 	  bfd_vma *start_offset_p;
6895 	  asection **stub_sec_p;
6896 
6897 	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6898 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6899 	  if (start_offset_p == NULL)
6900 	    continue;
6901 
6902 	  BFD_ASSERT (stub_sec_p != NULL);
6903 	  if (*stub_sec_p != NULL)
6904 	    (*stub_sec_p)->size = *start_offset_p;
6905 	}
6906 
6907       /* Compute stub section size, considering padding.  */
6908       bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6909       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6910 	   stub_type++)
6911 	{
6912 	  int size, padding;
6913 	  asection **stub_sec_p;
6914 
6915 	  padding = arm_dedicated_stub_section_padding (stub_type);
6916 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6917 	  /* Skip if no stub input section or no stub section padding
6918 	     required.  */
6919 	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6920 	    continue;
6921 	  /* Stub section padding required but no dedicated section.  */
6922 	  BFD_ASSERT (stub_sec_p);
6923 
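	  /* Round the section size up to the next multiple of the padding,
	     which must be a power of two for this rounding to work; e.g.
	     (values illustrative) a 40-byte section with 32-byte padding is
	     padded to 64 bytes.  */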
6924 	  size = (*stub_sec_p)->size;
6925 	  size = (size + padding - 1) & ~(padding - 1);
6926 	  (*stub_sec_p)->size = size;
6927 	}
6928 
6929       /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
6930       if (htab->fix_cortex_a8)
6931 	for (i = 0; i < num_a8_fixes; i++)
6932 	  {
6933 	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6934 			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6935 
6936 	    if (stub_sec == NULL)
6937 	      return false;
6938 
6939 	    stub_sec->size
6940 	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6941 					      NULL);
6942 	  }
6943 
6944 
6945       /* Ask the linker to do its stuff.  */
6946       (*htab->layout_sections_again) ();
6947       first_veneer_scan = false;
6948     }
6949 
6950   /* Add stubs for Cortex-A8 erratum fixes now.  */
6951   if (htab->fix_cortex_a8)
6952     {
6953       for (i = 0; i < num_a8_fixes; i++)
6954 	{
6955 	  struct elf32_arm_stub_hash_entry *stub_entry;
6956 	  char *stub_name = a8_fixes[i].stub_name;
6957 	  asection *section = a8_fixes[i].section;
6958 	  unsigned int section_id = a8_fixes[i].section->id;
6959 	  asection *link_sec = htab->stub_group[section_id].link_sec;
6960 	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
6961 	  const insn_sequence *template_sequence;
6962 	  int template_size, size = 0;
6963 
6964 	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6965 					     true, false);
6966 	  if (stub_entry == NULL)
6967 	    {
6968 	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6969 				  section->owner, stub_name);
6970 	      return false;
6971 	    }
6972 
6973 	  stub_entry->stub_sec = stub_sec;
6974 	  stub_entry->stub_offset = (bfd_vma) -1;
6975 	  stub_entry->id_sec = link_sec;
6976 	  stub_entry->stub_type = a8_fixes[i].stub_type;
6977 	  stub_entry->source_value = a8_fixes[i].offset;
6978 	  stub_entry->target_section = a8_fixes[i].section;
6979 	  stub_entry->target_value = a8_fixes[i].target_offset;
6980 	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
6981 	  stub_entry->branch_type = a8_fixes[i].branch_type;
6982 
6983 	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
6984 					      &template_sequence,
6985 					      &template_size);
6986 
6987 	  stub_entry->stub_size = size;
6988 	  stub_entry->stub_template = template_sequence;
6989 	  stub_entry->stub_template_size = template_size;
6990 	}
6991 
6992       /* Stash the Cortex-A8 erratum fix array for use later in
6993 	 elf32_arm_write_section().  */
6994       htab->a8_erratum_fixes = a8_fixes;
6995       htab->num_a8_erratum_fixes = num_a8_fixes;
6996     }
6997   else
6998     {
6999       htab->a8_erratum_fixes = NULL;
7000       htab->num_a8_erratum_fixes = 0;
7001     }
7002   return ret;
7003 }
7004 
7005 /* Build all the stubs associated with the current output file.  The
7006    stubs are kept in a hash table attached to the main linker hash
7007    table.  We also set up the .plt entries for statically linked PIC
7008    functions here.  This function is called via arm_elf_finish in the
7009    linker.  */
7010 
7011 bool
7012 elf32_arm_build_stubs (struct bfd_link_info *info)
7013 {
7014   asection *stub_sec;
7015   struct bfd_hash_table *table;
7016   enum elf32_arm_stub_type stub_type;
7017   struct elf32_arm_link_hash_table *htab;
7018 
7019   htab = elf32_arm_hash_table (info);
7020   if (htab == NULL)
7021     return false;
7022 
7023   for (stub_sec = htab->stub_bfd->sections;
7024        stub_sec != NULL;
7025        stub_sec = stub_sec->next)
7026     {
7027       bfd_size_type size;
7028 
7029       /* Ignore non-stub sections.  */
7030       if (!strstr (stub_sec->name, STUB_SUFFIX))
7031 	continue;
7032 
7033       /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
7034 	 must at least be done for stub sections requiring padding and for SG
7035 	 veneers, to ensure that non-secure code branching to a removed SG
7036 	 veneer causes an error.  */
7037       size = stub_sec->size;
7038       stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7039       if (stub_sec->contents == NULL && size != 0)
7040 	return false;
7041 
7042       stub_sec->size = 0;
7043     }
7044 
7045   /* Add new SG veneers after those already in the input import library.  */
7046   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7047     {
7048       bfd_vma *start_offset_p;
7049       asection **stub_sec_p;
7050 
7051       start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7052       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7053       if (start_offset_p == NULL)
7054 	continue;
7055 
7056       BFD_ASSERT (stub_sec_p != NULL);
7057       if (*stub_sec_p != NULL)
7058 	(*stub_sec_p)->size = *start_offset_p;
7059     }
7060 
7061   /* Build the stubs as directed by the stub hash table.  */
7062   table = &htab->stub_hash_table;
7063   bfd_hash_traverse (table, arm_build_one_stub, info);
7064   if (htab->fix_cortex_a8)
7065     {
7066       /* Place the cortex a8 stubs last.  */
7067       htab->fix_cortex_a8 = -1;
7068       bfd_hash_traverse (table, arm_build_one_stub, info);
7069     }
7070 
7071   return true;
7072 }
7073 
7074 /* Locate the Thumb encoded calling stub for NAME.  */
7075 
7076 static struct elf_link_hash_entry *
7077 find_thumb_glue (struct bfd_link_info *link_info,
7078 		 const char *name,
7079 		 char **error_message)
7080 {
7081   char *tmp_name;
7082   struct elf_link_hash_entry *hash;
7083   struct elf32_arm_link_hash_table *hash_table;
7084 
7085   /* We need a pointer to the armelf specific hash table.  */
7086   hash_table = elf32_arm_hash_table (link_info);
7087   if (hash_table == NULL)
7088     return NULL;
7089 
7090   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7091 				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7092 
7093   BFD_ASSERT (tmp_name);
7094 
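  /* For example, for a target function `foo' this constructs the veneer name
     `__foo_from_thumb' (see THUMB2ARM_GLUE_ENTRY_NAME) before looking it up
     in the link hash table.  */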
7095   sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7096 
7097   hash = elf_link_hash_lookup
7098     (&(hash_table)->root, tmp_name, false, false, true);
7099 
7100   if (hash == NULL
7101       && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7102 		   "Thumb", tmp_name, name) == -1)
7103     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7104 
7105   free (tmp_name);
7106 
7107   return hash;
7108 }
7109 
7110 /* Locate the ARM encoded calling stub for NAME.  */
7111 
7112 static struct elf_link_hash_entry *
7113 find_arm_glue (struct bfd_link_info *link_info,
7114 	       const char *name,
7115 	       char **error_message)
7116 {
7117   char *tmp_name;
7118   struct elf_link_hash_entry *myh;
7119   struct elf32_arm_link_hash_table *hash_table;
7120 
7121   /* We need a pointer to the elfarm specific hash table.  */
7122   hash_table = elf32_arm_hash_table (link_info);
7123   if (hash_table == NULL)
7124     return NULL;
7125 
7126   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7127 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7128   BFD_ASSERT (tmp_name);
7129 
7130   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7131 
7132   myh = elf_link_hash_lookup
7133     (&(hash_table)->root, tmp_name, false, false, true);
7134 
7135   if (myh == NULL
7136       && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7137 		   "ARM", tmp_name, name) == -1)
7138     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7139 
7140   free (tmp_name);
7141 
7142   return myh;
7143 }
7144 
7145 /* ARM->Thumb glue (static images):
7146 
7147    .arm
7148    __func_from_arm:
7149    ldr r12, __func_addr
7150    bx  r12
7151    __func_addr:
7152    .word func    @ behave as if you saw an ARM_32 reloc.
7153 
7154    (v5t static images)
7155    .arm
7156    __func_from_arm:
7157    ldr pc, __func_addr
7158    __func_addr:
7159    .word func    @ behave as if you saw an ARM_32 reloc.
7160 
7161    (relocatable images)
7162    .arm
7163    __func_from_arm:
7164    ldr r12, __func_offset
7165    add r12, r12, pc
7166    bx  r12
7167    __func_offset:
7168    .word func - .   */
7169 
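/* Sizes of the glue sequences shown above: the static (pre-v5T) sequence is
   three words (12 bytes), the v5T one two words (8 bytes) and the relocatable
   (PIC) one four words (16 bytes).  */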
7170 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7171 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7172 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7173 static const insn32 a2t3_func_addr_insn = 0x00000001;
7174 
7175 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7176 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7177 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7178 
7179 #define ARM2THUMB_PIC_GLUE_SIZE 16
7180 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7181 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7182 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
7183 
7184 /* Thumb->ARM:				Thumb->(non-interworking aware) ARM
7185 
7186      .thumb				.thumb
7187      .align 2				.align 2
7188  __func_from_thumb:		    __func_from_thumb:
7189      bx pc				push {r6, lr}
7190      nop				ldr  r6, __func_addr
7191      .arm				mov  lr, pc
7192      b func				bx   r6
7193 					.arm
7194 				    ;; back_to_thumb
7195 					ldmia r13! {r6, lr}
7196 					bx    lr
7197 				    __func_addr:
7198 					.word	     func  */
7199 
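/* The Thumb->ARM sequence in the left-hand column above is two 16-bit Thumb
   instructions followed by one 32-bit ARM branch: 2 + 2 + 4 = 8 bytes, as
   reflected by THUMB2ARM_GLUE_SIZE.  */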
7200 #define THUMB2ARM_GLUE_SIZE 8
7201 static const insn16 t2a1_bx_pc_insn = 0x4778;
7202 static const insn16 t2a2_noop_insn = 0x46c0;
7203 static const insn32 t2a3_b_insn = 0xea000000;
7204 
7205 #define VFP11_ERRATUM_VENEER_SIZE 8
7206 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7207 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7208 
7209 #define ARM_BX_VENEER_SIZE 12
7210 static const insn32 armbx1_tst_insn = 0xe3100001;
7211 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7212 static const insn32 armbx3_bx_insn = 0xe12fff10;
7213 
7214 #ifndef ELFARM_NABI_C_INCLUDED
7215 static void
7216 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7217 {
7218   asection * s;
7219   bfd_byte * contents;
7220 
7221   if (size == 0)
7222     {
7223       /* Do not include empty glue sections in the output.  */
7224       if (abfd != NULL)
7225 	{
7226 	  s = bfd_get_linker_section (abfd, name);
7227 	  if (s != NULL)
7228 	    s->flags |= SEC_EXCLUDE;
7229 	}
7230       return;
7231     }
7232 
7233   BFD_ASSERT (abfd != NULL);
7234 
7235   s = bfd_get_linker_section (abfd, name);
7236   BFD_ASSERT (s != NULL);
7237 
7238   contents = (bfd_byte *) bfd_zalloc (abfd, size);
7239 
7240   BFD_ASSERT (s->size == size);
7241   s->contents = contents;
7242 }
7243 
7244 bool
7245 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7246 {
7247   struct elf32_arm_link_hash_table * globals;
7248 
7249   globals = elf32_arm_hash_table (info);
7250   BFD_ASSERT (globals != NULL);
7251 
7252   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7253 				   globals->arm_glue_size,
7254 				   ARM2THUMB_GLUE_SECTION_NAME);
7255 
7256   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7257 				   globals->thumb_glue_size,
7258 				   THUMB2ARM_GLUE_SECTION_NAME);
7259 
7260   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7261 				   globals->vfp11_erratum_glue_size,
7262 				   VFP11_ERRATUM_VENEER_SECTION_NAME);
7263 
7264   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7265 				   globals->stm32l4xx_erratum_glue_size,
7266 				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7267 
7268   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7269 				   globals->bx_glue_size,
7270 				   ARM_BX_GLUE_SECTION_NAME);
7271 
7272   return true;
7273 }
7274 
7275 /* Allocate space and symbols for calling a Thumb function from ARM mode.
7276    Returns the symbol identifying the stub.  */
7277 
7278 static struct elf_link_hash_entry *
7279 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7280 			  struct elf_link_hash_entry * h)
7281 {
7282   const char * name = h->root.root.string;
7283   asection * s;
7284   char * tmp_name;
7285   struct elf_link_hash_entry * myh;
7286   struct bfd_link_hash_entry * bh;
7287   struct elf32_arm_link_hash_table * globals;
7288   bfd_vma val;
7289   bfd_size_type size;
7290 
7291   globals = elf32_arm_hash_table (link_info);
7292   BFD_ASSERT (globals != NULL);
7293   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7294 
7295   s = bfd_get_linker_section
7296     (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7297 
7298   BFD_ASSERT (s != NULL);
7299 
7300   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7301 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7302   BFD_ASSERT (tmp_name);
7303 
7304   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7305 
7306   myh = elf_link_hash_lookup
7307     (&(globals)->root, tmp_name, false, false, true);
7308 
7309   if (myh != NULL)
7310     {
7311       /* We've already seen this guy.  */
7312       free (tmp_name);
7313       return myh;
7314     }
7315 
7316   /* The only trick here is using hash_table->arm_glue_size as the value.
7317      Even though the section isn't allocated yet, this is where we will be
7318      putting it.  The +1 on the value marks that the stub has not been
7319      output yet - not that it is a Thumb function.  */
7320   bh = NULL;
7321   val = globals->arm_glue_size + 1;
7322   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7323 				    tmp_name, BSF_GLOBAL, s, val,
7324 				    NULL, true, false, &bh);
7325 
7326   myh = (struct elf_link_hash_entry *) bh;
7327   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7328   myh->forced_local = 1;
7329 
7330   free (tmp_name);
7331 
7332   if (bfd_link_pic (link_info)
7333       || globals->root.is_relocatable_executable
7334       || globals->pic_veneer)
7335     size = ARM2THUMB_PIC_GLUE_SIZE;
7336   else if (globals->use_blx)
7337     size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7338   else
7339     size = ARM2THUMB_STATIC_GLUE_SIZE;
7340 
7341   s->size += size;
7342   globals->arm_glue_size += size;
7343 
7344   return myh;
7345 }
7346 
7347 /* Allocate space for ARMv4 BX veneers.  */
7348 
7349 static void
7350 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7351 {
7352   asection * s;
7353   struct elf32_arm_link_hash_table *globals;
7354   char *tmp_name;
7355   struct elf_link_hash_entry *myh;
7356   struct bfd_link_hash_entry *bh;
7357   bfd_vma val;
7358 
7359   /* BX PC does not need a veneer.  */
7360   if (reg == 15)
7361     return;
7362 
7363   globals = elf32_arm_hash_table (link_info);
7364   BFD_ASSERT (globals != NULL);
7365   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7366 
7367   /* Check if this veneer has already been allocated.  */
7368   if (globals->bx_glue_offset[reg])
7369     return;
7370 
7371   s = bfd_get_linker_section
7372     (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7373 
7374   BFD_ASSERT (s != NULL);
7375 
7376   /* Add symbol for veneer.  */
7377   tmp_name = (char *)
7378       bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7379   BFD_ASSERT (tmp_name);
7380 
7381   sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7382 
7383   myh = elf_link_hash_lookup
7384     (&(globals)->root, tmp_name, false, false, false);
7385 
7386   BFD_ASSERT (myh == NULL);
7387 
7388   bh = NULL;
7389   val = globals->bx_glue_size;
7390   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7391 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7392 				    NULL, true, false, &bh);
7393 
7394   myh = (struct elf_link_hash_entry *) bh;
7395   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7396   myh->forced_local = 1;
7397 
7398   s->size += ARM_BX_VENEER_SIZE;
7399   globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7400   globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7401 }
7402 
7403 
7404 /* Add an entry to the code/data map for section SEC.  */
7405 
7406 static void
7407 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7408 {
7409   struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7410   unsigned int newidx;
7411 
7412   if (sec_data->map == NULL)
7413     {
7414       sec_data->map = (elf32_arm_section_map *)
7415 	  bfd_malloc (sizeof (elf32_arm_section_map));
7416       sec_data->mapcount = 0;
7417       sec_data->mapsize = 1;
7418     }
7419 
7420   newidx = sec_data->mapcount++;
7421 
7422   if (sec_data->mapcount > sec_data->mapsize)
7423     {
7424       sec_data->mapsize *= 2;
7425       sec_data->map = (elf32_arm_section_map *)
7426 	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7427 			       * sizeof (elf32_arm_section_map));
7428     }
7429 
7430   if (sec_data->map)
7431     {
7432       sec_data->map[newidx].vma = vma;
7433       sec_data->map[newidx].type = type;
7434     }
7435 }
7436 
7437 
7438 /* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
7439    veneers are handled for now.  */
7440 
7441 static bfd_vma
7442 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7443 			     elf32_vfp11_erratum_list *branch,
7444 			     bfd *branch_bfd,
7445 			     asection *branch_sec,
7446 			     unsigned int offset)
7447 {
7448   asection *s;
7449   struct elf32_arm_link_hash_table *hash_table;
7450   char *tmp_name;
7451   struct elf_link_hash_entry *myh;
7452   struct bfd_link_hash_entry *bh;
7453   bfd_vma val;
7454   struct _arm_elf_section_data *sec_data;
7455   elf32_vfp11_erratum_list *newerr;
7456 
7457   hash_table = elf32_arm_hash_table (link_info);
7458   BFD_ASSERT (hash_table != NULL);
7459   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7460 
7461   s = bfd_get_linker_section
7462     (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7463 
7464   BFD_ASSERT (s != NULL);
7465 
7466   sec_data = elf32_arm_section_data (s);
7467 
7468   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7469 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7470   BFD_ASSERT (tmp_name);
7471 
7472   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7473 	   hash_table->num_vfp11_fixes);
7474 
7475   myh = elf_link_hash_lookup
7476     (&(hash_table)->root, tmp_name, false, false, false);
7477 
7478   BFD_ASSERT (myh == NULL);
7479 
7480   bh = NULL;
7481   val = hash_table->vfp11_erratum_glue_size;
7482   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7483 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7484 				    NULL, true, false, &bh);
7485 
7486   myh = (struct elf_link_hash_entry *) bh;
7487   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7488   myh->forced_local = 1;
7489 
7490   /* Link veneer back to calling location.  */
7491   sec_data->erratumcount += 1;
7492   newerr = (elf32_vfp11_erratum_list *)
7493       bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7494 
7495   newerr->type = VFP11_ERRATUM_ARM_VENEER;
7496   newerr->vma = -1;
7497   newerr->u.v.branch = branch;
7498   newerr->u.v.id = hash_table->num_vfp11_fixes;
7499   branch->u.b.veneer = newerr;
7500 
7501   newerr->next = sec_data->erratumlist;
7502   sec_data->erratumlist = newerr;
7503 
7504   /* A symbol for the return from the veneer.  */
7505   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7506 	   hash_table->num_vfp11_fixes);
7507 
7508   myh = elf_link_hash_lookup
7509     (&(hash_table)->root, tmp_name, false, false, false);
7510 
7511   if (myh != NULL)
7512     abort ();
7513 
7514   bh = NULL;
7515   val = offset + 4;
7516   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7517 				    branch_sec, val, NULL, true, false, &bh);
7518 
7519   myh = (struct elf_link_hash_entry *) bh;
7520   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7521   myh->forced_local = 1;
7522 
7523   free (tmp_name);
7524 
7525   /* Generate a mapping symbol for the veneer section, and explicitly add an
7526      entry for that symbol to the code/data map for the section.  */
7527   if (hash_table->vfp11_erratum_glue_size == 0)
7528     {
7529       bh = NULL;
7530       /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
7531 	 ever requires this erratum fix.  */
7532       _bfd_generic_link_add_one_symbol (link_info,
7533 					hash_table->bfd_of_glue_owner, "$a",
7534 					BSF_LOCAL, s, 0, NULL,
7535 					true, false, &bh);
7536 
7537       myh = (struct elf_link_hash_entry *) bh;
7538       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7539       myh->forced_local = 1;
7540 
7541       /* The elf32_arm_init_maps function only cares about symbols from input
7542 	 BFDs.  We must make a note of this generated mapping symbol
7543 	 ourselves so that code byteswapping works properly in
7544 	 elf32_arm_write_section.  */
7545       elf32_arm_section_map_add (s, 'a', 0);
7546     }
7547 
7548   s->size += VFP11_ERRATUM_VENEER_SIZE;
7549   hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7550   hash_table->num_vfp11_fixes++;
7551 
7552   /* The offset of the veneer.  */
7553   return val;
7554 }
7555 
7556 /* Record information about a STM32L4XX STM erratum veneer.  Only Thumb-mode
7557    veneers need to be handled because the erratum applies only to Cortex-M.  */
7558 
7559 static bfd_vma
7560 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7561 				 elf32_stm32l4xx_erratum_list *branch,
7562 				 bfd *branch_bfd,
7563 				 asection *branch_sec,
7564 				 unsigned int offset,
7565 				 bfd_size_type veneer_size)
7566 {
7567   asection *s;
7568   struct elf32_arm_link_hash_table *hash_table;
7569   char *tmp_name;
7570   struct elf_link_hash_entry *myh;
7571   struct bfd_link_hash_entry *bh;
7572   bfd_vma val;
7573   struct _arm_elf_section_data *sec_data;
7574   elf32_stm32l4xx_erratum_list *newerr;
7575 
7576   hash_table = elf32_arm_hash_table (link_info);
7577   BFD_ASSERT (hash_table != NULL);
7578   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7579 
7580   s = bfd_get_linker_section
7581     (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7582 
7583   BFD_ASSERT (s != NULL);
7584 
7585   sec_data = elf32_arm_section_data (s);
7586 
7587   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7588 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7589   BFD_ASSERT (tmp_name);
7590 
7591   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7592 	   hash_table->num_stm32l4xx_fixes);
7593 
7594   myh = elf_link_hash_lookup
7595     (&(hash_table)->root, tmp_name, false, false, false);
7596 
7597   BFD_ASSERT (myh == NULL);
7598 
7599   bh = NULL;
7600   val = hash_table->stm32l4xx_erratum_glue_size;
7601   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7602 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7603 				    NULL, true, false, &bh);
7604 
7605   myh = (struct elf_link_hash_entry *) bh;
7606   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7607   myh->forced_local = 1;
7608 
7609   /* Link veneer back to calling location.  */
7610   sec_data->stm32l4xx_erratumcount += 1;
7611   newerr = (elf32_stm32l4xx_erratum_list *)
7612       bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7613 
7614   newerr->type = STM32L4XX_ERRATUM_VENEER;
7615   newerr->vma = -1;
7616   newerr->u.v.branch = branch;
7617   newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7618   branch->u.b.veneer = newerr;
7619 
7620   newerr->next = sec_data->stm32l4xx_erratumlist;
7621   sec_data->stm32l4xx_erratumlist = newerr;
7622 
7623   /* A symbol for the return from the veneer.  */
7624   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7625 	   hash_table->num_stm32l4xx_fixes);
7626 
7627   myh = elf_link_hash_lookup
7628     (&(hash_table)->root, tmp_name, false, false, false);
7629 
7630   if (myh != NULL)
7631     abort ();
7632 
7633   bh = NULL;
7634   val = offset + 4;
7635   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7636 				    branch_sec, val, NULL, true, false, &bh);
7637 
7638   myh = (struct elf_link_hash_entry *) bh;
7639   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7640   myh->forced_local = 1;
7641 
7642   free (tmp_name);
7643 
7644   /* Generate a mapping symbol for the veneer section, and explicitly add an
7645      entry for that symbol to the code/data map for the section.  */
7646   if (hash_table->stm32l4xx_erratum_glue_size == 0)
7647     {
7648       bh = NULL;
7649       /* Creates a THUMB symbol since there is no other choice.  */
7650       _bfd_generic_link_add_one_symbol (link_info,
7651 					hash_table->bfd_of_glue_owner, "$t",
7652 					BSF_LOCAL, s, 0, NULL,
7653 					true, false, &bh);
7654 
7655       myh = (struct elf_link_hash_entry *) bh;
7656       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7657       myh->forced_local = 1;
7658 
7659       /* The elf32_arm_init_maps function only cares about symbols from input
7660 	 BFDs.  We must make a note of this generated mapping symbol
7661 	 ourselves so that code byteswapping works properly in
7662 	 elf32_arm_write_section.  */
7663       elf32_arm_section_map_add (s, 't', 0);
7664     }
7665 
7666   s->size += veneer_size;
7667   hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7668   hash_table->num_stm32l4xx_fixes++;
7669 
7670   /* The offset of the veneer.  */
7671   return val;
7672 }
7673 
7674 #define ARM_GLUE_SECTION_FLAGS \
7675   (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7676    | SEC_READONLY | SEC_LINKER_CREATED)
7677 
7678 /* Create a fake section for use by the ARM backend of the linker.  */
7679 
7680 static bool
7681 arm_make_glue_section (bfd * abfd, const char * name)
7682 {
7683   asection * sec;
7684 
7685   sec = bfd_get_linker_section (abfd, name);
7686   if (sec != NULL)
7687     /* Already made.  */
7688     return true;
7689 
7690   sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7691 
7692   if (sec == NULL
7693       || !bfd_set_section_alignment (sec, 2))
7694     return false;
7695 
7696   /* Set the gc mark to prevent the section from being removed by garbage
7697      collection, despite the fact that no relocs refer to this section.  */
7698   sec->gc_mark = 1;
7699 
7700   return true;
7701 }
7702 
7703 /* Set size of .plt entries.  This function is called from the
7704    linker scripts in ld/emultempl/{armelf}.em.  */
7705 
7706 void
7707 bfd_elf32_arm_use_long_plt (void)
7708 {
7709   elf32_arm_use_long_plt_entry = true;
7710 }
7711 
7712 /* Add the glue sections to ABFD.  This function is called from the
7713    linker scripts in ld/emultempl/{armelf}.em.  */
7714 
7715 bool
7716 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7717 					struct bfd_link_info *info)
7718 {
7719   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7720   bool dostm32l4xx = globals
7721     && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7722   bool addglue;
7723 
7724   /* If we are only performing a partial
7725      link do not bother adding the glue.  */
7726   if (bfd_link_relocatable (info))
7727     return true;
7728 
7729   addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7730     && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7731     && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7732     && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7733 
7734   if (!dostm32l4xx)
7735     return addglue;
7736 
7737   return addglue
7738     && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7739 }
7740 
7741 /* Mark with SEC_KEEP the output sections of stub types that need a dedicated
7742    output section.  This ensures they are not marked for deletion by
7743    strip_excluded_output_sections () when veneers are going to be created
7744    later.  Not doing so would trigger an assert on empty section size in
7745    lang_size_sections_1 ().  */
7746 
7747 void
7748 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7749 {
7750   enum elf32_arm_stub_type stub_type;
7751 
7752   /* If we are only performing a partial
7753      link do not bother adding the glue.  */
7754   if (bfd_link_relocatable (info))
7755     return;
7756 
7757   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7758     {
7759       asection *out_sec;
7760       const char *out_sec_name;
7761 
7762       if (!arm_dedicated_stub_output_section_required (stub_type))
7763 	continue;
7764 
7765       out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7766       out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7767       if (out_sec != NULL)
7768 	out_sec->flags |= SEC_KEEP;
7769     }
7770 }
7771 
7772 /* Select a BFD to be used to hold the sections used by the glue code.
7773    This function is called from the linker scripts in ld/emultempl/
7774    {armelf/pe}.em.  */
7775 
7776 bool
7777 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7778 {
7779   struct elf32_arm_link_hash_table *globals;
7780 
7781   /* If we are only performing a partial link
7782      do not bother getting a bfd to hold the glue.  */
7783   if (bfd_link_relocatable (info))
7784     return true;
7785 
7786   /* Make sure we don't attach the glue sections to a dynamic object.  */
7787   BFD_ASSERT (!(abfd->flags & DYNAMIC));
7788 
7789   globals = elf32_arm_hash_table (info);
7790   BFD_ASSERT (globals != NULL);
7791 
7792   if (globals->bfd_of_glue_owner != NULL)
7793     return true;
7794 
7795   /* Save the bfd for later use.  */
7796   globals->bfd_of_glue_owner = abfd;
7797 
7798   return true;
7799 }
7800 
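/* Decide whether BLX may be used for interworking branches.  BLX exists from
   ARMv5T onwards, so anything newer than v4T normally qualifies; when the
   ARM1176 workaround is enabled we are stricter and only allow v6T2 or
   architectures beyond v6K, mirroring the checks below.  */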
7801 static void
7802 check_use_blx (struct elf32_arm_link_hash_table *globals)
7803 {
7804   int cpu_arch;
7805 
7806   cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7807 				       Tag_CPU_arch);
7808 
7809   if (globals->fix_arm1176)
7810     {
7811       if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7812 	globals->use_blx = 1;
7813     }
7814   else
7815     {
7816       if (cpu_arch > TAG_CPU_ARCH_V4T)
7817 	globals->use_blx = 1;
7818     }
7819 }
7820 
7821 bool
7822 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7823 					 struct bfd_link_info *link_info)
7824 {
7825   Elf_Internal_Shdr *symtab_hdr;
7826   Elf_Internal_Rela *internal_relocs = NULL;
7827   Elf_Internal_Rela *irel, *irelend;
7828   bfd_byte *contents = NULL;
7829 
7830   asection *sec;
7831   struct elf32_arm_link_hash_table *globals;
7832 
7833   /* If we are only performing a partial link do not bother
7834      to construct any glue.  */
7835   if (bfd_link_relocatable (link_info))
7836     return true;
7837 
7838   /* Here we have a bfd that is to be included on the link.  We have a
7839      hook to do reloc rummaging, before section sizes are nailed down.  */
7840   globals = elf32_arm_hash_table (link_info);
7841   BFD_ASSERT (globals != NULL);
7842 
7843   check_use_blx (globals);
7844 
7845   if (globals->byteswap_code && !bfd_big_endian (abfd))
7846     {
7847       _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7848 			  abfd);
7849       return false;
7850     }
7851 
7852   /* PR 5398: If we have not decided to include any loadable sections in
7853      the output then we will not have a glue owner bfd.  This is OK, it
7854      just means that there is nothing else for us to do here.  */
7855   if (globals->bfd_of_glue_owner == NULL)
7856     return true;
7857 
7858   /* Rummage around all the relocs and map the glue vectors.  */
7859   sec = abfd->sections;
7860 
7861   if (sec == NULL)
7862     return true;
7863 
7864   for (; sec != NULL; sec = sec->next)
7865     {
7866       if (sec->reloc_count == 0)
7867 	continue;
7868 
7869       if ((sec->flags & SEC_EXCLUDE) != 0)
7870 	continue;
7871 
7872       symtab_hdr = & elf_symtab_hdr (abfd);
7873 
7874       /* Load the relocs.  */
7875       internal_relocs
7876 	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);
7877 
7878       if (internal_relocs == NULL)
7879 	goto error_return;
7880 
7881       irelend = internal_relocs + sec->reloc_count;
7882       for (irel = internal_relocs; irel < irelend; irel++)
7883 	{
7884 	  long r_type;
7885 	  unsigned long r_index;
7886 
7887 	  struct elf_link_hash_entry *h;
7888 
7889 	  r_type = ELF32_R_TYPE (irel->r_info);
7890 	  r_index = ELF32_R_SYM (irel->r_info);
7891 
7892 	  /* These are the only relocation types we care about.  */
7893 	  if (   r_type != R_ARM_PC24
7894 	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7895 	    continue;
7896 
7897 	  /* Get the section contents if we haven't done so already.  */
7898 	  if (contents == NULL)
7899 	    {
7900 	      /* Get cached copy if it exists.  */
7901 	      if (elf_section_data (sec)->this_hdr.contents != NULL)
7902 		contents = elf_section_data (sec)->this_hdr.contents;
7903 	      else
7904 		{
7905 		  /* Go get them off disk.  */
7906 		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7907 		    goto error_return;
7908 		}
7909 	    }
7910 
7911 	  if (r_type == R_ARM_V4BX)
7912 	    {
7913 	      int reg;
7914 
7915 	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7916 	      record_arm_bx_glue (link_info, reg);
7917 	      continue;
7918 	    }
7919 
7920 	  /* If the relocation is not against a symbol it cannot concern us.  */
7921 	  h = NULL;
7922 
7923 	  /* We don't care about local symbols.  */
7924 	  if (r_index < symtab_hdr->sh_info)
7925 	    continue;
7926 
7927 	  /* This is an external symbol.  */
7928 	  r_index -= symtab_hdr->sh_info;
7929 	  h = (struct elf_link_hash_entry *)
7930 	    elf_sym_hashes (abfd)[r_index];
7931 
7932 	  /* If the relocation is against a static symbol it must be within
7933 	     the current section and so cannot be a cross ARM/Thumb relocation.  */
7934 	  if (h == NULL)
7935 	    continue;
7936 
7937 	  /* If the call will go through a PLT entry then we do not need
7938 	     glue.  */
7939 	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7940 	    continue;
7941 
7942 	  switch (r_type)
7943 	    {
7944 	    case R_ARM_PC24:
7945 	      /* This one is a call from arm code.  We need to look up
7946 		 the target of the call.  If it is a thumb target, we
7947 		 insert glue.  */
7948 	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7949 		  == ST_BRANCH_TO_THUMB)
7950 		record_arm_to_thumb_glue (link_info, h);
7951 	      break;
7952 
7953 	    default:
7954 	      abort ();
7955 	    }
7956 	}
7957 
7958       if (elf_section_data (sec)->this_hdr.contents != contents)
7959 	free (contents);
7960       contents = NULL;
7961 
7962       if (elf_section_data (sec)->relocs != internal_relocs)
7963 	free (internal_relocs);
7964       internal_relocs = NULL;
7965     }
7966 
7967   return true;
7968 
7969  error_return:
7970   if (elf_section_data (sec)->this_hdr.contents != contents)
7971     free (contents);
7972   if (elf_section_data (sec)->relocs != internal_relocs)
7973     free (internal_relocs);
7974 
7975   return false;
7976 }
7977 #endif
7978 
7979 
7980 /* Initialise maps of ARM/Thumb/data for input BFDs.  */
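/* The EABI mapping symbols $a, $t and $d mark where ARM code, Thumb code and
   data start within a section; below, name[1] is therefore 'a', 't' or 'd'
   and st_value is the offset at which that span begins.  */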
7981 
7982 void
7983 bfd_elf32_arm_init_maps (bfd *abfd)
7984 {
7985   Elf_Internal_Sym *isymbuf;
7986   Elf_Internal_Shdr *hdr;
7987   unsigned int i, localsyms;
7988 
7989   /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
7990   if (! is_arm_elf (abfd))
7991     return;
7992 
7993   if ((abfd->flags & DYNAMIC) != 0)
7994     return;
7995 
7996   hdr = & elf_symtab_hdr (abfd);
7997   localsyms = hdr->sh_info;
7998 
7999   /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8000      should contain the number of local symbols, which should come before any
8001      global symbols.  Mapping symbols are always local.  */
8002   isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8003 				  NULL);
8004 
8005   /* No internal symbols read?  Skip this BFD.  */
8006   if (isymbuf == NULL)
8007     return;
8008 
8009   for (i = 0; i < localsyms; i++)
8010     {
8011       Elf_Internal_Sym *isym = &isymbuf[i];
8012       asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8013       const char *name;
8014 
8015       if (sec != NULL
8016 	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8017 	{
8018 	  name = bfd_elf_string_from_elf_section (abfd,
8019 	    hdr->sh_link, isym->st_name);
8020 
8021 	  if (bfd_is_arm_special_symbol_name (name,
8022 					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8023 	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
8024 	}
8025     }
8026 }
8027 
8028 
8029 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8030    say what they wanted.  */
8031 
8032 void
8033 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8034 {
8035   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8036   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8037 
8038   if (globals == NULL)
8039     return;
8040 
8041   if (globals->fix_cortex_a8 == -1)
8042     {
8043       /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
8044       if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8045 	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
8046 	      || out_attr[Tag_CPU_arch_profile].i == 0))
8047 	globals->fix_cortex_a8 = 1;
8048       else
8049 	globals->fix_cortex_a8 = 0;
8050     }
8051 }
8052 
8053 
8054 void
8055 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8056 {
8057   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8058   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8059 
8060   if (globals == NULL)
8061     return;
8062   /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
8063   if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8064     {
8065       switch (globals->vfp11_fix)
8066 	{
8067 	case BFD_ARM_VFP11_FIX_DEFAULT:
8068 	case BFD_ARM_VFP11_FIX_NONE:
8069 	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8070 	  break;
8071 
8072 	default:
8073 	  /* Give a warning, but do as the user requests anyway.  */
8074 	  _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8075 	    "workaround is not necessary for target architecture"), obfd);
8076 	}
8077     }
8078   else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8079     /* For earlier architectures, we might need the workaround, but do not
8080        enable it by default.  If users are running with broken hardware, they
8081        must enable the erratum fix explicitly.  */
8082     globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8083 }
8084 
8085 void
8086 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8087 {
8088   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8089   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8090 
8091   if (globals == NULL)
8092     return;
8093 
8094   /* We assume only Cortex-M4 may require the fix.  */
8095   if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8096       || out_attr[Tag_CPU_arch_profile].i != 'M')
8097     {
8098       if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8099 	/* Give a warning, but do as the user requests anyway.  */
8100 	_bfd_error_handler
8101 	  (_("%pB: warning: selected STM32L4XX erratum "
8102 	     "workaround is not necessary for target architecture"), obfd);
8103     }
8104 }
8105 
8106 enum bfd_arm_vfp11_pipe
8107 {
8108   VFP11_FMAC,
8109   VFP11_LS,
8110   VFP11_DS,
8111   VFP11_BAD
8112 };
8113 
8114 /* Return a VFP register number.  This is encoded as RX:X for single-precision
8115    registers, or X:RX for double-precision registers, where RX is the group of
8116    four bits in the instruction encoding and X is the single extension bit.
8117    RX and X fields are specified using their lowest (starting) bit.  The return
8118    value is:
8119 
8120      0...31: single-precision registers s0...s31
8121      32...63: double-precision registers d0...d31.
8122 
8123    Although X should be zero for VFP11 (encoding d0...d15 only), we might
8124    encounter VFP3 instructions, so we allow the full range for DP registers.  */
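/* For instance, a destination field in bits [15:12] with its extension bit at
   bit 22 is fetched with RX=12, X=22: with IS_DOUBLE false this yields
   2 * bits[15:12] + bit 22 (the s-register index), and with IS_DOUBLE true it
   yields 32 + (bit 22 << 4 | bits[15:12]) (the d-register index).  */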
8125 
8126 static unsigned int
8127 bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
8128 		     unsigned int x)
8129 {
8130   if (is_double)
8131     return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8132   else
8133     return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8134 }
8135 
8136 /* Set bits in *WMASK according to a register number REG as encoded by
8137    bfd_arm_vfp11_regno().  Ignore d16-d31.  */
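/* For example, s3 (reg 3) sets bit 3, while d2 (reg 34) sets bits 4 and 5,
   i.e. its two single-precision halves s4 and s5; d16-d31 fall outside the
   32-bit mask and are ignored.  */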
8138 
8139 static void
8140 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8141 {
8142   if (reg < 32)
8143     *wmask |= 1 << reg;
8144   else if (reg < 48)
8145     *wmask |= 3 << ((reg - 32) * 2);
8146 }
8147 
8148 /* Return TRUE if WMASK overwrites anything in REGS.  */
8149 
8150 static bool
8151 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8152 {
8153   int i;
8154 
8155   for (i = 0; i < numregs; i++)
8156     {
8157       unsigned int reg = regs[i];
8158 
8159       if (reg < 32 && (wmask & (1 << reg)) != 0)
8160 	return true;
8161 
8162       reg -= 32;
8163 
8164       if (reg >= 16)
8165 	continue;
8166 
8167       if ((wmask & (3 << (reg * 2))) != 0)
8168 	return true;
8169     }
8170 
8171   return false;
8172 }
8173 
8174 /* In this function, we're interested in two things: finding input registers
8175    for VFP data-processing instructions, and finding the set of registers which
8176    arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
8177    hold the written set, so FLDM etc. are easy to deal with (we're only
8178    interested in 32 SP registers or 16 dp registers, due to the VFP version
8179    implemented by the chip in question).  DP registers are marked by setting
8180    both SP registers in the write mask.  */
8181 
8182 static enum bfd_arm_vfp11_pipe
8183 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8184 			   int *numregs)
8185 {
8186   enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8187   bool is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8188 
8189   if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
8190     {
8191       unsigned int pqrs;
8192       unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8193       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8194 
8195       pqrs = ((insn & 0x00800000) >> 20)
8196 	   | ((insn & 0x00300000) >> 19)
8197 	   | ((insn & 0x00000040) >> 6);
8198 
8199       switch (pqrs)
8200 	{
8201 	case 0: /* fmac[sd].  */
8202 	case 1: /* fnmac[sd].  */
8203 	case 2: /* fmsc[sd].  */
8204 	case 3: /* fnmsc[sd].  */
8205 	  vpipe = VFP11_FMAC;
8206 	  bfd_arm_vfp11_write_mask (destmask, fd);
8207 	  regs[0] = fd;
8208 	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
8209 	  regs[2] = fm;
8210 	  *numregs = 3;
8211 	  break;
8212 
8213 	case 4: /* fmul[sd].  */
8214 	case 5: /* fnmul[sd].  */
8215 	case 6: /* fadd[sd].  */
8216 	case 7: /* fsub[sd].  */
8217 	  vpipe = VFP11_FMAC;
8218 	  goto vfp_binop;
8219 
8220 	case 8: /* fdiv[sd].  */
8221 	  vpipe = VFP11_DS;
8222 	  vfp_binop:
8223 	  bfd_arm_vfp11_write_mask (destmask, fd);
8224 	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
8225 	  regs[1] = fm;
8226 	  *numregs = 2;
8227 	  break;
8228 
8229 	case 15: /* extended opcode.  */
8230 	  {
8231 	    unsigned int extn = ((insn >> 15) & 0x1e)
8232 			      | ((insn >> 7) & 1);
8233 
8234 	    switch (extn)
8235 	      {
8236 	      case 0: /* fcpy[sd].  */
8237 	      case 1: /* fabs[sd].  */
8238 	      case 2: /* fneg[sd].  */
8239 	      case 8: /* fcmp[sd].  */
8240 	      case 9: /* fcmpe[sd].  */
8241 	      case 10: /* fcmpz[sd].  */
8242 	      case 11: /* fcmpez[sd].  */
8243 	      case 16: /* fuito[sd].  */
8244 	      case 17: /* fsito[sd].  */
8245 	      case 24: /* ftoui[sd].  */
8246 	      case 25: /* ftouiz[sd].  */
8247 	      case 26: /* ftosi[sd].  */
8248 	      case 27: /* ftosiz[sd].  */
8249 		/* These instructions will not bounce due to underflow.  */
8250 		*numregs = 0;
8251 		vpipe = VFP11_FMAC;
8252 		break;
8253 
8254 	      case 3: /* fsqrt[sd].  */
8255 		/* fsqrt cannot underflow, but it can (perhaps) overwrite
8256 		   registers to cause the erratum in previous instructions.  */
8257 		bfd_arm_vfp11_write_mask (destmask, fd);
8258 		vpipe = VFP11_DS;
8259 		break;
8260 
8261 	      case 15: /* fcvt{ds,sd}.  */
8262 		{
8263 		  int rnum = 0;
8264 
8265 		  bfd_arm_vfp11_write_mask (destmask, fd);
8266 
8267 		  /* Only FCVTSD can underflow.  */
8268 		  if ((insn & 0x100) != 0)
8269 		    regs[rnum++] = fm;
8270 
8271 		  *numregs = rnum;
8272 
8273 		  vpipe = VFP11_FMAC;
8274 		}
8275 		break;
8276 
8277 	      default:
8278 		return VFP11_BAD;
8279 	      }
8280 	  }
8281 	  break;
8282 
8283 	default:
8284 	  return VFP11_BAD;
8285 	}
8286     }
8287   /* Two-register transfer.  */
8288   else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8289     {
8290       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8291 
8292       if ((insn & 0x100000) == 0)
8293 	{
8294 	  if (is_double)
8295 	    bfd_arm_vfp11_write_mask (destmask, fm);
8296 	  else
8297 	    {
8298 	      bfd_arm_vfp11_write_mask (destmask, fm);
8299 	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
8300 	    }
8301 	}
8302 
8303       vpipe = VFP11_LS;
8304     }
8305   else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
8306     {
8307       int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8308       unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8309 
8310       switch (puw)
8311 	{
8312 	case 0: /* Two-reg transfer.  We should catch these above.  */
8313 	  abort ();
8314 
8315 	case 2: /* fldm[sdx].  */
8316 	case 3:
8317 	case 5:
8318 	  {
8319 	    unsigned int i, offset = insn & 0xff;
8320 
8321 	    if (is_double)
8322 	      offset >>= 1;
8323 
8324 	    for (i = fd; i < fd + offset; i++)
8325 	      bfd_arm_vfp11_write_mask (destmask, i);
8326 	  }
8327 	  break;
8328 
8329 	case 4: /* fld[sd].  */
8330 	case 6:
8331 	  bfd_arm_vfp11_write_mask (destmask, fd);
8332 	  break;
8333 
8334 	default:
8335 	  return VFP11_BAD;
8336 	}
8337 
8338       vpipe = VFP11_LS;
8339     }
8340   /* Single-register transfer. Note L==0.  */
8341   else if ((insn & 0x0f100e10) == 0x0e000a10)
8342     {
8343       unsigned int opcode = (insn >> 21) & 7;
8344       unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8345 
8346       switch (opcode)
8347 	{
8348 	case 0: /* fmsr/fmdlr.  */
8349 	case 1: /* fmdhr.  */
8350 	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
8351 	     destination register.  I don't know if this is exactly right,
8352 	     but it is the conservative choice.  */
8353 	  bfd_arm_vfp11_write_mask (destmask, fn);
8354 	  break;
8355 
8356 	case 7: /* fmxr.  */
8357 	  break;
8358 	}
8359 
8360       vpipe = VFP11_LS;
8361     }
8362 
8363   return vpipe;
8364 }
8365 
8366 
8367 static int elf32_arm_compare_mapping (const void * a, const void * b);
8368 
8369 
8370 /* Look for potentially-troublesome code sequences which might trigger the
8371    VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
8372    (available from ARM) for details of the erratum.  A short version is
8373    described in ld.texinfo.  */
8374 
8375 bool
8376 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8377 {
8378   asection *sec;
8379   bfd_byte *contents = NULL;
8380   int state = 0;
8381   int regs[3], numregs = 0;
8382   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8383   int use_vector;
8384 
8385   if (globals == NULL)
8386     return false;

  use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8387 
8388   /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8389      The states transition as follows:
8390 
8391        0 -> 1 (vector) or 0 -> 2 (scalar)
8392 	   A VFP FMAC-pipeline instruction has been seen. Fill
8393 	   regs[0]..regs[numregs-1] with its input operands. Remember this
8394 	   instruction in 'first_fmac'.
8395 
8396        1 -> 2
8397 	   Any instruction, except for a VFP instruction which overwrites
8398 	   regs[*].
8399 
8400        1 -> 3 [ -> 0 ]  or
8401        2 -> 3 [ -> 0 ]
8402 	   A VFP instruction has been seen which overwrites any of regs[*].
8403 	   We must make a veneer!  Reset state to 0 before examining next
8404 	   instruction.
8405 
8406        2 -> 0
8407 	   If we fail to match anything in state 2, reset to state 0 and reset
8408 	   the instruction pointer to the instruction after 'first_fmac'.
8409 
8410      If the VFP11 vector mode is in use, there must be at least two unrelated
8411      instructions between anti-dependent VFP11 instructions to properly avoid
8412      triggering the erratum, hence the use of the extra state 1.  */
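  /* A concrete scalar-mode example: the sequence "fmacs s0, s1, s2" followed
     immediately by "fsubs s1, s3, s4" walks states 0 -> 2 -> 3, because the
     FSUB overwrites s1, one of the FMAC's inputs; a veneer is then recorded
     against the address of the FMAC.  */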
8413 
8414   /* If we are only performing a partial link do not bother
8415      to construct any glue.  */
8416   if (bfd_link_relocatable (link_info))
8417     return true;
8418 
8419   /* Skip if this bfd does not correspond to an ELF image.  */
8420   if (! is_arm_elf (abfd))
8421     return true;
8422 
8423   /* We should have chosen a fix type by the time we get here.  */
8424   BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8425 
8426   if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8427     return true;
8428 
8429   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8430   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8431     return true;
8432 
8433   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8434     {
8435       unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8436       struct _arm_elf_section_data *sec_data;
8437 
8438       /* If we don't have executable progbits, we're not interested in this
8439 	 section.  Also skip if section is to be excluded.  */
8440       if (elf_section_type (sec) != SHT_PROGBITS
8441 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8442 	  || (sec->flags & SEC_EXCLUDE) != 0
8443 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8444 	  || sec->output_section == bfd_abs_section_ptr
8445 	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8446 	continue;
8447 
8448       sec_data = elf32_arm_section_data (sec);
8449 
8450       if (sec_data->mapcount == 0)
8451 	continue;
8452 
8453       if (elf_section_data (sec)->this_hdr.contents != NULL)
8454 	contents = elf_section_data (sec)->this_hdr.contents;
8455       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8456 	goto error_return;
8457 
8458       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8459 	     elf32_arm_compare_mapping);
8460 
8461       for (span = 0; span < sec_data->mapcount; span++)
8462 	{
8463 	  unsigned int span_start = sec_data->map[span].vma;
8464 	  unsigned int span_end = (span == sec_data->mapcount - 1)
8465 				  ? sec->size : sec_data->map[span + 1].vma;
8466 	  char span_type = sec_data->map[span].type;
8467 
8468 	  /* FIXME: Only ARM mode is supported at present.  We may need to
8469 	     support Thumb-2 mode also at some point.  */
8470 	  if (span_type != 'a')
8471 	    continue;
8472 
8473 	  for (i = span_start; i < span_end;)
8474 	    {
8475 	      unsigned int next_i = i + 4;
8476 	      unsigned int insn = bfd_big_endian (abfd)
8477 		? (((unsigned) contents[i] << 24)
8478 		   | (contents[i + 1] << 16)
8479 		   | (contents[i + 2] << 8)
8480 		   | contents[i + 3])
8481 		: (((unsigned) contents[i + 3] << 24)
8482 		   | (contents[i + 2] << 16)
8483 		   | (contents[i + 1] << 8)
8484 		   | contents[i]);
8485 	      unsigned int writemask = 0;
8486 	      enum bfd_arm_vfp11_pipe vpipe;
8487 
8488 	      switch (state)
8489 		{
8490 		case 0:
8491 		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8492 						    &numregs);
8493 		  /* I'm assuming the VFP11 erratum can trigger with denorm
8494 		     operands on either the FMAC or the DS pipeline. This might
8495 		     lead to slightly overenthusiastic veneer insertion.  */
8496 		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8497 		    {
8498 		      state = use_vector ? 1 : 2;
8499 		      first_fmac = i;
8500 		      veneer_of_insn = insn;
8501 		    }
8502 		  break;
8503 
8504 		case 1:
8505 		  {
8506 		    int other_regs[3], other_numregs;
8507 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8508 						      other_regs,
8509 						      &other_numregs);
8510 		    if (vpipe != VFP11_BAD
8511 			&& bfd_arm_vfp11_antidependency (writemask, regs,
8512 							 numregs))
8513 		      state = 3;
8514 		    else
8515 		      state = 2;
8516 		  }
8517 		  break;
8518 
8519 		case 2:
8520 		  {
8521 		    int other_regs[3], other_numregs;
8522 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8523 						      other_regs,
8524 						      &other_numregs);
8525 		    if (vpipe != VFP11_BAD
8526 			&& bfd_arm_vfp11_antidependency (writemask, regs,
8527 							 numregs))
8528 		      state = 3;
8529 		    else
8530 		      {
8531 			state = 0;
8532 			next_i = first_fmac + 4;
8533 		      }
8534 		  }
8535 		  break;
8536 
8537 		case 3:
8538 		  abort ();  /* Should be unreachable.  */
8539 		}
8540 
8541 	      if (state == 3)
8542 		{
8543 		  elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8544 		      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8545 
8546 		  elf32_arm_section_data (sec)->erratumcount += 1;
8547 
8548 		  newerr->u.b.vfp_insn = veneer_of_insn;
8549 
8550 		  switch (span_type)
8551 		    {
8552 		    case 'a':
8553 		      newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8554 		      break;
8555 
8556 		    default:
8557 		      abort ();
8558 		    }
8559 
8560 		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8561 					       first_fmac);
8562 
8563 		  newerr->vma = -1;
8564 
8565 		  newerr->next = sec_data->erratumlist;
8566 		  sec_data->erratumlist = newerr;
8567 
8568 		  state = 0;
8569 		}
8570 
8571 	      i = next_i;
8572 	    }
8573 	}
8574 
8575       if (elf_section_data (sec)->this_hdr.contents != contents)
8576 	free (contents);
8577       contents = NULL;
8578     }
8579 
8580   return true;
8581 
8582  error_return:
8583   if (elf_section_data (sec)->this_hdr.contents != contents)
8584     free (contents);
8585 
8586   return false;
8587 }
8588 
8589 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8590    after sections have been laid out, using specially-named symbols.  */
8591 
8592 void
8593 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8594 					  struct bfd_link_info *link_info)
8595 {
8596   asection *sec;
8597   struct elf32_arm_link_hash_table *globals;
8598   char *tmp_name;
8599 
8600   if (bfd_link_relocatable (link_info))
8601     return;
8602 
8603   /* Skip if this bfd does not correspond to an ELF image.  */
8604   if (! is_arm_elf (abfd))
8605     return;
8606 
8607   globals = elf32_arm_hash_table (link_info);
8608   if (globals == NULL)
8609     return;
8610 
8611   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8612 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8613   BFD_ASSERT (tmp_name);
8614 
8615   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8616     {
8617       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8618       elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8619 
8620       for (; errnode != NULL; errnode = errnode->next)
8621 	{
8622 	  struct elf_link_hash_entry *myh;
8623 	  bfd_vma vma;
8624 
8625 	  switch (errnode->type)
8626 	    {
8627 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8628 	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8629 	      /* Find veneer symbol.  */
8630 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8631 		       errnode->u.b.veneer->u.v.id);
8632 
8633 	      myh = elf_link_hash_lookup
8634 		(&(globals)->root, tmp_name, false, false, true);
8635 
8636 	      if (myh == NULL)
8637 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8638 				    abfd, "VFP11", tmp_name);
8639 
8640 	      vma = myh->root.u.def.section->output_section->vma
8641 		    + myh->root.u.def.section->output_offset
8642 		    + myh->root.u.def.value;
8643 
8644 	      errnode->u.b.veneer->vma = vma;
8645 	      break;
8646 
8647 	    case VFP11_ERRATUM_ARM_VENEER:
8648 	    case VFP11_ERRATUM_THUMB_VENEER:
8649 	      /* Find return location.  */
8650 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8651 		       errnode->u.v.id);
8652 
8653 	      myh = elf_link_hash_lookup
8654 		(&(globals)->root, tmp_name, false, false, true);
8655 
8656 	      if (myh == NULL)
8657 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8658 				    abfd, "VFP11", tmp_name);
8659 
8660 	      vma = myh->root.u.def.section->output_section->vma
8661 		    + myh->root.u.def.section->output_offset
8662 		    + myh->root.u.def.value;
8663 
8664 	      errnode->u.v.branch->vma = vma;
8665 	      break;
8666 
8667 	    default:
8668 	      abort ();
8669 	    }
8670 	}
8671     }
8672 
8673   free (tmp_name);
8674 }
8675 
8676 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8677    return locations after sections have been laid out, using
8678    specially-named symbols.  */
8679 
8680 void
8681 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8682 					      struct bfd_link_info *link_info)
8683 {
8684   asection *sec;
8685   struct elf32_arm_link_hash_table *globals;
8686   char *tmp_name;
8687 
8688   if (bfd_link_relocatable (link_info))
8689     return;
8690 
8691   /* Skip if this bfd does not correspond to an ELF image.  */
8692   if (! is_arm_elf (abfd))
8693     return;
8694 
8695   globals = elf32_arm_hash_table (link_info);
8696   if (globals == NULL)
8697     return;
8698 
8699   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8700 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8701   BFD_ASSERT (tmp_name);
8702 
8703   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8704     {
8705       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8706       elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8707 
8708       for (; errnode != NULL; errnode = errnode->next)
8709 	{
8710 	  struct elf_link_hash_entry *myh;
8711 	  bfd_vma vma;
8712 
8713 	  switch (errnode->type)
8714 	    {
8715 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8716 	      /* Find veneer symbol.  */
8717 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8718 		       errnode->u.b.veneer->u.v.id);
8719 
8720 	      myh = elf_link_hash_lookup
8721 		(&(globals)->root, tmp_name, false, false, true);
8722 
8723 	      if (myh == NULL)
8724 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8725 				    abfd, "STM32L4XX", tmp_name);
8726 
8727 	      vma = myh->root.u.def.section->output_section->vma
8728 		+ myh->root.u.def.section->output_offset
8729 		+ myh->root.u.def.value;
8730 
8731 	      errnode->u.b.veneer->vma = vma;
8732 	      break;
8733 
8734 	    case STM32L4XX_ERRATUM_VENEER:
8735 	      /* Find return location.  */
8736 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8737 		       errnode->u.v.id);
8738 
8739 	      myh = elf_link_hash_lookup
8740 		(&(globals)->root, tmp_name, false, false, true);
8741 
8742 	      if (myh == NULL)
8743 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8744 				    abfd, "STM32L4XX", tmp_name);
8745 
8746 	      vma = myh->root.u.def.section->output_section->vma
8747 		+ myh->root.u.def.section->output_offset
8748 		+ myh->root.u.def.value;
8749 
8750 	      errnode->u.v.branch->vma = vma;
8751 	      break;
8752 
8753 	    default:
8754 	      abort ();
8755 	    }
8756 	}
8757     }
8758 
8759   free (tmp_name);
8760 }
8761 
8762 static inline bool
8763 is_thumb2_ldmia (const insn32 insn)
8764 {
8765   /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8766      1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
8767   return (insn & 0xffd02000) == 0xe8900000;
8768 }
8769 
8770 static inline bool
8771 is_thumb2_ldmdb (const insn32 insn)
8772 {
8773   /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8774      1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
8775   return (insn & 0xffd02000) == 0xe9100000;
8776 }
8777 
8778 static inline bool
8779 is_thumb2_vldm (const insn32 insn)
8780 {
8781   /* A6.5 Extension register load or store instruction
8782      A7.7.229
8783      We look for SP 32-bit and DP 64-bit registers.
8784      Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8785      <list> is consecutive 64-bit registers
8786      1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8787      Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8788      <list> is consecutive 32-bit registers
8789      1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8790      if P==0 && U==1 && W==1 && Rn=1101 VPOP
8791      if PUW=010 || PUW=011 || PUW=101 VLDM.  */
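  /* The shift/mask expression below isolates insn bits 24..21 (P, U, D, W)
     in the low nibble and the 0xd mask discards D, so the three comparisons
     test PUW == 010, 011 and 101 respectively.  */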
8792   return
8793     (((insn & 0xfe100f00) == 0xec100b00) ||
8794      ((insn & 0xfe100f00) == 0xec100a00))
8795     && /* (IA without !).  */
8796     (((((insn << 7) >> 28) & 0xd) == 0x4)
8797      /* (IA with !), includes VPOP (when reg number is SP).  */
8798      || ((((insn << 7) >> 28) & 0xd) == 0x5)
8799      /* (DB with !).  */
8800      || ((((insn << 7) >> 28) & 0xd) == 0x9));
8801 }
8802 
8803 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8804    VLDM opcode and:
8805  - computes the number and the mode of memory accesses
8806  - decides if the replacement should be done:
8807    . replaces only if > 8-word accesses
8808    . or (testing purposes only) replaces all accesses.  */
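/* For example, with the DEFAULT fix a "ldmia.w r0!, {r1-r9}" (nine registers,
   hence nine words) gets a veneer while an eight-register LDM does not; for
   VLDM the low eight bits of the encoding give the word count directly.  */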
8809 
8810 static bool
8811 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8812 				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8813 {
8814   int nb_words = 0;
8815 
8816   /* The field encoding the register list is the same for both LDMIA
8817      and LDMDB encodings.  */
8818   if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8819     nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8820   else if (is_thumb2_vldm (insn))
8821    nb_words = (insn & 0xff);
8822 
8823   /* DEFAULT mode accounts for the real bug condition situation,
8824      ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
8825   return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
8826 	  ? nb_words > 8
8827 	  : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
8828 }
8829 
8830 /* Look for potentially-troublesome code sequences which might trigger
8831    the STM STM32L4XX erratum.  */
8832 
8833 bool
8834 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8835 				      struct bfd_link_info *link_info)
8836 {
8837   asection *sec;
8838   bfd_byte *contents = NULL;
8839   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8840 
8841   if (globals == NULL)
8842     return false;
8843 
8844   /* If we are only performing a partial link do not bother
8845      to construct any glue.  */
8846   if (bfd_link_relocatable (link_info))
8847     return true;
8848 
8849   /* Skip if this bfd does not correspond to an ELF image.  */
8850   if (! is_arm_elf (abfd))
8851     return true;
8852 
8853   if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8854     return true;
8855 
8856   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8857   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8858     return true;
8859 
8860   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8861     {
8862       unsigned int i, span;
8863       struct _arm_elf_section_data *sec_data;
8864 
8865       /* If we don't have executable progbits, we're not interested in this
8866 	 section.  Also skip if section is to be excluded.  */
8867       if (elf_section_type (sec) != SHT_PROGBITS
8868 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8869 	  || (sec->flags & SEC_EXCLUDE) != 0
8870 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8871 	  || sec->output_section == bfd_abs_section_ptr
8872 	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8873 	continue;
8874 
8875       sec_data = elf32_arm_section_data (sec);
8876 
8877       if (sec_data->mapcount == 0)
8878 	continue;
8879 
8880       if (elf_section_data (sec)->this_hdr.contents != NULL)
8881 	contents = elf_section_data (sec)->this_hdr.contents;
8882       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8883 	goto error_return;
8884 
8885       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8886 	     elf32_arm_compare_mapping);
8887 
8888       for (span = 0; span < sec_data->mapcount; span++)
8889 	{
8890 	  unsigned int span_start = sec_data->map[span].vma;
8891 	  unsigned int span_end = (span == sec_data->mapcount - 1)
8892 	    ? sec->size : sec_data->map[span + 1].vma;
8893 	  char span_type = sec_data->map[span].type;
8894 	  int itblock_current_pos = 0;
8895 
8896 	  /* Only Thumb-2 mode need be supported with this CM4-specific
8897 	     code; we do not expect to encounter any ARM mode here,
8898 	     i.e. spans with span_type == 'a'.  */
8899 	  if (span_type != 't')
8900 	    continue;
8901 
8902 	  for (i = span_start; i < span_end;)
8903 	    {
8904 	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8905 	      bool insn_32bit = false;
8906 	      bool is_ldm = false;
8907 	      bool is_vldm = false;
8908 	      bool is_not_last_in_it_block = false;
8909 
8910 	      /* The first 16-bits of all 32-bit thumb2 instructions start
8911 		 with opcode[15..13]=0b111 and the encoded op1 can be anything
8912 		 except opcode[12..11]!=0b00.
8913 		 See 32-bit Thumb instruction encoding.  */
8914 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8915 		insn_32bit = true;
8916 
8917 	      /* Compute the predicate that tells if the instruction
8918 		 is concerned by the IT block
8919 		 - Creates an error if there is a ldm that is not
8920 		   last in the IT block thus cannot be replaced
8921 		 - Otherwise we can create a branch at the end of the
8922 		   IT block, it will be controlled naturally by IT
8923 		   with the proper pseudo-predicate
8924 		 - So the only interesting predicate is the one that
8925 		   tells that we are not on the last item of an IT
8926 		   block.  */
8927 	      if (itblock_current_pos != 0)
8928 		  is_not_last_in_it_block = !!--itblock_current_pos;
8929 
8930 	      if (insn_32bit)
8931 		{
8932 		  /* Load the rest of the insn (in manual-friendly order).  */
8933 		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8934 		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8935 		  is_vldm = is_thumb2_vldm (insn);
8936 
8937 		  /* Veneers are created for (v)ldm depending on
8938 		     option flags and memory accesses conditions; but
8939 		     if the instruction is not the last instruction of
8940 		     an IT block, we cannot create a jump there, so we
8941 		     bail out.  */
8942 		    if ((is_ldm || is_vldm)
8943 			&& stm32l4xx_need_create_replacing_stub
8944 			(insn, globals->stm32l4xx_fix))
8945 		      {
8946 			if (is_not_last_in_it_block)
8947 			  {
8948 			    _bfd_error_handler
8949 			      /* xgettext:c-format */
8950 			      (_("%pB(%pA+%#x): error: multiple load detected"
8951 				 " in non-last IT block instruction:"
8952 				 " STM32L4XX veneer cannot be generated; "
8953 				 "use gcc option -mrestrict-it to generate"
8954 				 " only one instruction per IT block"),
8955 			       abfd, sec, i);
8956 			  }
8957 			else
8958 			  {
8959 			    elf32_stm32l4xx_erratum_list *newerr =
8960 			      (elf32_stm32l4xx_erratum_list *)
8961 			      bfd_zmalloc
8962 			      (sizeof (elf32_stm32l4xx_erratum_list));
8963 
8964 			    elf32_arm_section_data (sec)
8965 			      ->stm32l4xx_erratumcount += 1;
8966 			    newerr->u.b.insn = insn;
8967 			    /* We create only thumb branches.  */
8968 			    newerr->type =
8969 			      STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8970 			    record_stm32l4xx_erratum_veneer
8971 			      (link_info, newerr, abfd, sec,
8972 			       i,
8973 			       is_ldm ?
8974 			       STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8975 			       STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8976 			    newerr->vma = -1;
8977 			    newerr->next = sec_data->stm32l4xx_erratumlist;
8978 			    sec_data->stm32l4xx_erratumlist = newerr;
8979 			  }
8980 		      }
8981 		}
8982 	      else
8983 		{
8984 		  /* A7.7.37 IT p208
8985 		     IT blocks are only encoded in T1
8986 		     Encoding T1: IT{x{y{z}}} <firstcond>
8987 		     1 0 1 1 - 1 1 1 1 - firstcond - mask
8988 		     if mask = '0000' then see 'related encodings'
8989 		     We don't deal with UNPREDICTABLE, just ignore these.
8990 		     There can be no nested IT blocks so an IT block
8991 		     is naturally a new one for which it is worth
8992 		     computing its size.  */
8993 		  bool is_newitblock = ((insn & 0xff00) == 0xbf00)
8994 		    && ((insn & 0x000f) != 0x0000);
8995 		  /* If we have a new IT block we compute its size.  */
8996 		  if (is_newitblock)
8997 		    {
8998 		      /* Compute the number of instructions controlled
8999 			 by the IT block, it will be used to decide
9000 			 whether we are inside an IT block or not.  */
9001 		      unsigned int mask = insn & 0x000f;
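		      /* The lowest set bit of the mask marks the end of the
			 IT block: a plain IT has mask 0b1000 and controls one
			 instruction, while e.g. mask 0b0110 controls three.  */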
9002 		      itblock_current_pos = 4 - ctz (mask);
9003 		    }
9004 		}
9005 
9006 	      i += insn_32bit ? 4 : 2;
9007 	    }
9008 	}
9009 
9010       if (elf_section_data (sec)->this_hdr.contents != contents)
9011 	free (contents);
9012       contents = NULL;
9013     }
9014 
9015   return true;
9016 
9017  error_return:
9018   if (elf_section_data (sec)->this_hdr.contents != contents)
9019     free (contents);
9020 
9021   return false;
9022 }
9023 
9024 /* Set target relocation values needed during linking.  */
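/* PARAMS carries the configuration collected from the linker command line,
   e.g. the argument of the --target2= option ("rel", "abs" or "got-rel"),
   which selects how R_ARM_TARGET2 relocations are resolved below.  */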
9025 
9026 void
9027 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9028 				 struct bfd_link_info *link_info,
9029 				 struct elf32_arm_params *params)
9030 {
9031   struct elf32_arm_link_hash_table *globals;
9032 
9033   globals = elf32_arm_hash_table (link_info);
9034   if (globals == NULL)
9035     return;
9036 
9037   globals->target1_is_rel = params->target1_is_rel;
9038   if (globals->fdpic_p)
9039     globals->target2_reloc = R_ARM_GOT32;
9040   else if (strcmp (params->target2_type, "rel") == 0)
9041     globals->target2_reloc = R_ARM_REL32;
9042   else if (strcmp (params->target2_type, "abs") == 0)
9043     globals->target2_reloc = R_ARM_ABS32;
9044   else if (strcmp (params->target2_type, "got-rel") == 0)
9045     globals->target2_reloc = R_ARM_GOT_PREL;
9046   else
9047     {
9048       _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9049 			  params->target2_type);
9050     }
9051   globals->fix_v4bx = params->fix_v4bx;
9052   globals->use_blx |= params->use_blx;
9053   globals->vfp11_fix = params->vfp11_denorm_fix;
9054   globals->stm32l4xx_fix = params->stm32l4xx_fix;
9055   if (globals->fdpic_p)
9056     globals->pic_veneer = 1;
9057   else
9058     globals->pic_veneer = params->pic_veneer;
9059   globals->fix_cortex_a8 = params->fix_cortex_a8;
9060   globals->fix_arm1176 = params->fix_arm1176;
9061   globals->cmse_implib = params->cmse_implib;
9062   globals->in_implib_bfd = params->in_implib_bfd;
9063 
9064   BFD_ASSERT (is_arm_elf (output_bfd));
9065   elf_arm_tdata (output_bfd)->no_enum_size_warning
9066     = params->no_enum_size_warning;
9067   elf_arm_tdata (output_bfd)->no_wchar_size_warning
9068     = params->no_wchar_size_warning;
9069 }
9070 
9071 /* Replace the target offset of a Thumb bl or b.w instruction.  */
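/* A Thumb-2 BL/B.W offset is split across the two halfwords: the first holds
   the sign bit S and imm10 (offset bits 21..12), the second holds J1 (bit 13),
   J2 (bit 11) and imm11 (offset bits 11..1); offset bits 23 and 22 are
   recovered as I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S).  */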
9072 
9073 static void
9074 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9075 {
9076   bfd_vma upper;
9077   bfd_vma lower;
9078   int reloc_sign;
9079 
9080   BFD_ASSERT ((offset & 1) == 0);
9081 
9082   upper = bfd_get_16 (abfd, insn);
9083   lower = bfd_get_16 (abfd, insn + 2);
9084   reloc_sign = (offset < 0) ? 1 : 0;
9085   upper = (upper & ~(bfd_vma) 0x7ff)
9086 	  | ((offset >> 12) & 0x3ff)
9087 	  | (reloc_sign << 10);
9088   lower = (lower & ~(bfd_vma) 0x2fff)
9089 	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9090 	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9091 	  | ((offset >> 1) & 0x7ff);
9092   bfd_put_16 (abfd, upper, insn);
9093   bfd_put_16 (abfd, lower, insn + 2);
9094 }
9095 
9096 /* Thumb code calling an ARM function.  */
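/* The glue emitted below is the classic three-instruction shim
       bx   pc		(Thumb)
       nop		(Thumb)
       b    <target>	(ARM)
   where "bx pc" (PC reads as the address of the ARM branch) transfers to the
   branch two halfwords ahead and switches the core to ARM state.  */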
9097 
9098 static int
9099 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9100 			 const char *		name,
9101 			 bfd *			input_bfd,
9102 			 bfd *			output_bfd,
9103 			 asection *		input_section,
9104 			 bfd_byte *		hit_data,
9105 			 asection *		sym_sec,
9106 			 bfd_vma		offset,
9107 			 bfd_signed_vma		addend,
9108 			 bfd_vma		val,
9109 			 char **error_message)
9110 {
9111   asection * s = 0;
9112   bfd_vma my_offset;
9113   long int ret_offset;
9114   struct elf_link_hash_entry * myh;
9115   struct elf32_arm_link_hash_table * globals;
9116 
9117   myh = find_thumb_glue (info, name, error_message);
9118   if (myh == NULL)
9119     return false;
9120 
9121   globals = elf32_arm_hash_table (info);
9122   BFD_ASSERT (globals != NULL);
9123   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9124 
9125   my_offset = myh->root.u.def.value;
9126 
9127   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9128 			      THUMB2ARM_GLUE_SECTION_NAME);
9129 
9130   BFD_ASSERT (s != NULL);
9131   BFD_ASSERT (s->contents != NULL);
9132   BFD_ASSERT (s->output_section != NULL);
9133 
9134   if ((my_offset & 0x01) == 0x01)
9135     {
9136       if (sym_sec != NULL
9137 	  && sym_sec->owner != NULL
9138 	  && !INTERWORK_FLAG (sym_sec->owner))
9139 	{
9140 	  _bfd_error_handler
9141 	    (_("%pB(%s): warning: interworking not enabled;"
9142 	       " first occurrence: %pB: %s call to %s"),
9143 	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9144 
9145 	  return false;
9146 	}
9147 
9148       --my_offset;
9149       myh->root.u.def.value = my_offset;
9150 
9151       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9152 		      s->contents + my_offset);
9153 
9154       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9155 		      s->contents + my_offset + 2);
9156 
9157       ret_offset =
9158 	/* Address of destination of the stub.  */
9159 	((bfd_signed_vma) val)
9160 	- ((bfd_signed_vma)
9161 	   /* Offset from the start of the current section
9162 	      to the start of the stubs.  */
9163 	   (s->output_offset
9164 	    /* Offset of the start of this stub from the start of the stubs.  */
9165 	    + my_offset
9166 	    /* Address of the start of the current section.  */
9167 	    + s->output_section->vma)
9168 	   /* The branch instruction is 4 bytes into the stub.  */
9169 	   + 4
9170 	   /* ARM branches work from the pc of the instruction + 8.  */
9171 	   + 8);
9172 
9173       put_arm_insn (globals, output_bfd,
9174 		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9175 		    s->contents + my_offset + 4);
9176     }
9177 
9178   BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9179 
9180   /* Now go back and fix up the original BL insn to point to here.  */
9181   ret_offset =
9182     /* Address of where the stub is located.  */
9183     (s->output_section->vma + s->output_offset + my_offset)
9184      /* Address of where the BL is located.  */
9185     - (input_section->output_section->vma + input_section->output_offset
9186        + offset)
9187     /* Addend in the relocation.  */
9188     - addend
9189     /* Biassing for PC-relative addressing.  */
9190     - 8;
9191 
9192   insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9193 
9194   return true;
9195 }
9196 
9197 /* Populate an Arm to Thumb stub.  Returns the stub symbol.  */
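/* Three variants are emitted below: a position-independent sequence that
   forms the target address relative to PC and does "bx ip"; a v5T+ sequence
   that loads the Thumb address (bit 0 set) directly into PC; and a plain
   ldr/bx ip pair.  In each case bit 0 of the stored address marks the
   destination as Thumb code.  */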
9198 
9199 static struct elf_link_hash_entry *
9200 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9201 			     const char *	    name,
9202 			     bfd *		    input_bfd,
9203 			     bfd *		    output_bfd,
9204 			     asection *		    sym_sec,
9205 			     bfd_vma		    val,
9206 			     asection *		    s,
9207 			     char **		    error_message)
9208 {
9209   bfd_vma my_offset;
9210   long int ret_offset;
9211   struct elf_link_hash_entry * myh;
9212   struct elf32_arm_link_hash_table * globals;
9213 
9214   myh = find_arm_glue (info, name, error_message);
9215   if (myh == NULL)
9216     return NULL;
9217 
9218   globals = elf32_arm_hash_table (info);
9219   BFD_ASSERT (globals != NULL);
9220   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9221 
9222   my_offset = myh->root.u.def.value;
9223 
9224   if ((my_offset & 0x01) == 0x01)
9225     {
9226       if (sym_sec != NULL
9227 	  && sym_sec->owner != NULL
9228 	  && !INTERWORK_FLAG (sym_sec->owner))
9229 	{
9230 	  _bfd_error_handler
9231 	    (_("%pB(%s): warning: interworking not enabled;"
9232 	       " first occurrence: %pB: %s call to %s"),
9233 	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9234 	}
9235 
9236       --my_offset;
9237       myh->root.u.def.value = my_offset;
9238 
9239       if (bfd_link_pic (info)
9240 	  || globals->root.is_relocatable_executable
9241 	  || globals->pic_veneer)
9242 	{
9243 	  /* For relocatable objects we can't use absolute addresses,
9244 	     so construct the address from a relative offset.  */
9245 	  /* TODO: If the offset is small it's probably worth
9246 	     constructing the address with adds.  */
9247 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9248 			s->contents + my_offset);
9249 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9250 			s->contents + my_offset + 4);
9251 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9252 			s->contents + my_offset + 8);
9253 	  /* Adjust the offset by 4 for the position of the add,
9254 	     and 8 for the pipeline offset.  */
9255 	  ret_offset = (val - (s->output_offset
9256 			       + s->output_section->vma
9257 			       + my_offset + 12))
9258 		       | 1;
9259 	  bfd_put_32 (output_bfd, ret_offset,
9260 		      s->contents + my_offset + 12);
9261 	}
9262       else if (globals->use_blx)
9263 	{
9264 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9265 			s->contents + my_offset);
9266 
9267 	  /* It's a thumb address.  Add the low order bit.  */
9268 	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9269 		      s->contents + my_offset + 4);
9270 	}
9271       else
9272 	{
9273 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9274 			s->contents + my_offset);
9275 
9276 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9277 			s->contents + my_offset + 4);
9278 
9279 	  /* It's a thumb address.  Add the low order bit.  */
9280 	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9281 		      s->contents + my_offset + 8);
9282 
9283 	  my_offset += 12;
9284 	}
9285     }
9286 
9287   BFD_ASSERT (my_offset <= globals->arm_glue_size);
9288 
9289   return myh;
9290 }
9291 
9292 /* Arm code calling a Thumb function.  */
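/* The original ARM branch is redirected at the glue stub: only its top byte
   (the condition and opcode bits) is kept, and the low 24 bits are replaced
   with the PC+8-biased word offset from the branch to the stub.  */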
9293 
9294 static int
9295 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9296 			 const char *		name,
9297 			 bfd *			input_bfd,
9298 			 bfd *			output_bfd,
9299 			 asection *		input_section,
9300 			 bfd_byte *		hit_data,
9301 			 asection *		sym_sec,
9302 			 bfd_vma		offset,
9303 			 bfd_signed_vma		addend,
9304 			 bfd_vma		val,
9305 			 char **error_message)
9306 {
9307   unsigned long int tmp;
9308   bfd_vma my_offset;
9309   asection * s;
9310   long int ret_offset;
9311   struct elf_link_hash_entry * myh;
9312   struct elf32_arm_link_hash_table * globals;
9313 
9314   globals = elf32_arm_hash_table (info);
9315   BFD_ASSERT (globals != NULL);
9316   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9317 
9318   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9319 			      ARM2THUMB_GLUE_SECTION_NAME);
9320   BFD_ASSERT (s != NULL);
9321   BFD_ASSERT (s->contents != NULL);
9322   BFD_ASSERT (s->output_section != NULL);
9323 
9324   myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9325 				     sym_sec, val, s, error_message);
9326   if (!myh)
9327     return false;
9328 
9329   my_offset = myh->root.u.def.value;
9330   tmp = bfd_get_32 (input_bfd, hit_data);
9331   tmp = tmp & 0xFF000000;
9332 
9333   /* Somehow these are both 4 too far, so subtract 8.  */
9334   ret_offset = (s->output_offset
9335 		+ my_offset
9336 		+ s->output_section->vma
9337 		- (input_section->output_offset
9338 		   + input_section->output_section->vma
9339 		   + offset + addend)
9340 		- 8);
9341 
9342   tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9343 
9344   bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9345 
9346   return true;
9347 }
9348 
9349 /* Populate Arm stub for an exported Thumb function.  */
9350 
9351 static bool
9352 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9353 {
9354   struct bfd_link_info * info = (struct bfd_link_info *) inf;
9355   asection * s;
9356   struct elf_link_hash_entry * myh;
9357   struct elf32_arm_link_hash_entry *eh;
9358   struct elf32_arm_link_hash_table * globals;
9359   asection *sec;
9360   bfd_vma val;
9361   char *error_message;
9362 
9363   eh = elf32_arm_hash_entry (h);
9364   /* Allocate stubs for exported Thumb functions on v4t.  */
9365   if (eh->export_glue == NULL)
9366     return true;
9367 
9368   globals = elf32_arm_hash_table (info);
9369   BFD_ASSERT (globals != NULL);
9370   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9371 
9372   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9373 			      ARM2THUMB_GLUE_SECTION_NAME);
9374   BFD_ASSERT (s != NULL);
9375   BFD_ASSERT (s->contents != NULL);
9376   BFD_ASSERT (s->output_section != NULL);
9377 
9378   sec = eh->export_glue->root.u.def.section;
9379 
9380   BFD_ASSERT (sec->output_section != NULL);
9381 
9382   val = eh->export_glue->root.u.def.value + sec->output_offset
9383 	+ sec->output_section->vma;
9384 
9385   myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9386 				     h->root.u.def.section->owner,
9387 				     globals->obfd, sec, val, s,
9388 				     &error_message);
9389   BFD_ASSERT (myh);
9390   return true;
9391 }
9392 
9393 /* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
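/* Two flag bits live in the low bits of bx_glue_offset[reg]: bit 1 means a
   veneer slot has been allocated for this register (asserted below) and bit 0
   means its contents have already been written, so each veneer is emitted
   only once.  The veneer patches the register number into a
   tst / conditional mov-to-pc / bx sequence.  */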
9394 
9395 static bfd_vma
9396 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9397 {
9398   bfd_byte *p;
9399   bfd_vma glue_addr;
9400   asection *s;
9401   struct elf32_arm_link_hash_table *globals;
9402 
9403   globals = elf32_arm_hash_table (info);
9404   BFD_ASSERT (globals != NULL);
9405   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9406 
9407   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9408 			      ARM_BX_GLUE_SECTION_NAME);
9409   BFD_ASSERT (s != NULL);
9410   BFD_ASSERT (s->contents != NULL);
9411   BFD_ASSERT (s->output_section != NULL);
9412 
9413   BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9414 
9415   glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9416 
9417   if ((globals->bx_glue_offset[reg] & 1) == 0)
9418     {
9419       p = s->contents + glue_addr;
9420       bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9421       bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9422       bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9423       globals->bx_glue_offset[reg] |= 1;
9424     }
9425 
9426   return glue_addr + s->output_section->vma + s->output_offset;
9427 }
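
/* For reference (inferred from the armbx*_insn constants used above, not
   from any separate specification): the veneer written by this function
   appears to expand to roughly

       tst   rN, #1		@ test the Thumb bit of the target address
       moveq pc, rN		@ bit clear: plain mov to pc
       bx    rN			@ bit set: fall through to bx

   with the register number rN merged into each instruction via the
   (reg << 16) and + reg adjustments.  */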
9428 
9429 /* Generate Arm stubs for exported Thumb symbols.  */
9430 static void
9431 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9432 				  struct bfd_link_info *link_info)
9433 {
9434   struct elf32_arm_link_hash_table * globals;
9435 
9436   if (link_info == NULL)
9437     /* Ignore this if we are not called by the ELF backend linker.  */
9438     return;
9439 
9440   globals = elf32_arm_hash_table (link_info);
9441   if (globals == NULL)
9442     return;
9443 
9444   /* If blx is available then exported Thumb symbols are OK and there is
9445      nothing to do.  */
9446   if (globals->use_blx)
9447     return;
9448 
9449   elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9450 			  link_info);
9451 }
9452 
9453 /* Reserve space for COUNT dynamic relocations in relocation section
9454    SRELOC.  */
9455 
9456 static void
9457 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9458 			      bfd_size_type count)
9459 {
9460   struct elf32_arm_link_hash_table *htab;
9461 
9462   htab = elf32_arm_hash_table (info);
9463   BFD_ASSERT (htab->root.dynamic_sections_created);
9464   if (sreloc == NULL)
9465     abort ();
9466   sreloc->size += RELOC_SIZE (htab) * count;
9467 }
9468 
9469 /* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
9470    dynamic, the relocations should go in SRELOC, otherwise they should
9471    go in the special .rel.iplt section.  */
9472 
9473 static void
9474 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9475 			    bfd_size_type count)
9476 {
9477   struct elf32_arm_link_hash_table *htab;
9478 
9479   htab = elf32_arm_hash_table (info);
9480   if (!htab->root.dynamic_sections_created)
9481     htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9482   else
9483     {
9484       BFD_ASSERT (sreloc != NULL);
9485       sreloc->size += RELOC_SIZE (htab) * count;
9486     }
9487 }
9488 
9489 /* Add relocation REL to the end of relocation section SRELOC.  */
9490 
9491 static void
9492 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9493 			asection *sreloc, Elf_Internal_Rela *rel)
9494 {
9495   bfd_byte *loc;
9496   struct elf32_arm_link_hash_table *htab;
9497 
9498   htab = elf32_arm_hash_table (info);
9499   if (!htab->root.dynamic_sections_created
9500       && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9501     sreloc = htab->root.irelplt;
9502   if (sreloc == NULL)
9503     abort ();
9504   loc = sreloc->contents;
9505   loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9506   if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9507     abort ();
9508   SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9509 }
9510 
9511 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9512    IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9513    to .plt.  */
9514 
9515 static void
9516 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9517 			      bool is_iplt_entry,
9518 			      union gotplt_union *root_plt,
9519 			      struct arm_plt_info *arm_plt)
9520 {
9521   struct elf32_arm_link_hash_table *htab;
9522   asection *splt;
9523   asection *sgotplt;
9524 
9525   htab = elf32_arm_hash_table (info);
9526 
9527   if (is_iplt_entry)
9528     {
9529       splt = htab->root.iplt;
9530       sgotplt = htab->root.igotplt;
9531 
9532       /* NaCl uses a special first entry in .iplt too.  */
9533       if (htab->root.target_os == is_nacl && splt->size == 0)
9534 	splt->size += htab->plt_header_size;
9535 
9536       /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
9537       elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9538     }
9539   else
9540     {
9541       splt = htab->root.splt;
9542       sgotplt = htab->root.sgotplt;
9543 
9544       if (htab->fdpic_p)
9545 	{
9546 	  /* Allocate room for an R_ARM_FUNCDESC_VALUE relocation.  For lazy
9547 	     binding the relocation goes into .rel.plt, otherwise into
9548 	     .rel.got.  FIXME: lazy binding is not supported yet, so put it
9549 	     in .rel.got for now.  */
9550 	  if (info->flags & DF_BIND_NOW)
9551 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9552 	  else
9553 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9554 	}
9555       else
9556 	{
9557 	  /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt.  */
9558 	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9559 	}
9560 
9561       /* If this is the first .plt entry, make room for the special
9562 	 first entry.  */
9563       if (splt->size == 0)
9564 	splt->size += htab->plt_header_size;
9565 
9566       htab->next_tls_desc_index++;
9567     }
9568 
9569   /* Allocate the PLT entry itself, including any leading Thumb stub.  */
9570   if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9571     splt->size += PLT_THUMB_STUB_SIZE;
9572   root_plt->offset = splt->size;
9573   splt->size += htab->plt_entry_size;
9574 
9575   /* We also need to make an entry in the .got.plt section, which
9576      will be placed in the .got section by the linker script.  */
9577   if (is_iplt_entry)
9578     arm_plt->got_offset = sgotplt->size;
9579   else
9580     arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9581   if (htab->fdpic_p)
9582     /* Function descriptor takes 64 bits in GOT.  */
9583     sgotplt->size += 8;
9584   else
9585     sgotplt->size += 4;
9586 }
9587 
9588 static bfd_vma
9589 arm_movw_immediate (bfd_vma value)
9590 {
9591   return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9592 }
9593 
9594 static bfd_vma
9595 arm_movt_immediate (bfd_vma value)
9596 {
9597   return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9598 }
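
/* Worked example (illustrative value only, not taken from any particular
   link): for the address 0x12345678 the two helpers above give

     arm_movw_immediate (0x12345678) == 0x00050678	(imm4 = 0x5, imm12 = 0x678)
     arm_movt_immediate (0x12345678) == 0x00010234	(imm4 = 0x1, imm12 = 0x234)

   i.e. the low halfword is scattered into the MOVW imm4:imm12 fields and
   the high halfword into the MOVT fields, ready to be ORed into the
   instruction templates.  */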
9599 
9600 /* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
9601    the entry lives in .iplt and resolves to (*SYM_VALUE)().
9602    Otherwise, DYNINDX is the index of the symbol in the dynamic
9603    symbol table and SYM_VALUE is undefined.
9604 
9605    ROOT_PLT points to the offset of the PLT entry from the start of its
9606    section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
9607    bookkeeping information.
9608 
9609    Returns FALSE if there was a problem.  */
9610 
9611 static bool
9612 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9613 			      union gotplt_union *root_plt,
9614 			      struct arm_plt_info *arm_plt,
9615 			      int dynindx, bfd_vma sym_value)
9616 {
9617   struct elf32_arm_link_hash_table *htab;
9618   asection *sgot;
9619   asection *splt;
9620   asection *srel;
9621   bfd_byte *loc;
9622   bfd_vma plt_index;
9623   Elf_Internal_Rela rel;
9624   bfd_vma got_header_size;
9625 
9626   htab = elf32_arm_hash_table (info);
9627 
9628   /* Pick the appropriate sections and sizes.  */
9629   if (dynindx == -1)
9630     {
9631       splt = htab->root.iplt;
9632       sgot = htab->root.igotplt;
9633       srel = htab->root.irelplt;
9634 
9635       /* There are no reserved entries in .igot.plt, and no special
9636 	 first entry in .iplt.  */
9637       got_header_size = 0;
9638     }
9639   else
9640     {
9641       splt = htab->root.splt;
9642       sgot = htab->root.sgotplt;
9643       srel = htab->root.srelplt;
9644 
9645       got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9646     }
9647   BFD_ASSERT (splt != NULL && srel != NULL);
9648 
9649   bfd_vma got_offset, got_address, plt_address;
9650   bfd_vma got_displacement, initial_got_entry;
9651   bfd_byte * ptr;
9652 
9653   BFD_ASSERT (sgot != NULL);
9654 
9655   /* Get the offset into the .(i)got.plt table of the entry that
9656      corresponds to this function.  */
9657   got_offset = (arm_plt->got_offset & -2);
9658 
9659   /* Get the index in the procedure linkage table which
9660      corresponds to this symbol.  This is the index of this symbol
9661      in all the symbols for which we are making plt entries.
9662      After the reserved .got.plt entries, all symbols appear in
9663      the same order as in .plt.  */
9664   if (htab->fdpic_p)
9665     /* Function descriptor takes 8 bytes.  */
9666     plt_index = (got_offset - got_header_size) / 8;
9667   else
9668     plt_index = (got_offset - got_header_size) / 4;
9669 
9670   /* Calculate the address of the GOT entry.  */
9671   got_address = (sgot->output_section->vma
9672 		 + sgot->output_offset
9673 		 + got_offset);
9674 
9675   /* ...and the address of the PLT entry.  */
9676   plt_address = (splt->output_section->vma
9677 		 + splt->output_offset
9678 		 + root_plt->offset);
9679 
9680   ptr = splt->contents + root_plt->offset;
9681   if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
9682     {
9683       unsigned int i;
9684       bfd_vma val;
9685 
9686       for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9687 	{
9688 	  val = elf32_arm_vxworks_shared_plt_entry[i];
9689 	  if (i == 2)
9690 	    val |= got_address - sgot->output_section->vma;
9691 	  if (i == 5)
9692 	    val |= plt_index * RELOC_SIZE (htab);
9693 	  if (i == 2 || i == 5)
9694 	    bfd_put_32 (output_bfd, val, ptr);
9695 	  else
9696 	    put_arm_insn (htab, output_bfd, val, ptr);
9697 	}
9698     }
9699   else if (htab->root.target_os == is_vxworks)
9700     {
9701       unsigned int i;
9702       bfd_vma val;
9703 
9704       for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9705 	{
9706 	  val = elf32_arm_vxworks_exec_plt_entry[i];
9707 	  if (i == 2)
9708 	    val |= got_address;
9709 	  if (i == 4)
9710 	    val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9711 	  if (i == 5)
9712 	    val |= plt_index * RELOC_SIZE (htab);
9713 	  if (i == 2 || i == 5)
9714 	    bfd_put_32 (output_bfd, val, ptr);
9715 	  else
9716 	    put_arm_insn (htab, output_bfd, val, ptr);
9717 	}
9718 
9719       loc = (htab->srelplt2->contents
9720 	     + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9721 
9722       /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9723 	 referencing the GOT for this PLT entry.  */
9724       rel.r_offset = plt_address + 8;
9725       rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9726       rel.r_addend = got_offset;
9727       SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9728       loc += RELOC_SIZE (htab);
9729 
9730       /* Create the R_ARM_ABS32 relocation referencing the
9731 	 beginning of the PLT for this GOT entry.  */
9732       rel.r_offset = got_address;
9733       rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9734       rel.r_addend = 0;
9735       SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9736     }
9737   else if (htab->root.target_os == is_nacl)
9738     {
9739       /* Calculate the displacement between the PLT slot and the
9740 	 common tail that's part of the special initial PLT slot.  */
9741       int32_t tail_displacement
9742 	= ((splt->output_section->vma + splt->output_offset
9743 	    + ARM_NACL_PLT_TAIL_OFFSET)
9744 	   - (plt_address + htab->plt_entry_size + 4));
9745       BFD_ASSERT ((tail_displacement & 3) == 0);
9746       tail_displacement >>= 2;
9747 
9748       BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9749 		  || (-tail_displacement & 0xff000000) == 0);
9750 
9751       /* Calculate the displacement between the PLT slot and the entry
9752 	 in the GOT.  The offset accounts for the value produced by
9753 	 adding to pc in the penultimate instruction of the PLT stub.  */
9754       got_displacement = (got_address
9755 			  - (plt_address + htab->plt_entry_size));
9756 
9757       /* NaCl does not support interworking at all.  */
9758       BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9759 
9760       put_arm_insn (htab, output_bfd,
9761 		    elf32_arm_nacl_plt_entry[0]
9762 		    | arm_movw_immediate (got_displacement),
9763 		    ptr + 0);
9764       put_arm_insn (htab, output_bfd,
9765 		    elf32_arm_nacl_plt_entry[1]
9766 		    | arm_movt_immediate (got_displacement),
9767 		    ptr + 4);
9768       put_arm_insn (htab, output_bfd,
9769 		    elf32_arm_nacl_plt_entry[2],
9770 		    ptr + 8);
9771       put_arm_insn (htab, output_bfd,
9772 		    elf32_arm_nacl_plt_entry[3]
9773 		    | (tail_displacement & 0x00ffffff),
9774 		    ptr + 12);
9775     }
9776   else if (htab->fdpic_p)
9777     {
9778       const bfd_vma *plt_entry = using_thumb_only (htab)
9779 	? elf32_arm_fdpic_thumb_plt_entry
9780 	: elf32_arm_fdpic_plt_entry;
9781 
9782       /* Fill in the Thumb stub if needed.  */
9783       if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9784 	{
9785 	  put_thumb_insn (htab, output_bfd,
9786 			  elf32_arm_plt_thumb_stub[0], ptr - 4);
9787 	  put_thumb_insn (htab, output_bfd,
9788 			  elf32_arm_plt_thumb_stub[1], ptr - 2);
9789 	}
9790       /* As we are using 32 bit instructions even for the Thumb
9791 	 version, we have to use 'put_arm_insn' instead of
9792 	 'put_thumb_insn'.  */
9793       put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9794       put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9795       put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9796       put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9797       bfd_put_32 (output_bfd, got_offset, ptr + 16);
9798 
9799       if (!(info->flags & DF_BIND_NOW))
9800 	{
9801 	  /* funcdesc_value_reloc_offset.  */
9802 	  bfd_put_32 (output_bfd,
9803 		      htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9804 		      ptr + 20);
9805 	  put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9806 	  put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9807 	  put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9808 	  put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9809 	}
9810     }
9811   else if (using_thumb_only (htab))
9812     {
9813       /* PR ld/16017: Generate thumb only PLT entries.  */
9814       if (!using_thumb2 (htab))
9815 	{
9816 	  /* FIXME: We ought to be able to generate thumb-1 PLT
9817 	     instructions...  */
9818 	  _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9819 			      output_bfd);
9820 	  return false;
9821 	}
9822 
9823       /* Calculate the displacement between the PLT slot and the entry in
9824 	 the GOT.  The 12-byte offset accounts for the value produced by
9825 	 adding to pc in the 3rd instruction of the PLT stub.  */
9826       got_displacement = got_address - (plt_address + 12);
9827 
9828       /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9829 	 instead of 'put_thumb_insn'.  */
9830       put_arm_insn (htab, output_bfd,
9831 		    elf32_thumb2_plt_entry[0]
9832 		    | ((got_displacement & 0x000000ff) << 16)
9833 		    | ((got_displacement & 0x00000700) << 20)
9834 		    | ((got_displacement & 0x00000800) >>  1)
9835 		    | ((got_displacement & 0x0000f000) >> 12),
9836 		    ptr + 0);
9837       put_arm_insn (htab, output_bfd,
9838 		    elf32_thumb2_plt_entry[1]
9839 		    | ((got_displacement & 0x00ff0000)      )
9840 		    | ((got_displacement & 0x07000000) <<  4)
9841 		    | ((got_displacement & 0x08000000) >> 17)
9842 		    | ((got_displacement & 0xf0000000) >> 28),
9843 		    ptr + 4);
9844       put_arm_insn (htab, output_bfd,
9845 		    elf32_thumb2_plt_entry[2],
9846 		    ptr + 8);
9847       put_arm_insn (htab, output_bfd,
9848 		    elf32_thumb2_plt_entry[3],
9849 		    ptr + 12);
9850     }
9851   else
9852     {
9853       /* Calculate the displacement between the PLT slot and the
9854 	 entry in the GOT.  The eight-byte offset accounts for the
9855 	 value produced by adding to pc in the first instruction
9856 	 of the PLT stub.  */
9857       got_displacement = got_address - (plt_address + 8);
9858 
9859       if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9860 	{
9861 	  put_thumb_insn (htab, output_bfd,
9862 			  elf32_arm_plt_thumb_stub[0], ptr - 4);
9863 	  put_thumb_insn (htab, output_bfd,
9864 			  elf32_arm_plt_thumb_stub[1], ptr - 2);
9865 	}
9866 
9867       if (!elf32_arm_use_long_plt_entry)
9868 	{
9869 	  BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9870 
9871 	  put_arm_insn (htab, output_bfd,
9872 			elf32_arm_plt_entry_short[0]
9873 			| ((got_displacement & 0x0ff00000) >> 20),
9874 			ptr + 0);
9875 	  put_arm_insn (htab, output_bfd,
9876 			elf32_arm_plt_entry_short[1]
9877 			| ((got_displacement & 0x000ff000) >> 12),
9878 			ptr + 4);
9879 	  put_arm_insn (htab, output_bfd,
9880 			elf32_arm_plt_entry_short[2]
9881 			| (got_displacement & 0x00000fff),
9882 			ptr + 8);
9883 #ifdef FOUR_WORD_PLT
9884 	  bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9885 #endif
9886 	}
9887       else
9888 	{
9889 	  put_arm_insn (htab, output_bfd,
9890 			elf32_arm_plt_entry_long[0]
9891 			| ((got_displacement & 0xf0000000) >> 28),
9892 			ptr + 0);
9893 	  put_arm_insn (htab, output_bfd,
9894 			elf32_arm_plt_entry_long[1]
9895 			| ((got_displacement & 0x0ff00000) >> 20),
9896 			ptr + 4);
9897 	  put_arm_insn (htab, output_bfd,
9898 			elf32_arm_plt_entry_long[2]
9899 			| ((got_displacement & 0x000ff000) >> 12),
9900 			ptr + 8);
9901 	  put_arm_insn (htab, output_bfd,
9902 			elf32_arm_plt_entry_long[3]
9903 			| (got_displacement & 0x00000fff),
9904 			ptr + 12);
9905 	}
9906     }
9907 
9908   /* Fill in the entry in the .rel(a).(i)plt section.  */
9909   rel.r_offset = got_address;
9910   rel.r_addend = 0;
9911   if (dynindx == -1)
9912     {
9913       /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9914 	 The dynamic linker or static executable then calls SYM_VALUE
9915 	 to determine the correct run-time value of the .igot.plt entry.  */
9916       rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9917       initial_got_entry = sym_value;
9918     }
9919   else
9920     {
9921       /* For FDPIC we will have to resolve an R_ARM_FUNCDESC_VALUE
9922 	 relocation used by the PLT entry.  */
9923       if (htab->fdpic_p)
9924 	{
9925 	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9926 	  initial_got_entry = 0;
9927 	}
9928       else
9929 	{
9930 	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9931 	  initial_got_entry = (splt->output_section->vma
9932 			       + splt->output_offset);
9933 
9934 	  /* PR ld/16017
9935 	     When thumb only we need to set the LSB for any address that
9936 	     will be used with an interworking branch instruction.  */
9937 	  if (using_thumb_only (htab))
9938 	    initial_got_entry |= 1;
9939 	}
9940     }
9941 
9942   /* Fill in the entry in the global offset table.  */
9943   bfd_put_32 (output_bfd, initial_got_entry,
9944 	      sgot->contents + got_offset);
9945 
9946   if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9947     {
9948       /* Set up the initial funcdesc value.  */
9949       /* FIXME: we don't support lazy binding because there is a
9950 	 race condition between both words getting written and
9951 	 some other thread attempting to read them. The ARM
9952 	 architecture does not have an atomic 64 bit load/store
9953 	 instruction that could be used to prevent it; it is
9954 	 recommended that threaded FDPIC applications run with the
9955 	 LD_BIND_NOW environment variable set.  */
9956       bfd_put_32 (output_bfd, plt_address + 0x18,
9957 		  sgot->contents + got_offset);
9958       bfd_put_32 (output_bfd, -1 /*TODO*/,
9959 		  sgot->contents + got_offset + 4);
9960     }
9961 
9962   if (dynindx == -1)
9963     elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9964   else
9965     {
9966       if (htab->fdpic_p)
9967 	{
9968 	  /* For FDPIC we put PLT relocations into .rel.got when not
9969 	     lazy binding, otherwise we put them in .rel.plt.  For now,
9970 	     we don't support lazy binding, so put them in .rel.got.  */
9971 	  if (info->flags & DF_BIND_NOW)
9972 	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
9973 	  else
9974 	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
9975 	}
9976       else
9977 	{
9978 	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
9979 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9980 	}
9981     }
9982 
9983   return true;
9984 }
9985 
9986 /* Some relocations map to different relocations depending on the
9987    target.  Return the real relocation.  */
9988 
9989 static int
9990 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9991 		     int r_type)
9992 {
9993   switch (r_type)
9994     {
9995     case R_ARM_TARGET1:
9996       if (globals->target1_is_rel)
9997 	return R_ARM_REL32;
9998       else
9999 	return R_ARM_ABS32;
10000 
10001     case R_ARM_TARGET2:
10002       return globals->target2_reloc;
10003 
10004     default:
10005       return r_type;
10006     }
10007 }
10008 
10009 /* Return the base VMA address which should be subtracted from real addresses
10010    when resolving @dtpoff relocation.
10011    This is PT_TLS segment p_vaddr.  */
10012 
10013 static bfd_vma
10014 dtpoff_base (struct bfd_link_info *info)
10015 {
10016   /* If tls_sec is NULL, we should have signalled an error already.  */
10017   if (elf_hash_table (info)->tls_sec == NULL)
10018     return 0;
10019   return elf_hash_table (info)->tls_sec->vma;
10020 }
10021 
10022 /* Return the relocation value for @tpoff relocation
10023    if STT_TLS virtual address is ADDRESS.  */
10024 
10025 static bfd_vma
10026 tpoff (struct bfd_link_info *info, bfd_vma address)
10027 {
10028   struct elf_link_hash_table *htab = elf_hash_table (info);
10029   bfd_vma base;
10030 
10031   /* If tls_sec is NULL, we should have signalled an error already.  */
10032   if (htab->tls_sec == NULL)
10033     return 0;
10034   base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10035   return address - htab->tls_sec->vma + base;
10036 }
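
/* Worked example (illustrative values only; assumes the usual ARM TCB_SIZE
   of 8): with tls_sec->vma == 0x11000, an 8-byte-aligned TLS segment and a
   thread-local variable at address 0x11010, tpoff returns
   0x11010 - 0x11000 + 8 == 0x18, the variable's offset from the thread
   pointer.  */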
10037 
10038 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10039    VALUE is the relocation value.  */
10040 
10041 static bfd_reloc_status_type
10042 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10043 {
10044   if (value > 0xfff)
10045     return bfd_reloc_overflow;
10046 
10047   value |= bfd_get_32 (abfd, data) & 0xfffff000;
10048   bfd_put_32 (abfd, value, data);
10049   return bfd_reloc_ok;
10050 }
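
/* For example, a value of 0x123 is merged into bits [11:0] of the existing
   word (typically the immediate field of an LDR or STR), while any value
   above 0xfff is reported as an overflow.  */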
10051 
10052 /* Handle TLS relaxations.  Relaxing is possible for symbols that use
10053    R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10054    R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10055 
10056    Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10057    is to then call final_link_relocate.  Return other values in the
10058    case of error.
10059 
10060    FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10061    the pre-relaxed code.  It would be nice if the relocs were updated
10062    to match the optimization.  */
10063 
10064 static bfd_reloc_status_type
10065 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10066 		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10067 		     Elf_Internal_Rela *rel, unsigned long is_local)
10068 {
10069   unsigned long insn;
10070 
10071   switch (ELF32_R_TYPE (rel->r_info))
10072     {
10073     default:
10074       return bfd_reloc_notsupported;
10075 
10076     case R_ARM_TLS_GOTDESC:
10077       if (is_local)
10078 	insn = 0;
10079       else
10080 	{
10081 	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10082 	  if (insn & 1)
10083 	    insn -= 5; /* THUMB */
10084 	  else
10085 	    insn -= 8; /* ARM */
10086 	}
10087       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10088       return bfd_reloc_continue;
10089 
10090     case R_ARM_THM_TLS_DESCSEQ:
10091       /* Thumb insn.  */
10092       insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10093       if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
10094 	{
10095 	  if (is_local)
10096 	    /* nop */
10097 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10098 	}
10099       else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
10100 	{
10101 	  if (is_local)
10102 	    /* nop */
10103 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10104 	  else
10105 	    /* ldr rx,[ry] */
10106 	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10107 	}
10108       else if ((insn & 0xff87) == 0x4780)  /* blx rx */
10109 	{
10110 	  if (is_local)
10111 	    /* nop */
10112 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10113 	  else
10114 	    /* mov r0, rx */
10115 	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10116 			contents + rel->r_offset);
10117 	}
10118       else
10119 	{
10120 	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10121 	    /* It's a 32 bit instruction, fetch the rest of it for
10122 	       error generation.  */
10123 	    insn = (insn << 16)
10124 	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10125 	  _bfd_error_handler
10126 	    /* xgettext:c-format */
10127 	    (_("%pB(%pA+%#" PRIx64 "): "
10128 	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10129 	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10130 	     "Thumb", insn);
10131 	  return bfd_reloc_notsupported;
10132 	}
10133       break;
10134 
10135     case R_ARM_TLS_DESCSEQ:
10136       /* arm insn.  */
10137       insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10138       if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10139 	{
10140 	  if (is_local)
10141 	    /* mov rx, ry */
10142 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10143 			contents + rel->r_offset);
10144 	}
10145       else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10146 	{
10147 	  if (is_local)
10148 	    /* nop */
10149 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10150 	  else
10151 	    /* ldr rx,[ry] */
10152 	    bfd_put_32 (input_bfd, insn & 0xfffff000,
10153 			contents + rel->r_offset);
10154 	}
10155       else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10156 	{
10157 	  if (is_local)
10158 	    /* nop */
10159 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10160 	  else
10161 	    /* mov r0, rx */
10162 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10163 			contents + rel->r_offset);
10164 	}
10165       else
10166 	{
10167 	  _bfd_error_handler
10168 	    /* xgettext:c-format */
10169 	    (_("%pB(%pA+%#" PRIx64 "): "
10170 	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10171 	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10172 	     "ARM", insn);
10173 	  return bfd_reloc_notsupported;
10174 	}
10175       break;
10176 
10177     case R_ARM_TLS_CALL:
10178       /* GD->IE relaxation, turn the instruction into 'nop' or
10179 	 'ldr r0, [pc,r0]'  */
10180       insn = is_local ? 0xe1a00000 : 0xe79f0000;
10181       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10182       break;
10183 
10184     case R_ARM_THM_TLS_CALL:
10185       /* GD->IE relaxation.  */
10186       if (!is_local)
10187 	/* add r0,pc; ldr r0, [r0]  */
10188 	insn = 0x44786800;
10189       else if (using_thumb2 (globals))
10190 	/* nop.w */
10191 	insn = 0xf3af8000;
10192       else
10193 	/* nop; nop */
10194 	insn = 0xbf00bf00;
10195 
10196       bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10197       bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10198       break;
10199     }
10200   return bfd_reloc_ok;
10201 }
10202 
10203 /* For a given value of n, calculate the value of G_n as required to
10204    deal with group relocations.  We return it in the form of an
10205    encoded constant-and-rotation, together with the final residual.  If n is
10206    specified as less than zero, then final_residual is filled with the
10207    input value and no further action is performed.  */
10208 
10209 static bfd_vma
10210 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10211 {
10212   int current_n;
10213   bfd_vma g_n;
10214   bfd_vma encoded_g_n = 0;
10215   bfd_vma residual = value; /* Also known as Y_n.  */
10216 
10217   for (current_n = 0; current_n <= n; current_n++)
10218     {
10219       int shift;
10220 
10221       /* Calculate which part of the value to mask.  */
10222       if (residual == 0)
10223 	shift = 0;
10224       else
10225 	{
10226 	  int msb;
10227 
10228 	  /* Determine the most significant bit in the residual and
10229 	     align the resulting value to a 2-bit boundary.  */
10230 	  for (msb = 30; msb >= 0; msb -= 2)
10231 	    if (residual & (3u << msb))
10232 	      break;
10233 
10234 	  /* The desired shift is now (msb - 6), or zero, whichever
10235 	     is the greater.  */
10236 	  shift = msb - 6;
10237 	  if (shift < 0)
10238 	    shift = 0;
10239 	}
10240 
10241       /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
10242       g_n = residual & (0xff << shift);
10243       encoded_g_n = (g_n >> shift)
10244 		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10245 
10246       /* Calculate the residual for the next time around.  */
10247       residual &= ~g_n;
10248     }
10249 
10250   *final_residual = residual;
10251 
10252   return encoded_g_n;
10253 }
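
/* Worked example (illustrative value only): for VALUE == 0x12345678,

     n == 0: g_0 == 0x12000000, encoded as 0x548 (imm8 0x48, rotation 5),
	     residual 0x00345678;
     n == 1: g_1 == 0x00344000, encoded as 0x9d1 (imm8 0xd1, rotation 9),
	     residual 0x00001678.

   Each G_n is the next most significant 8-bit chunk of the value, aligned
   to a 2-bit boundary so that it fits an ARM rotated-immediate operand.  */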
10254 
10255 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10256    Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */
10257 
10258 static int
10259 identify_add_or_sub (bfd_vma insn)
10260 {
10261   int opcode = insn & 0x1e00000;
10262 
10263   if (opcode == 1 << 23) /* ADD */
10264     return 1;
10265 
10266   if (opcode == 1 << 22) /* SUB */
10267     return -1;
10268 
10269   return 0;
10270 }
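
/* The test above inspects the data-processing opcode field (bits 24:21):
   ADD is 0b0100 (bit 23 set once masked) and SUB is 0b0010 (bit 22 set).
   For example, 0xe28f0010 (add r0, pc, #16) yields 1 and 0xe24f0010
   (sub r0, pc, #16) yields -1; anything else yields 0.  */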
10271 
10272 /* Perform a relocation as part of a final link.  */
10273 
10274 static bfd_reloc_status_type
10275 elf32_arm_final_link_relocate (reloc_howto_type *	    howto,
10276 			       bfd *			    input_bfd,
10277 			       bfd *			    output_bfd,
10278 			       asection *		    input_section,
10279 			       bfd_byte *		    contents,
10280 			       Elf_Internal_Rela *	    rel,
10281 			       bfd_vma			    value,
10282 			       struct bfd_link_info *	    info,
10283 			       asection *		    sym_sec,
10284 			       const char *		    sym_name,
10285 			       unsigned char		    st_type,
10286 			       enum arm_st_branch_type	    branch_type,
10287 			       struct elf_link_hash_entry * h,
10288 			       bool *			    unresolved_reloc_p,
10289 			       char **			    error_message)
10290 {
10291   unsigned long			r_type = howto->type;
10292   unsigned long			r_symndx;
10293   bfd_byte *			hit_data = contents + rel->r_offset;
10294   bfd_vma *			local_got_offsets;
10295   bfd_vma *			local_tlsdesc_gotents;
10296   asection *			sgot;
10297   asection *			splt;
10298   asection *			sreloc = NULL;
10299   asection *			srelgot;
10300   bfd_vma			addend;
10301   bfd_signed_vma		signed_addend;
10302   unsigned char			dynreloc_st_type;
10303   bfd_vma			dynreloc_value;
10304   struct elf32_arm_link_hash_table * globals;
10305   struct elf32_arm_link_hash_entry *eh;
10306   union gotplt_union	       *root_plt;
10307   struct arm_plt_info	       *arm_plt;
10308   bfd_vma			plt_offset;
10309   bfd_vma			gotplt_offset;
10310   bool				has_iplt_entry;
10311   bool				resolved_to_zero;
10312 
10313   globals = elf32_arm_hash_table (info);
10314   if (globals == NULL)
10315     return bfd_reloc_notsupported;
10316 
10317   BFD_ASSERT (is_arm_elf (input_bfd));
10318   BFD_ASSERT (howto != NULL);
10319 
10320   /* Some relocation types map to different relocations depending on the
10321      target.  We pick the right one here.  */
10322   r_type = arm_real_reloc_type (globals, r_type);
10323 
10324   /* It is possible to have linker relaxations on some TLS access
10325      models.  Update our information here.  */
10326   r_type = elf32_arm_tls_transition (info, r_type, h);
10327 
10328   if (r_type != howto->type)
10329     howto = elf32_arm_howto_from_type (r_type);
10330 
10331   eh = (struct elf32_arm_link_hash_entry *) h;
10332   sgot = globals->root.sgot;
10333   local_got_offsets = elf_local_got_offsets (input_bfd);
10334   local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10335 
10336   if (globals->root.dynamic_sections_created)
10337     srelgot = globals->root.srelgot;
10338   else
10339     srelgot = NULL;
10340 
10341   r_symndx = ELF32_R_SYM (rel->r_info);
10342 
10343   if (globals->use_rel)
10344     {
10345       bfd_vma sign;
10346 
10347       switch (howto->size)
10348 	{
10349 	case 0: addend = bfd_get_8 (input_bfd, hit_data); break;
10350 	case 1: addend = bfd_get_16 (input_bfd, hit_data); break;
10351 	case 2: addend = bfd_get_32 (input_bfd, hit_data); break;
10352 	default: addend = 0; break;
10353 	}
10354       /* Note: the addend and signed_addend calculated here are
10355 	 incorrect for any split field.  */
10356       addend &= howto->src_mask;
10357       sign = howto->src_mask & ~(howto->src_mask >> 1);
10358       signed_addend = (addend ^ sign) - sign;
10359       signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10360       addend <<= howto->rightshift;
10361     }
10362   else
10363     addend = signed_addend = rel->r_addend;
10364 
10365   /* ST_BRANCH_TO_ARM is meaningless for Thumb-only targets when we
10366      are resolving a function call relocation.  */
10367   if (using_thumb_only (globals)
10368       && (r_type == R_ARM_THM_CALL
10369 	  || r_type == R_ARM_THM_JUMP24)
10370       && branch_type == ST_BRANCH_TO_ARM)
10371     branch_type = ST_BRANCH_TO_THUMB;
10372 
10373   /* Record the symbol information that should be used in dynamic
10374      relocations.  */
10375   dynreloc_st_type = st_type;
10376   dynreloc_value = value;
10377   if (branch_type == ST_BRANCH_TO_THUMB)
10378     dynreloc_value |= 1;
10379 
10380   /* Find out whether the symbol has a PLT.  Set ST_TYPE, BRANCH_TYPE and
10381      VALUE appropriately for relocations that we resolve at link time.  */
10382   has_iplt_entry = false;
10383   if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10384 			      &arm_plt)
10385       && root_plt->offset != (bfd_vma) -1)
10386     {
10387       plt_offset = root_plt->offset;
10388       gotplt_offset = arm_plt->got_offset;
10389 
10390       if (h == NULL || eh->is_iplt)
10391 	{
10392 	  has_iplt_entry = true;
10393 	  splt = globals->root.iplt;
10394 
10395 	  /* Populate .iplt entries here, because not all of them will
10396 	     be seen by finish_dynamic_symbol.  The lower bit is set if
10397 	     we have already populated the entry.  */
10398 	  if (plt_offset & 1)
10399 	    plt_offset--;
10400 	  else
10401 	    {
10402 	      if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10403 						-1, dynreloc_value))
10404 		root_plt->offset |= 1;
10405 	      else
10406 		return bfd_reloc_notsupported;
10407 	    }
10408 
10409 	  /* Static relocations always resolve to the .iplt entry.  */
10410 	  st_type = STT_FUNC;
10411 	  value = (splt->output_section->vma
10412 		   + splt->output_offset
10413 		   + plt_offset);
10414 	  branch_type = ST_BRANCH_TO_ARM;
10415 
10416 	  /* If there are non-call relocations that resolve to the .iplt
10417 	     entry, then all dynamic ones must too.  */
10418 	  if (arm_plt->noncall_refcount != 0)
10419 	    {
10420 	      dynreloc_st_type = st_type;
10421 	      dynreloc_value = value;
10422 	    }
10423 	}
10424       else
10425 	/* We populate the .plt entry in finish_dynamic_symbol.  */
10426 	splt = globals->root.splt;
10427     }
10428   else
10429     {
10430       splt = NULL;
10431       plt_offset = (bfd_vma) -1;
10432       gotplt_offset = (bfd_vma) -1;
10433     }
10434 
10435   resolved_to_zero = (h != NULL
10436 		      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10437 
10438   switch (r_type)
10439     {
10440     case R_ARM_NONE:
10441       /* We don't need to find a value for this symbol.  It's just a
10442 	 marker.  */
10443       *unresolved_reloc_p = false;
10444       return bfd_reloc_ok;
10445 
10446     case R_ARM_ABS12:
10447       if (globals->root.target_os != is_vxworks)
10448 	return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10449       /* Fall through.  */
10450 
10451     case R_ARM_PC24:
10452     case R_ARM_ABS32:
10453     case R_ARM_ABS32_NOI:
10454     case R_ARM_REL32:
10455     case R_ARM_REL32_NOI:
10456     case R_ARM_CALL:
10457     case R_ARM_JUMP24:
10458     case R_ARM_XPC25:
10459     case R_ARM_PREL31:
10460     case R_ARM_PLT32:
10461       /* Handle relocations which should use the PLT entry.  ABS32/REL32
10462 	 will use the symbol's value, which may point to a PLT entry, but we
10463 	 don't need to handle that here.  If we created a PLT entry, all
10464 	 branches in this object should go to it, except if the PLT is too
10465 	 far away, in which case a long branch stub should be inserted.  */
10466       if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10467 	   && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10468 	   && r_type != R_ARM_CALL
10469 	   && r_type != R_ARM_JUMP24
10470 	   && r_type != R_ARM_PLT32)
10471 	  && plt_offset != (bfd_vma) -1)
10472 	{
10473 	  /* If we've created a .plt section, and assigned a PLT entry
10474 	     to this function, it must either be a STT_GNU_IFUNC reference
10475 	     or not be known to bind locally.  In other cases, we should
10476 	     have cleared the PLT entry by now.  */
10477 	  BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10478 
10479 	  value = (splt->output_section->vma
10480 		   + splt->output_offset
10481 		   + plt_offset);
10482 	  *unresolved_reloc_p = false;
10483 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10484 					   contents, rel->r_offset, value,
10485 					   rel->r_addend);
10486 	}
10487 
10488       /* When generating a shared object or relocatable executable, these
10489 	 relocations are copied into the output file to be resolved at
10490 	 run time.  */
10491       if ((bfd_link_pic (info)
10492 	   || globals->root.is_relocatable_executable
10493 	   || globals->fdpic_p)
10494 	  && (input_section->flags & SEC_ALLOC)
10495 	  && !(globals->root.target_os == is_vxworks
10496 	       && strcmp (input_section->output_section->name,
10497 			  ".tls_vars") == 0)
10498 	  && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10499 	      || !SYMBOL_CALLS_LOCAL (info, h))
10500 	  && !(input_bfd == globals->stub_bfd
10501 	       && strstr (input_section->name, STUB_SUFFIX))
10502 	  && (h == NULL
10503 	      || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10504 		  && !resolved_to_zero)
10505 	      || h->root.type != bfd_link_hash_undefweak)
10506 	  && r_type != R_ARM_PC24
10507 	  && r_type != R_ARM_CALL
10508 	  && r_type != R_ARM_JUMP24
10509 	  && r_type != R_ARM_PREL31
10510 	  && r_type != R_ARM_PLT32)
10511 	{
10512 	  Elf_Internal_Rela outrel;
10513 	  bool skip, relocate;
10514 	  int isrofixup = 0;
10515 
10516 	  if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10517 	      && !h->def_regular)
10518 	    {
10519 	      char *v = _("shared object");
10520 
10521 	      if (bfd_link_executable (info))
10522 		v = _("PIE executable");
10523 
10524 	      _bfd_error_handler
10525 		(_("%pB: relocation %s against external or undefined symbol `%s'"
10526 		   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10527 		 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10528 	      return bfd_reloc_notsupported;
10529 	    }
10530 
10531 	  *unresolved_reloc_p = false;
10532 
10533 	  if (sreloc == NULL && globals->root.dynamic_sections_created)
10534 	    {
10535 	      sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10536 							   ! globals->use_rel);
10537 
10538 	      if (sreloc == NULL)
10539 		return bfd_reloc_notsupported;
10540 	    }
10541 
10542 	  skip = false;
10543 	  relocate = false;
10544 
10545 	  outrel.r_addend = addend;
10546 	  outrel.r_offset =
10547 	    _bfd_elf_section_offset (output_bfd, info, input_section,
10548 				     rel->r_offset);
10549 	  if (outrel.r_offset == (bfd_vma) -1)
10550 	    skip = true;
10551 	  else if (outrel.r_offset == (bfd_vma) -2)
10552 	    skip = true, relocate = true;
10553 	  outrel.r_offset += (input_section->output_section->vma
10554 			      + input_section->output_offset);
10555 
10556 	  if (skip)
10557 	    memset (&outrel, 0, sizeof outrel);
10558 	  else if (h != NULL
10559 		   && h->dynindx != -1
10560 		   && (!bfd_link_pic (info)
10561 		       || !(bfd_link_pie (info)
10562 			    || SYMBOLIC_BIND (info, h))
10563 		       || !h->def_regular))
10564 	    outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10565 	  else
10566 	    {
10567 	      int symbol;
10568 
10569 	      /* This symbol is local, or marked to become local.  */
10570 	      BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10571 			  || (globals->fdpic_p && !bfd_link_pic (info)));
10572 	      /* On SVR4-ish systems, the dynamic loader cannot
10573 		 relocate the text and data segments independently,
10574 		 so the symbol does not matter.  */
10575 	      symbol = 0;
10576 	      if (dynreloc_st_type == STT_GNU_IFUNC)
10577 		/* We have an STT_GNU_IFUNC symbol that doesn't resolve
10578 		   to the .iplt entry.  Instead, every non-call reference
10579 		   must use an R_ARM_IRELATIVE relocation to obtain the
10580 		   correct run-time address.  */
10581 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10582 	      else if (globals->fdpic_p && !bfd_link_pic (info))
10583 		isrofixup = 1;
10584 	      else
10585 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10586 	      if (globals->use_rel)
10587 		relocate = true;
10588 	      else
10589 		outrel.r_addend += dynreloc_value;
10590 	    }
10591 
10592 	  if (isrofixup)
10593 	    arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10594 	  else
10595 	    elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10596 
10597 	  /* If this reloc is against an external symbol, we do not want to
10598 	     fiddle with the addend.  Otherwise, we need to include the symbol
10599 	     value so that it becomes an addend for the dynamic reloc.  */
10600 	  if (! relocate)
10601 	    return bfd_reloc_ok;
10602 
10603 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10604 					   contents, rel->r_offset,
10605 					   dynreloc_value, (bfd_vma) 0);
10606 	}
10607       else switch (r_type)
10608 	{
10609 	case R_ARM_ABS12:
10610 	  return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10611 
10612 	case R_ARM_XPC25:	  /* Arm BLX instruction.  */
10613 	case R_ARM_CALL:
10614 	case R_ARM_JUMP24:
10615 	case R_ARM_PC24:	  /* Arm B/BL instruction.  */
10616 	case R_ARM_PLT32:
10617 	  {
10618 	  struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10619 
10620 	  if (r_type == R_ARM_XPC25)
10621 	    {
10622 	      /* Check for Arm calling Arm function.  */
10623 	      /* FIXME: Should we translate the instruction into a BL
10624 		 instruction instead ?  */
10625 	      if (branch_type != ST_BRANCH_TO_THUMB)
10626 		_bfd_error_handler
10627 		  (_("%pB: warning: %s BLX instruction targets"
10628 		     " %s function '%s'"),
10629 		   input_bfd, "ARM",
10630 		   "ARM", h ? h->root.root.string : "(local)");
10631 	    }
10632 	  else if (r_type == R_ARM_PC24)
10633 	    {
10634 	      /* Check for Arm calling Thumb function.  */
10635 	      if (branch_type == ST_BRANCH_TO_THUMB)
10636 		{
10637 		  if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10638 					       output_bfd, input_section,
10639 					       hit_data, sym_sec, rel->r_offset,
10640 					       signed_addend, value,
10641 					       error_message))
10642 		    return bfd_reloc_ok;
10643 		  else
10644 		    return bfd_reloc_dangerous;
10645 		}
10646 	    }
10647 
10648 	  /* Check if a stub has to be inserted because the
10649 	     destination is too far or we are changing mode.  */
10650 	  if (   r_type == R_ARM_CALL
10651 	      || r_type == R_ARM_JUMP24
10652 	      || r_type == R_ARM_PLT32)
10653 	    {
10654 	      enum elf32_arm_stub_type stub_type = arm_stub_none;
10655 	      struct elf32_arm_link_hash_entry *hash;
10656 
10657 	      hash = (struct elf32_arm_link_hash_entry *) h;
10658 	      stub_type = arm_type_of_stub (info, input_section, rel,
10659 					    st_type, &branch_type,
10660 					    hash, value, sym_sec,
10661 					    input_bfd, sym_name);
10662 
10663 	      if (stub_type != arm_stub_none)
10664 		{
10665 		  /* The target is out of reach, so redirect the
10666 		     branch to the local stub for this function.  */
10667 		  stub_entry = elf32_arm_get_stub_entry (input_section,
10668 							 sym_sec, h,
10669 							 rel, globals,
10670 							 stub_type);
10671 		  {
10672 		    if (stub_entry != NULL)
10673 		      value = (stub_entry->stub_offset
10674 			       + stub_entry->stub_sec->output_offset
10675 			       + stub_entry->stub_sec->output_section->vma);
10676 
10677 		    if (plt_offset != (bfd_vma) -1)
10678 		      *unresolved_reloc_p = false;
10679 		  }
10680 		}
10681 	      else
10682 		{
10683 		  /* If the call goes through a PLT entry, make sure to
10684 		     check distance to the right destination address.  */
10685 		  if (plt_offset != (bfd_vma) -1)
10686 		    {
10687 		      value = (splt->output_section->vma
10688 			       + splt->output_offset
10689 			       + plt_offset);
10690 		      *unresolved_reloc_p = false;
10691 		      /* The PLT entry is in ARM mode, regardless of the
10692 			 target function.  */
10693 		      branch_type = ST_BRANCH_TO_ARM;
10694 		    }
10695 		}
10696 	    }
10697 
10698 	  /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10699 	     where:
10700 	      S is the address of the symbol in the relocation.
10701 	      P is address of the instruction being relocated.
10702 	      A is the addend (extracted from the instruction) in bytes.
10703 
10704 	     S is held in 'value'.
10705 	     P is the base address of the section containing the
10706 	       instruction plus the offset of the reloc into that
10707 	       section, ie:
10708 		 (input_section->output_section->vma +
10709 		  input_section->output_offset +
10710 		  rel->r_offset).
10711 	     A is the addend, converted into bytes, ie:
10712 		 (signed_addend * 4)
10713 
10714 	     Note: None of these operations have knowledge of the pipeline
10715 	     size of the processor, thus it is up to the assembler to
10716 	     encode this information into the addend.  */
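	  /* Purely illustrative example (values invented for clarity): for
	     a BL at P == 0x8000 targeting S == 0x8400, with the usual
	     extracted addend of -8 (the field encodes -2, shifted left by
	     two), the code below computes 0x8400 - 0x8000 - 8 == 0x3f8,
	     which is shifted right by two to 0xfe before being merged into
	     the 24-bit branch offset field.  */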
10717 	  value -= (input_section->output_section->vma
10718 		    + input_section->output_offset);
10719 	  value -= rel->r_offset;
10720 	  value += signed_addend;
10721 
10722 	  signed_addend = value;
10723 	  signed_addend >>= howto->rightshift;
10724 
10725 	  /* A branch to an undefined weak symbol is turned into a jump to
10726 	     the next instruction unless a PLT entry will be created.
10727 	     Do the same for local undefined symbols (but not for STN_UNDEF).
10728 	     The jump to the next instruction is optimized as a NOP depending
10729 	     on the architecture.  */
10730 	  if (h ? (h->root.type == bfd_link_hash_undefweak
10731 		   && plt_offset == (bfd_vma) -1)
10732 	      : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10733 	    {
10734 	      value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10735 
10736 	      if (arch_has_arm_nop (globals))
10737 		value |= 0x0320f000;
10738 	      else
10739 		value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
10740 	    }
10741 	  else
10742 	    {
10743 	      /* Perform a signed range check.  */
10744 	      if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
10745 		  || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10746 		return bfd_reloc_overflow;
10747 
10748 	      addend = (value & 2);
10749 
10750 	      value = (signed_addend & howto->dst_mask)
10751 		| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10752 
10753 	      if (r_type == R_ARM_CALL)
10754 		{
10755 		  /* Set the H bit in the BLX instruction.  */
10756 		  if (branch_type == ST_BRANCH_TO_THUMB)
10757 		    {
10758 		      if (addend)
10759 			value |= (1 << 24);
10760 		      else
10761 			value &= ~(bfd_vma)(1 << 24);
10762 		    }
10763 
10764 		  /* Select the correct instruction (BL or BLX).  */
10765 		  /* Only if we are not handling a BL to a stub. In this
10766 		     case, mode switching is performed by the stub.  */
10767 		  if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10768 		    value |= (1 << 28);
10769 		  else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10770 		    {
10771 		      value &= ~(bfd_vma)(1 << 28);
10772 		      value |= (1 << 24);
10773 		    }
10774 		}
10775 	    }
10776 	  }
10777 	  break;
10778 
10779 	case R_ARM_ABS32:
10780 	  value += addend;
10781 	  if (branch_type == ST_BRANCH_TO_THUMB)
10782 	    value |= 1;
10783 	  break;
10784 
10785 	case R_ARM_ABS32_NOI:
10786 	  value += addend;
10787 	  break;
10788 
10789 	case R_ARM_REL32:
10790 	  value += addend;
10791 	  if (branch_type == ST_BRANCH_TO_THUMB)
10792 	    value |= 1;
10793 	  value -= (input_section->output_section->vma
10794 		    + input_section->output_offset + rel->r_offset);
10795 	  break;
10796 
10797 	case R_ARM_REL32_NOI:
10798 	  value += addend;
10799 	  value -= (input_section->output_section->vma
10800 		    + input_section->output_offset + rel->r_offset);
10801 	  break;
10802 
10803 	case R_ARM_PREL31:
10804 	  value -= (input_section->output_section->vma
10805 		    + input_section->output_offset + rel->r_offset);
10806 	  value += signed_addend;
10807 	  if (! h || h->root.type != bfd_link_hash_undefweak)
10808 	    {
10809 	      /* Check for overflow.  */
10810 	      if ((value ^ (value >> 1)) & (1 << 30))
10811 		return bfd_reloc_overflow;
10812 	    }
10813 	  value &= 0x7fffffff;
10814 	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10815 	  if (branch_type == ST_BRANCH_TO_THUMB)
10816 	    value |= 1;
10817 	  break;
10818 	}
10819 
10820       bfd_put_32 (input_bfd, value, hit_data);
10821       return bfd_reloc_ok;
10822 
10823     case R_ARM_ABS8:
10824       value += addend;
10825 
10826       /* There is no way to tell whether the user intended to use a signed or
10827 	 unsigned addend.  When checking for overflow we accept either,
10828 	 as specified by the AAELF.  */
10829       if ((long) value > 0xff || (long) value < -0x80)
10830 	return bfd_reloc_overflow;
10831 
10832       bfd_put_8 (input_bfd, value, hit_data);
10833       return bfd_reloc_ok;
10834 
10835     case R_ARM_ABS16:
10836       value += addend;
10837 
10838       /* See comment for R_ARM_ABS8.  */
10839       if ((long) value > 0xffff || (long) value < -0x8000)
10840 	return bfd_reloc_overflow;
10841 
10842       bfd_put_16 (input_bfd, value, hit_data);
10843       return bfd_reloc_ok;
10844 
10845     case R_ARM_THM_ABS5:
10846       /* Support ldr and str instructions for the thumb.  */
10847       if (globals->use_rel)
10848 	{
10849 	  /* Need to refetch addend.  */
10850 	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10851 	  /* ??? Need to determine shift amount from operand size.  */
10852 	  addend >>= howto->rightshift;
10853 	}
10854       value += addend;
10855 
10856       /* ??? Isn't value unsigned?  */
10857       if ((long) value > 0x1f || (long) value < -0x10)
10858 	return bfd_reloc_overflow;
10859 
10860       /* ??? Value needs to be properly shifted into place first.  */
10861       value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10862       bfd_put_16 (input_bfd, value, hit_data);
10863       return bfd_reloc_ok;
10864 
10865     case R_ARM_THM_ALU_PREL_11_0:
10866       /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
10867       {
10868 	bfd_vma insn;
10869 	bfd_signed_vma relocation;
10870 
10871 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10872 	     | bfd_get_16 (input_bfd, hit_data + 2);
10873 
10874 	if (globals->use_rel)
10875 	  {
10876 	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10877 			  | ((insn & (1 << 26)) >> 15);
10878 	    if (insn & 0xf00000)
10879 	      signed_addend = -signed_addend;
10880 	  }
10881 
10882 	relocation = value + signed_addend;
10883 	relocation -= Pa (input_section->output_section->vma
10884 			  + input_section->output_offset
10885 			  + rel->r_offset);
10886 
10887 	/* PR 21523: Use an absolute value.  The user of this reloc will
10888 	   have already selected an ADD or SUB insn appropriately.  */
10889 	value = llabs (relocation);
10890 
10891 	if (value >= 0x1000)
10892 	  return bfd_reloc_overflow;
10893 
10894 	/* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
10895 	if (branch_type == ST_BRANCH_TO_THUMB)
10896 	  value |= 1;
10897 
10898 	insn = (insn & 0xfb0f8f00) | (value & 0xff)
10899 	     | ((value & 0x700) << 4)
10900 	     | ((value & 0x800) << 15);
10901 	if (relocation < 0)
10902 	  insn |= 0xa00000;
10903 
10904 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
10905 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10906 
10907 	return bfd_reloc_ok;
10908       }
10909 
10910     case R_ARM_THM_PC8:
10911       /* PR 10073:  This reloc is not generated by the GNU toolchain,
10912 	 but it is supported for compatibility with third party libraries
10913 	 generated by other compilers, specifically the ARM/IAR.  */
10914       {
10915 	bfd_vma insn;
10916 	bfd_signed_vma relocation;
10917 
10918 	insn = bfd_get_16 (input_bfd, hit_data);
10919 
10920 	if (globals->use_rel)
10921 	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
10922 
10923 	relocation = value + addend;
10924 	relocation -= Pa (input_section->output_section->vma
10925 			  + input_section->output_offset
10926 			  + rel->r_offset);
10927 
10928 	value = relocation;
10929 
10930 	/* We do not check for overflow of this reloc.  Although strictly
10931 	   speaking this is incorrect, it appears to be necessary in order
10932 	   to work with IAR generated relocs.  Since GCC and GAS do not
10933 	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10934 	   a problem for them.  */
10935 	value &= 0x3fc;
10936 
10937 	insn = (insn & 0xff00) | (value >> 2);
10938 
10939 	bfd_put_16 (input_bfd, insn, hit_data);
10940 
10941 	return bfd_reloc_ok;
10942       }
10943 
10944     case R_ARM_THM_PC12:
10945       /* Corresponds to: ldr.w reg, [pc, #offset].  */
10946       {
10947 	bfd_vma insn;
10948 	bfd_signed_vma relocation;
10949 
10950 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10951 	     | bfd_get_16 (input_bfd, hit_data + 2);
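	/* For reference, this is a 32-bit LDR (literal): the U bit (bit 23)
	   gives the sign of the offset and bits 11-0 hold the 12-bit
	   immediate, so the load address is Align(PC, 4) +/- imm12.  */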
10952 
10953 	if (globals->use_rel)
10954 	  {
10955 	    signed_addend = insn & 0xfff;
10956 	    if (!(insn & (1 << 23)))
10957 	      signed_addend = -signed_addend;
10958 	  }
10959 
10960 	relocation = value + signed_addend;
10961 	relocation -= Pa (input_section->output_section->vma
10962 			  + input_section->output_offset
10963 			  + rel->r_offset);
10964 
10965 	value = relocation;
10966 
10967 	if (value >= 0x1000)
10968 	  return bfd_reloc_overflow;
10969 
10970 	insn = (insn & 0xff7ff000) | value;
10971 	if (relocation >= 0)
10972 	  insn |= (1 << 23);
10973 
10974 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
10975 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10976 
10977 	return bfd_reloc_ok;
10978       }
10979 
10980     case R_ARM_THM_XPC22:
10981     case R_ARM_THM_CALL:
10982     case R_ARM_THM_JUMP24:
10983       /* Thumb BL (branch long instruction).  */
10984       {
10985 	bfd_vma relocation;
10986 	bfd_vma reloc_sign;
10987 	bool overflow = false;
10988 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10989 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10990 	bfd_signed_vma reloc_signed_max;
10991 	bfd_signed_vma reloc_signed_min;
10992 	bfd_vma check;
10993 	bfd_signed_vma signed_check;
10994 	int bitsize;
10995 	const int thumb2 = using_thumb2 (globals);
10996 	const int thumb2_bl = using_thumb2_bl (globals);
10997 
10998 	/* A branch to an undefined weak symbol is turned into a jump to
10999 	   the next instruction unless a PLT entry will be created.
11000 	   The jump to the next instruction is optimized as a NOP.W for
11001 	   Thumb-2 enabled architectures.  */
11002 	if (h && h->root.type == bfd_link_hash_undefweak
11003 	    && plt_offset == (bfd_vma) -1)
11004 	  {
11005 	    if (thumb2)
11006 	      {
11007 		bfd_put_16 (input_bfd, 0xf3af, hit_data);
11008 		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11009 	      }
11010 	    else
11011 	      {
11012 		bfd_put_16 (input_bfd, 0xe000, hit_data);
11013 		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11014 	      }
11015 	    return bfd_reloc_ok;
11016 	  }
11017 
11018 	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
11019 	   with Thumb-1) involving the J1 and J2 bits.  */
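	/* For reference, the BL/BLX encoding is:
	     upper halfword: 1 1 1 1 0 S imm10
	     lower halfword: 1 1 J1 1 J2 imm11   (BLX clears bit 12)
	   with I1 = NOT (J1 EOR S), I2 = NOT (J2 EOR S), and the branch
	   offset being SignExtend (S:I1:I2:imm10:imm11:'0').  */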
11020 	if (globals->use_rel)
11021 	  {
11022 	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11023 	    bfd_vma upper = upper_insn & 0x3ff;
11024 	    bfd_vma lower = lower_insn & 0x7ff;
11025 	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11026 	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11027 	    bfd_vma i1 = j1 ^ s ? 0 : 1;
11028 	    bfd_vma i2 = j2 ^ s ? 0 : 1;
11029 
11030 	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11031 	    /* Sign extend.  */
11032 	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11033 
11034 	    signed_addend = addend;
11035 	  }
11036 
11037 	if (r_type == R_ARM_THM_XPC22)
11038 	  {
11039 	    /* Check for Thumb to Thumb call.  */
11040 	    /* FIXME: Should we translate the instruction into a BL
11041 	       instruction instead ?  */
11042 	    if (branch_type == ST_BRANCH_TO_THUMB)
11043 	      _bfd_error_handler
11044 		(_("%pB: warning: %s BLX instruction targets"
11045 		   " %s function '%s'"),
11046 		 input_bfd, "Thumb",
11047 		 "Thumb", h ? h->root.root.string : "(local)");
11048 	  }
11049 	else
11050 	  {
11051 	    /* If it is not a call to Thumb, assume call to Arm.
11052 	       If it is a call relative to a section name, then it is not a
11053 	       function call at all, but rather a long jump.  Calls through
11054 	       the PLT do not require stubs.  */
11055 	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11056 	      {
11057 		if (globals->use_blx && r_type == R_ARM_THM_CALL)
11058 		  {
11059 		    /* Convert BL to BLX.  */
11060 		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
11061 		  }
		else if (r_type != R_ARM_THM_CALL
			 && r_type != R_ARM_THM_JUMP24)
11064 		  {
11065 		    if (elf32_thumb_to_arm_stub
11066 			(info, sym_name, input_bfd, output_bfd, input_section,
11067 			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11068 			 error_message))
11069 		      return bfd_reloc_ok;
11070 		    else
11071 		      return bfd_reloc_dangerous;
11072 		  }
11073 	      }
11074 	    else if (branch_type == ST_BRANCH_TO_THUMB
11075 		     && globals->use_blx
11076 		     && r_type == R_ARM_THM_CALL)
11077 	      {
11078 		/* Make sure this is a BL.  */
11079 		lower_insn |= 0x1800;
11080 	      }
11081 	  }
11082 
11083 	enum elf32_arm_stub_type stub_type = arm_stub_none;
11084 	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11085 	  {
11086 	    /* Check if a stub has to be inserted because the destination
11087 	       is too far.  */
11088 	    struct elf32_arm_stub_hash_entry *stub_entry;
11089 	    struct elf32_arm_link_hash_entry *hash;
11090 
11091 	    hash = (struct elf32_arm_link_hash_entry *) h;
11092 
11093 	    stub_type = arm_type_of_stub (info, input_section, rel,
11094 					  st_type, &branch_type,
11095 					  hash, value, sym_sec,
11096 					  input_bfd, sym_name);
11097 
11098 	    if (stub_type != arm_stub_none)
11099 	      {
11100 		/* The target is out of reach or we are changing modes, so
11101 		   redirect the branch to the local stub for this
11102 		   function.  */
11103 		stub_entry = elf32_arm_get_stub_entry (input_section,
11104 						       sym_sec, h,
11105 						       rel, globals,
11106 						       stub_type);
11107 		if (stub_entry != NULL)
11108 		  {
11109 		    value = (stub_entry->stub_offset
11110 			     + stub_entry->stub_sec->output_offset
11111 			     + stub_entry->stub_sec->output_section->vma);
11112 
11113 		    if (plt_offset != (bfd_vma) -1)
11114 		      *unresolved_reloc_p = false;
11115 		  }
11116 
11117 		/* If this call becomes a call to Arm, force BLX.  */
11118 		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11119 		  {
11120 		    if ((stub_entry
11121 			 && !arm_stub_is_thumb (stub_entry->stub_type))
11122 			|| branch_type != ST_BRANCH_TO_THUMB)
11123 		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
11124 		  }
11125 	      }
11126 	  }
11127 
11128 	/* Handle calls via the PLT.  */
11129 	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11130 	  {
11131 	    value = (splt->output_section->vma
11132 		     + splt->output_offset
11133 		     + plt_offset);
11134 
11135 	    if (globals->use_blx
11136 		&& r_type == R_ARM_THM_CALL
11137 		&& ! using_thumb_only (globals))
11138 	      {
11139 		/* If the Thumb BLX instruction is available, convert
11140 		   the BL to a BLX instruction to call the ARM-mode
11141 		   PLT entry.  */
11142 		lower_insn = (lower_insn & ~0x1000) | 0x0800;
11143 		branch_type = ST_BRANCH_TO_ARM;
11144 	      }
11145 	    else
11146 	      {
11147 		if (! using_thumb_only (globals))
11148 		  /* Target the Thumb stub before the ARM PLT entry.  */
11149 		  value -= PLT_THUMB_STUB_SIZE;
11150 		branch_type = ST_BRANCH_TO_THUMB;
11151 	      }
11152 	    *unresolved_reloc_p = false;
11153 	  }
11154 
11155 	relocation = value + signed_addend;
11156 
11157 	relocation -= (input_section->output_section->vma
11158 		       + input_section->output_offset
11159 		       + rel->r_offset);
11160 
11161 	check = relocation >> howto->rightshift;
11162 
11163 	/* If this is a signed value, the rightshift just dropped
11164 	   leading 1 bits (assuming twos complement).  */
11165 	if ((bfd_signed_vma) relocation >= 0)
11166 	  signed_check = check;
11167 	else
11168 	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11169 
	/* Calculate the permissible maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  */
11173 	bitsize = howto->bitsize;
11174 	if (!thumb2_bl)
11175 	  bitsize -= 2;
11176 	reloc_signed_max = (1 << (bitsize - 1)) - 1;
11177 	reloc_signed_min = ~reloc_signed_max;
11178 
11179 	/* Assumes two's complement.  */
11180 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11181 	  overflow = true;
11182 
11183 	if ((lower_insn & 0x5000) == 0x4000)
11184 	  /* For a BLX instruction, make sure that the relocation is rounded up
11185 	     to a word boundary.  This follows the semantics of the instruction
11186 	     which specifies that bit 1 of the target address will come from bit
11187 	     1 of the base address.  */
11188 	  relocation = (relocation + 2) & ~ 3;
11189 
11190 	/* Put RELOCATION back into the insn.  Assumes two's complement.
11191 	   We use the Thumb-2 encoding, which is safe even if dealing with
11192 	   a Thumb-1 instruction by virtue of our overflow check above.  */
11193 	reloc_sign = (signed_check < 0) ? 1 : 0;
11194 	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11195 		     | ((relocation >> 12) & 0x3ff)
11196 		     | (reloc_sign << 10);
11197 	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11198 		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11199 		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11200 		     | ((relocation >> 1) & 0x7ff);
11201 
11202 	/* Put the relocated value back in the object file:  */
11203 	bfd_put_16 (input_bfd, upper_insn, hit_data);
11204 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11205 
11206 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11207       }
11208       break;
11209 
11210     case R_ARM_THM_JUMP19:
11211       /* Thumb32 conditional branch instruction.  */
11212       {
11213 	bfd_vma relocation;
11214 	bool overflow = false;
11215 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11216 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11217 	bfd_signed_vma reloc_signed_max = 0xffffe;
11218 	bfd_signed_vma reloc_signed_min = -0x100000;
11219 	bfd_signed_vma signed_check;
11220 	enum elf32_arm_stub_type stub_type = arm_stub_none;
11221 	struct elf32_arm_stub_hash_entry *stub_entry;
11222 	struct elf32_arm_link_hash_entry *hash;
11223 
11224 	/* Need to refetch the addend, reconstruct the top three bits,
11225 	   and squish the two 11 bit pieces together.  */
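	/* For reference, the B<cond>.W encoding is:
	     upper halfword: 1 1 1 1 0 S cond imm6
	     lower halfword: 1 0 J1 0 J2 imm11
	   and the branch offset is SignExtend (S:J2:J1:imm6:imm11:'0');
	   unlike BL/BLX, J1 and J2 are not XORed with S here.  */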
11226 	if (globals->use_rel)
11227 	  {
11228 	    bfd_vma S     = (upper_insn & 0x0400) >> 10;
11229 	    bfd_vma upper = (upper_insn & 0x003f);
11230 	    bfd_vma J1    = (lower_insn & 0x2000) >> 13;
11231 	    bfd_vma J2    = (lower_insn & 0x0800) >> 11;
11232 	    bfd_vma lower = (lower_insn & 0x07ff);
11233 
11234 	    upper |= J1 << 6;
11235 	    upper |= J2 << 7;
11236 	    upper |= (!S) << 8;
11237 	    upper -= 0x0100; /* Sign extend.  */
11238 
11239 	    addend = (upper << 12) | (lower << 1);
11240 	    signed_addend = addend;
11241 	  }
11242 
11243 	/* Handle calls via the PLT.  */
11244 	if (plt_offset != (bfd_vma) -1)
11245 	  {
11246 	    value = (splt->output_section->vma
11247 		     + splt->output_offset
11248 		     + plt_offset);
11249 	    /* Target the Thumb stub before the ARM PLT entry.  */
11250 	    value -= PLT_THUMB_STUB_SIZE;
11251 	    *unresolved_reloc_p = false;
11252 	  }
11253 
11254 	hash = (struct elf32_arm_link_hash_entry *)h;
11255 
11256 	stub_type = arm_type_of_stub (info, input_section, rel,
11257 				      st_type, &branch_type,
11258 				      hash, value, sym_sec,
11259 				      input_bfd, sym_name);
11260 	if (stub_type != arm_stub_none)
11261 	  {
11262 	    stub_entry = elf32_arm_get_stub_entry (input_section,
11263 						   sym_sec, h,
11264 						   rel, globals,
11265 						   stub_type);
11266 	    if (stub_entry != NULL)
11267 	      {
11268 		value = (stub_entry->stub_offset
11269 			+ stub_entry->stub_sec->output_offset
11270 			+ stub_entry->stub_sec->output_section->vma);
11271 	      }
11272 	  }
11273 
11274 	relocation = value + signed_addend;
11275 	relocation -= (input_section->output_section->vma
11276 		       + input_section->output_offset
11277 		       + rel->r_offset);
11278 	signed_check = (bfd_signed_vma) relocation;
11279 
11280 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11281 	  overflow = true;
11282 
11283 	/* Put RELOCATION back into the insn.  */
11284 	{
11285 	  bfd_vma S  = (relocation & 0x00100000) >> 20;
11286 	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
11287 	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
11288 	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
11289 	  bfd_vma lo = (relocation & 0x00000ffe) >>  1;
11290 
11291 	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11292 	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11293 	}
11294 
11295 	/* Put the relocated value back in the object file:  */
11296 	bfd_put_16 (input_bfd, upper_insn, hit_data);
11297 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11298 
11299 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11300       }
11301 
11302     case R_ARM_THM_JUMP11:
11303     case R_ARM_THM_JUMP8:
11304     case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction.  */
11306       {
11307 	bfd_signed_vma relocation;
11308 	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11309 	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11310 	bfd_signed_vma signed_check;
11311 
11312 	/* CZB cannot jump backward.  */
11313 	if (r_type == R_ARM_THM_JUMP6)
11314 	  {
11315 	    reloc_signed_min = 0;
11316 	    if (globals->use_rel)
11317 	      signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
11318 	  }
11319 
11320 	relocation = value + signed_addend;
11321 
11322 	relocation -= (input_section->output_section->vma
11323 		       + input_section->output_offset
11324 		       + rel->r_offset);
11325 
11326 	relocation >>= howto->rightshift;
11327 	signed_check = relocation;
11328 
11329 	if (r_type == R_ARM_THM_JUMP6)
11330 	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11331 	else
11332 	  relocation &= howto->dst_mask;
11333 	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11334 
11335 	bfd_put_16 (input_bfd, relocation, hit_data);
11336 
11337 	/* Assumes two's complement.  */
11338 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11339 	  return bfd_reloc_overflow;
11340 
11341 	return bfd_reloc_ok;
11342       }
11343 
11344     case R_ARM_ALU_PCREL7_0:
11345     case R_ARM_ALU_PCREL15_8:
11346     case R_ARM_ALU_PCREL23_15:
11347       {
11348 	bfd_vma insn;
11349 	bfd_vma relocation;
11350 
11351 	insn = bfd_get_32 (input_bfd, hit_data);
11352 	if (globals->use_rel)
11353 	  {
11354 	    /* Extract the addend.  */
11355 	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11356 	    signed_addend = addend;
11357 	  }
11358 	relocation = value + signed_addend;
11359 
11360 	relocation -= (input_section->output_section->vma
11361 		       + input_section->output_offset
11362 		       + rel->r_offset);
11363 	insn = (insn & ~0xfff)
11364 	       | ((howto->bitpos << 7) & 0xf00)
11365 	       | ((relocation >> howto->bitpos) & 0xff);
	bfd_put_32 (input_bfd, insn, hit_data);
11367       }
11368       return bfd_reloc_ok;
11369 
11370     case R_ARM_GNU_VTINHERIT:
11371     case R_ARM_GNU_VTENTRY:
11372       return bfd_reloc_ok;
11373 
11374     case R_ARM_GOTOFF32:
11375       /* Relocation is relative to the start of the
11376 	 global offset table.  */
11377 
11378       BFD_ASSERT (sgot != NULL);
11379       if (sgot == NULL)
11380 	return bfd_reloc_notsupported;
11381 
11382       /* If we are addressing a Thumb function, we need to adjust the
11383 	 address by one, so that attempts to call the function pointer will
11384 	 correctly interpret it as Thumb code.  */
11385       if (branch_type == ST_BRANCH_TO_THUMB)
11386 	value += 1;
11387 
11388       /* Note that sgot->output_offset is not involved in this
11389 	 calculation.  We always want the start of .got.  If we
11390 	 define _GLOBAL_OFFSET_TABLE in a different way, as is
11391 	 permitted by the ABI, we might have to change this
11392 	 calculation.  */
11393       value -= sgot->output_section->vma;
11394       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11395 				       contents, rel->r_offset, value,
11396 				       rel->r_addend);
11397 
11398     case R_ARM_GOTPC:
11399       /* Use global offset table as symbol value.  */
11400       BFD_ASSERT (sgot != NULL);
11401 
11402       if (sgot == NULL)
11403 	return bfd_reloc_notsupported;
11404 
11405       *unresolved_reloc_p = false;
11406       value = sgot->output_section->vma;
11407       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11408 				       contents, rel->r_offset, value,
11409 				       rel->r_addend);
11410 
11411     case R_ARM_GOT32:
11412     case R_ARM_GOT_PREL:
11413       /* Relocation is to the entry for this symbol in the
11414 	 global offset table.  */
11415       if (sgot == NULL)
11416 	return bfd_reloc_notsupported;
11417 
11418       if (dynreloc_st_type == STT_GNU_IFUNC
11419 	  && plt_offset != (bfd_vma) -1
11420 	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11421 	{
11422 	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
11423 	     symbol, and the relocation resolves directly to the runtime
11424 	     target rather than to the .iplt entry.  This means that any
11425 	     .got entry would be the same value as the .igot.plt entry,
11426 	     so there's no point creating both.  */
11427 	  sgot = globals->root.igotplt;
11428 	  value = sgot->output_offset + gotplt_offset;
11429 	}
11430       else if (h != NULL)
11431 	{
11432 	  bfd_vma off;
11433 
11434 	  off = h->got.offset;
11435 	  BFD_ASSERT (off != (bfd_vma) -1);
11436 	  if ((off & 1) != 0)
11437 	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
11440 	      off &= ~1;
11441 	      if (globals->root.dynamic_sections_created
11442 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11443 		*unresolved_reloc_p = false;
11444 	    }
11445 	  else
11446 	    {
11447 	      Elf_Internal_Rela outrel;
11448 	      int isrofixup = 0;
11449 
11450 	      if (((h->dynindx != -1) || globals->fdpic_p)
11451 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11452 		{
11453 		  /* If the symbol doesn't resolve locally in a static
11454 		     object, we have an undefined reference.  If the
11455 		     symbol doesn't resolve locally in a dynamic object,
11456 		     it should be resolved by the dynamic linker.  */
11457 		  if (globals->root.dynamic_sections_created)
11458 		    {
11459 		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11460 		      *unresolved_reloc_p = false;
11461 		    }
11462 		  else
11463 		    outrel.r_info = 0;
11464 		  outrel.r_addend = 0;
11465 		}
11466 	      else
11467 		{
11468 		  if (dynreloc_st_type == STT_GNU_IFUNC)
11469 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11470 		  else if (bfd_link_pic (info)
11471 			   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11472 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11473 		  else
11474 		    {
11475 		      outrel.r_info = 0;
11476 		      if (globals->fdpic_p)
11477 			isrofixup = 1;
11478 		    }
11479 		  outrel.r_addend = dynreloc_value;
11480 		}
11481 
11482 	      /* The GOT entry is initialized to zero by default.
11483 		 See if we should install a different value.  */
11484 	      if (outrel.r_addend != 0
11485 		  && (globals->use_rel || outrel.r_info == 0))
11486 		{
11487 		  bfd_put_32 (output_bfd, outrel.r_addend,
11488 			      sgot->contents + off);
11489 		  outrel.r_addend = 0;
11490 		}
11491 
11492 	      if (isrofixup)
11493 		arm_elf_add_rofixup (output_bfd,
11494 				     elf32_arm_hash_table (info)->srofixup,
11495 				     sgot->output_section->vma
11496 				     + sgot->output_offset + off);
11497 
11498 	      else if (outrel.r_info != 0)
11499 		{
11500 		  outrel.r_offset = (sgot->output_section->vma
11501 				     + sgot->output_offset
11502 				     + off);
11503 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11504 		}
11505 
11506 	      h->got.offset |= 1;
11507 	    }
11508 	  value = sgot->output_offset + off;
11509 	}
11510       else
11511 	{
11512 	  bfd_vma off;
11513 
11514 	  BFD_ASSERT (local_got_offsets != NULL
11515 		      && local_got_offsets[r_symndx] != (bfd_vma) -1);
11516 
11517 	  off = local_got_offsets[r_symndx];
11518 
11519 	  /* The offset must always be a multiple of 4.  We use the
11520 	     least significant bit to record whether we have already
11521 	     generated the necessary reloc.  */
11522 	  if ((off & 1) != 0)
11523 	    off &= ~1;
11524 	  else
11525 	    {
11526 	      Elf_Internal_Rela outrel;
11527 	      int isrofixup = 0;
11528 
11529 	      if (dynreloc_st_type == STT_GNU_IFUNC)
11530 		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11531 	      else if (bfd_link_pic (info))
11532 		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11533 	      else
11534 		{
11535 		  outrel.r_info = 0;
11536 		  if (globals->fdpic_p)
11537 		    isrofixup = 1;
11538 		}
11539 
11540 	      /* The GOT entry is initialized to zero by default.
11541 		 See if we should install a different value.  */
11542 	      if (globals->use_rel || outrel.r_info == 0)
11543 		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11544 
11545 	      if (isrofixup)
11546 		arm_elf_add_rofixup (output_bfd,
11547 				     globals->srofixup,
11548 				     sgot->output_section->vma
11549 				     + sgot->output_offset + off);
11550 
11551 	      else if (outrel.r_info != 0)
11552 		{
11553 		  outrel.r_addend = addend + dynreloc_value;
11554 		  outrel.r_offset = (sgot->output_section->vma
11555 				     + sgot->output_offset
11556 				     + off);
11557 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11558 		}
11559 
11560 	      local_got_offsets[r_symndx] |= 1;
11561 	    }
11562 
11563 	  value = sgot->output_offset + off;
11564 	}
11565       if (r_type != R_ARM_GOT32)
11566 	value += sgot->output_section->vma;
11567 
11568       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11569 				       contents, rel->r_offset, value,
11570 				       rel->r_addend);
11571 
11572     case R_ARM_TLS_LDO32:
11573       value = value - dtpoff_base (info);
11574 
11575       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11576 				       contents, rel->r_offset, value,
11577 				       rel->r_addend);
11578 
11579     case R_ARM_TLS_LDM32:
11580     case R_ARM_TLS_LDM32_FDPIC:
11581       {
11582 	bfd_vma off;
11583 
11584 	if (sgot == NULL)
11585 	  abort ();
11586 
11587 	off = globals->tls_ldm_got.offset;
11588 
11589 	if ((off & 1) != 0)
11590 	  off &= ~1;
11591 	else
11592 	  {
11593 	    /* If we don't know the module number, create a relocation
11594 	       for it.  */
11595 	    if (bfd_link_dll (info))
11596 	      {
11597 		Elf_Internal_Rela outrel;
11598 
11599 		if (srelgot == NULL)
11600 		  abort ();
11601 
11602 		outrel.r_addend = 0;
11603 		outrel.r_offset = (sgot->output_section->vma
11604 				   + sgot->output_offset + off);
11605 		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11606 
11607 		if (globals->use_rel)
11608 		  bfd_put_32 (output_bfd, outrel.r_addend,
11609 			      sgot->contents + off);
11610 
11611 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11612 	      }
11613 	    else
11614 	      bfd_put_32 (output_bfd, 1, sgot->contents + off);
11615 
11616 	    globals->tls_ldm_got.offset |= 1;
11617 	  }
11618 
11619 	if (r_type == R_ARM_TLS_LDM32_FDPIC)
11620 	  {
11621 	    bfd_put_32 (output_bfd,
11622 			globals->root.sgot->output_offset + off,
11623 			contents + rel->r_offset);
11624 
11625 	    return bfd_reloc_ok;
11626 	  }
11627 	else
11628 	  {
11629 	    value = sgot->output_section->vma + sgot->output_offset + off
11630 	      - (input_section->output_section->vma
11631 		 + input_section->output_offset + rel->r_offset);
11632 
11633 	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
11634 					     contents, rel->r_offset, value,
11635 					     rel->r_addend);
11636 	  }
11637       }
11638 
11639     case R_ARM_TLS_CALL:
11640     case R_ARM_THM_TLS_CALL:
11641     case R_ARM_TLS_GD32:
11642     case R_ARM_TLS_GD32_FDPIC:
11643     case R_ARM_TLS_IE32:
11644     case R_ARM_TLS_IE32_FDPIC:
11645     case R_ARM_TLS_GOTDESC:
11646     case R_ARM_TLS_DESCSEQ:
11647     case R_ARM_THM_TLS_DESCSEQ:
11648       {
11649 	bfd_vma off, offplt;
11650 	int indx = 0;
11651 	char tls_type;
11652 
11653 	BFD_ASSERT (sgot != NULL);
11654 
11655 	if (h != NULL)
11656 	  {
11657 	    bool dyn;
11658 	    dyn = globals->root.dynamic_sections_created;
11659 	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11660 						 bfd_link_pic (info),
11661 						 h)
11662 		&& (!bfd_link_pic (info)
11663 		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
11664 	      {
11665 		*unresolved_reloc_p = false;
11666 		indx = h->dynindx;
11667 	      }
11668 	    off = h->got.offset;
11669 	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11670 	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11671 	  }
11672 	else
11673 	  {
11674 	    BFD_ASSERT (local_got_offsets != NULL);
11675 
11676 	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
11677 	      {
11678 		_bfd_error_handler (_("\
11679 %pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11680 				    input_bfd,
11681 				    (unsigned long) elf32_arm_num_entries (input_bfd),
11682 				    r_symndx);
11683 		return false;
11684 	      }
11685 	    off = local_got_offsets[r_symndx];
11686 	    offplt = local_tlsdesc_gotents[r_symndx];
11687 	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11688 	  }
11689 
	/* Linker relaxation happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
11692 	if (ELF32_R_TYPE (rel->r_info) != r_type)
11693 	  tls_type = GOT_TLS_IE;
11694 
11695 	BFD_ASSERT (tls_type != GOT_UNKNOWN);
11696 
11697 	if ((off & 1) != 0)
11698 	  off &= ~1;
11699 	else
11700 	  {
11701 	    bool need_relocs = false;
11702 	    Elf_Internal_Rela outrel;
11703 	    int cur_off = off;
11704 
11705 	    /* The GOT entries have not been initialized yet.  Do it
11706 	       now, and emit any relocations.  If both an IE GOT and a
11707 	       GD GOT are necessary, we emit the GD first.  */
11708 
11709 	    if ((bfd_link_dll (info) || indx != 0)
11710 		&& (h == NULL
11711 		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11712 			&& !resolved_to_zero)
11713 		    || h->root.type != bfd_link_hash_undefweak))
11714 	      {
11715 		need_relocs = true;
11716 		BFD_ASSERT (srelgot != NULL);
11717 	      }
11718 
11719 	    if (tls_type & GOT_TLS_GDESC)
11720 	      {
11721 		bfd_byte *loc;
11722 
11723 		/* We should have relaxed, unless this is an undefined
11724 		   weak symbol.  */
11725 		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11726 			    || bfd_link_dll (info));
11727 		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11728 			    <= globals->root.sgotplt->size);
11729 
11730 		outrel.r_addend = 0;
11731 		outrel.r_offset = (globals->root.sgotplt->output_section->vma
11732 				   + globals->root.sgotplt->output_offset
11733 				   + offplt
11734 				   + globals->sgotplt_jump_table_size);
11735 
11736 		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11737 		sreloc = globals->root.srelplt;
11738 		loc = sreloc->contents;
11739 		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11740 		BFD_ASSERT (loc + RELOC_SIZE (globals)
11741 			   <= sreloc->contents + sreloc->size);
11742 
11743 		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11744 
11745 		/* For globals, the first word in the relocation gets
11746 		   the relocation index and the top bit set, or zero,
11747 		   if we're binding now.  For locals, it gets the
11748 		   symbol's offset in the tls section.  */
11749 		bfd_put_32 (output_bfd,
11750 			    !h ? value - elf_hash_table (info)->tls_sec->vma
11751 			    : info->flags & DF_BIND_NOW ? 0
11752 			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11753 			    globals->root.sgotplt->contents + offplt
11754 			    + globals->sgotplt_jump_table_size);
11755 
11756 		/* Second word in the relocation is always zero.  */
11757 		bfd_put_32 (output_bfd, 0,
11758 			    globals->root.sgotplt->contents + offplt
11759 			    + globals->sgotplt_jump_table_size + 4);
11760 	      }
11761 	    if (tls_type & GOT_TLS_GD)
11762 	      {
11763 		if (need_relocs)
11764 		  {
11765 		    outrel.r_addend = 0;
11766 		    outrel.r_offset = (sgot->output_section->vma
11767 				       + sgot->output_offset
11768 				       + cur_off);
11769 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11770 
11771 		    if (globals->use_rel)
11772 		      bfd_put_32 (output_bfd, outrel.r_addend,
11773 				  sgot->contents + cur_off);
11774 
11775 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11776 
11777 		    if (indx == 0)
11778 		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
11779 				  sgot->contents + cur_off + 4);
11780 		    else
11781 		      {
11782 			outrel.r_addend = 0;
11783 			outrel.r_info = ELF32_R_INFO (indx,
11784 						      R_ARM_TLS_DTPOFF32);
11785 			outrel.r_offset += 4;
11786 
11787 			if (globals->use_rel)
11788 			  bfd_put_32 (output_bfd, outrel.r_addend,
11789 				      sgot->contents + cur_off + 4);
11790 
11791 			elf32_arm_add_dynreloc (output_bfd, info,
11792 						srelgot, &outrel);
11793 		      }
11794 		  }
11795 		else
11796 		  {
11797 		    /* If we are not emitting relocations for a
11798 		       general dynamic reference, then we must be in a
11799 		       static link or an executable link with the
11800 		       symbol binding locally.  Mark it as belonging
11801 		       to module 1, the executable.  */
11802 		    bfd_put_32 (output_bfd, 1,
11803 				sgot->contents + cur_off);
11804 		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
11805 				sgot->contents + cur_off + 4);
11806 		  }
11807 
11808 		cur_off += 8;
11809 	      }
11810 
11811 	    if (tls_type & GOT_TLS_IE)
11812 	      {
11813 		if (need_relocs)
11814 		  {
11815 		    if (indx == 0)
11816 		      outrel.r_addend = value - dtpoff_base (info);
11817 		    else
11818 		      outrel.r_addend = 0;
11819 		    outrel.r_offset = (sgot->output_section->vma
11820 				       + sgot->output_offset
11821 				       + cur_off);
11822 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11823 
11824 		    if (globals->use_rel)
11825 		      bfd_put_32 (output_bfd, outrel.r_addend,
11826 				  sgot->contents + cur_off);
11827 
11828 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11829 		  }
11830 		else
11831 		  bfd_put_32 (output_bfd, tpoff (info, value),
11832 			      sgot->contents + cur_off);
11833 		cur_off += 4;
11834 	      }
11835 
11836 	    if (h != NULL)
11837 	      h->got.offset |= 1;
11838 	    else
11839 	      local_got_offsets[r_symndx] |= 1;
11840 	  }
11841 
	if ((tls_type & GOT_TLS_GD)
	    && r_type != R_ARM_TLS_GD32
	    && r_type != R_ARM_TLS_GD32_FDPIC)
11843 	  off += 8;
11844 	else if (tls_type & GOT_TLS_GDESC)
11845 	  off = offplt;
11846 
11847 	if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
11848 	    || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
11849 	  {
11850 	    bfd_signed_vma offset;
	    /* TLS stubs are ARM mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
11853 	    branch_type = ST_BRANCH_TO_ARM;
11854 	    enum elf32_arm_stub_type stub_type
11855 	      = arm_type_of_stub (info, input_section, rel,
11856 				  st_type, &branch_type,
11857 				  (struct elf32_arm_link_hash_entry *)h,
11858 				  globals->tls_trampoline, globals->root.splt,
11859 				  input_bfd, sym_name);
11860 
11861 	    if (stub_type != arm_stub_none)
11862 	      {
11863 		struct elf32_arm_stub_hash_entry *stub_entry
11864 		  = elf32_arm_get_stub_entry
11865 		  (input_section, globals->root.splt, 0, rel,
11866 		   globals, stub_type);
11867 		offset = (stub_entry->stub_offset
11868 			  + stub_entry->stub_sec->output_offset
11869 			  + stub_entry->stub_sec->output_section->vma);
11870 	      }
11871 	    else
11872 	      offset = (globals->root.splt->output_section->vma
11873 			+ globals->root.splt->output_offset
11874 			+ globals->tls_trampoline);
11875 
11876 	    if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
11877 	      {
11878 		unsigned long inst;
11879 
11880 		offset -= (input_section->output_section->vma
11881 			   + input_section->output_offset
11882 			   + rel->r_offset + 8);
11883 
11884 		inst = offset >> 2;
11885 		inst &= 0x00ffffff;
11886 		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11887 	      }
11888 	    else
11889 	      {
11890 		/* Thumb blx encodes the offset in a complicated
11891 		   fashion.  */
11892 		unsigned upper_insn, lower_insn;
11893 		unsigned neg;
11894 
11895 		offset -= (input_section->output_section->vma
11896 			   + input_section->output_offset
11897 			   + rel->r_offset + 4);
11898 
11899 		if (stub_type != arm_stub_none
11900 		    && arm_stub_is_thumb (stub_type))
11901 		  {
11902 		    lower_insn = 0xd000;
11903 		  }
11904 		else
11905 		  {
11906 		    lower_insn = 0xc000;
11907 		    /* Round up the offset to a word boundary.  */
11908 		    offset = (offset + 2) & ~2;
11909 		  }
11910 
11911 		neg = offset < 0;
11912 		upper_insn = (0xf000
11913 			      | ((offset >> 12) & 0x3ff)
11914 			      | (neg << 10));
11915 		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11916 			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
11917 			      | ((offset >> 1) & 0x7ff);
11918 		bfd_put_16 (input_bfd, upper_insn, hit_data);
11919 		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11920 		return bfd_reloc_ok;
11921 	      }
11922 	  }
	/* These relocations need special care, as besides the fact
	   that they point somewhere in .gotplt, the addend must be
	   adjusted according to the type of instruction that refers
	   to it.  */
11927 	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11928 	  {
11929 	    unsigned long data, insn;
11930 	    unsigned thumb;
11931 
11932 	    data = bfd_get_signed_32 (input_bfd, hit_data);
11933 	    thumb = data & 1;
11934 	    data &= ~1ul;
11935 
11936 	    if (thumb)
11937 	      {
11938 		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11939 		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11940 		  insn = (insn << 16)
11941 		    | bfd_get_16 (input_bfd,
11942 				  contents + rel->r_offset - data + 2);
11943 		if ((insn & 0xf800c000) == 0xf000c000)
11944 		  /* bl/blx */
11945 		  value = -6;
11946 		else if ((insn & 0xffffff00) == 0x4400)
11947 		  /* add */
11948 		  value = -5;
11949 		else
11950 		  {
11951 		    _bfd_error_handler
11952 		      /* xgettext:c-format */
11953 		      (_("%pB(%pA+%#" PRIx64 "): "
11954 			 "unexpected %s instruction '%#lx' "
11955 			 "referenced by TLS_GOTDESC"),
11956 		       input_bfd, input_section, (uint64_t) rel->r_offset,
11957 		       "Thumb", insn);
11958 		    return bfd_reloc_notsupported;
11959 		  }
11960 	      }
11961 	    else
11962 	      {
11963 		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11964 
11965 		switch (insn >> 24)
11966 		  {
11967 		  case 0xeb:  /* bl */
11968 		  case 0xfa:  /* blx */
11969 		    value = -4;
11970 		    break;
11971 
11972 		  case 0xe0:	/* add */
11973 		    value = -8;
11974 		    break;
11975 
11976 		  default:
11977 		    _bfd_error_handler
11978 		      /* xgettext:c-format */
11979 		      (_("%pB(%pA+%#" PRIx64 "): "
11980 			 "unexpected %s instruction '%#lx' "
11981 			 "referenced by TLS_GOTDESC"),
11982 		       input_bfd, input_section, (uint64_t) rel->r_offset,
11983 		       "ARM", insn);
11984 		    return bfd_reloc_notsupported;
11985 		  }
11986 	      }
11987 
11988 	    value += ((globals->root.sgotplt->output_section->vma
11989 		       + globals->root.sgotplt->output_offset + off)
11990 		      - (input_section->output_section->vma
11991 			 + input_section->output_offset
11992 			 + rel->r_offset)
11993 		      + globals->sgotplt_jump_table_size);
11994 	  }
11995 	else
11996 	  value = ((globals->root.sgot->output_section->vma
11997 		    + globals->root.sgot->output_offset + off)
11998 		   - (input_section->output_section->vma
11999 		      + input_section->output_offset + rel->r_offset));
12000 
	if (globals->fdpic_p
	    && (r_type == R_ARM_TLS_GD32_FDPIC
		|| r_type == R_ARM_TLS_IE32_FDPIC))
12003 	  {
12004 	    /* For FDPIC relocations, resolve to the offset of the GOT
12005 	       entry from the start of GOT.  */
12006 	    bfd_put_32 (output_bfd,
12007 			globals->root.sgot->output_offset + off,
12008 			contents + rel->r_offset);
12009 
12010 	    return bfd_reloc_ok;
12011 	  }
12012 	else
12013 	  {
12014 	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
12015 					     contents, rel->r_offset, value,
12016 					     rel->r_addend);
12017 	  }
12018       }
12019 
12020     case R_ARM_TLS_LE32:
12021       if (bfd_link_dll (info))
12022 	{
12023 	  _bfd_error_handler
12024 	    /* xgettext:c-format */
12025 	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12026 	       "in shared object"),
12027 	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12028 	  return bfd_reloc_notsupported;
12029 	}
12030       else
12031 	value = tpoff (info, value);
12032 
12033       return _bfd_final_link_relocate (howto, input_bfd, input_section,
12034 				       contents, rel->r_offset, value,
12035 				       rel->r_addend);
12036 
12037     case R_ARM_V4BX:
12038       if (globals->fix_v4bx)
12039 	{
12040 	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12041 
12042 	  /* Ensure that we have a BX instruction.  */
12043 	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12044 
12045 	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12046 	    {
12047 	      /* Branch to veneer.  */
12048 	      bfd_vma glue_addr;
12049 	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12050 	      glue_addr -= input_section->output_section->vma
12051 			   + input_section->output_offset
12052 			   + rel->r_offset + 8;
12053 	      insn = (insn & 0xf0000000) | 0x0a000000
12054 		     | ((glue_addr >> 2) & 0x00ffffff);
12055 	    }
12056 	  else
12057 	    {
12058 	      /* Preserve Rm (lowest four bits) and the condition code
12059 		 (highest four bits). Other bits encode MOV PC,Rm.  */
12060 	      insn = (insn & 0xf000000f) | 0x01a0f000;
12061 	    }
12062 
12063 	  bfd_put_32 (input_bfd, insn, hit_data);
12064 	}
12065       return bfd_reloc_ok;
12066 
12067     case R_ARM_MOVW_ABS_NC:
12068     case R_ARM_MOVT_ABS:
12069     case R_ARM_MOVW_PREL_NC:
12070     case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing, we
       assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
12075     case R_ARM_MOVW_BREL_NC:
12076     case R_ARM_MOVW_BREL:
12077     case R_ARM_MOVT_BREL:
12078       {
12079 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
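	/* The MOVW/MOVT immediate is a 16-bit value split as imm4:imm12,
	   with imm4 in bits 19-16 and imm12 in bits 11-0; e.g. 0x1234 is
	   stored as imm4 = 0x1, imm12 = 0x234.  */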
12080 
12081 	if (globals->use_rel)
12082 	  {
12083 	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12084 	    signed_addend = (addend ^ 0x8000) - 0x8000;
12085 	  }
12086 
12087 	value += signed_addend;
12088 
12089 	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12090 	  value -= (input_section->output_section->vma
12091 		    + input_section->output_offset + rel->r_offset);
12092 
12093 	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12094 	  return bfd_reloc_overflow;
12095 
12096 	if (branch_type == ST_BRANCH_TO_THUMB)
12097 	  value |= 1;
12098 
12099 	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12100 	    || r_type == R_ARM_MOVT_BREL)
12101 	  value >>= 16;
12102 
12103 	insn &= 0xfff0f000;
12104 	insn |= value & 0xfff;
12105 	insn |= (value & 0xf000) << 4;
12106 	bfd_put_32 (input_bfd, insn, hit_data);
12107       }
12108       return bfd_reloc_ok;
12109 
12110     case R_ARM_THM_MOVW_ABS_NC:
12111     case R_ARM_THM_MOVT_ABS:
12112     case R_ARM_THM_MOVW_PREL_NC:
12113     case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing, we
       assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
12119     case R_ARM_THM_MOVW_BREL_NC:
12120     case R_ARM_THM_MOVW_BREL:
12121     case R_ARM_THM_MOVT_BREL:
12122       {
12123 	bfd_vma insn;
12124 
12125 	insn = bfd_get_16 (input_bfd, hit_data) << 16;
12126 	insn |= bfd_get_16 (input_bfd, hit_data + 2);
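	/* The Thumb-2 MOVW/MOVT immediate is a 16-bit value split as
	   imm4:i:imm3:imm8, with imm4 in bits 19-16, i in bit 26, imm3 in
	   bits 14-12 and imm8 in bits 7-0 of the combined encoding.  */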
12127 
12128 	if (globals->use_rel)
12129 	  {
12130 	    addend = ((insn >> 4)  & 0xf000)
12131 		   | ((insn >> 15) & 0x0800)
12132 		   | ((insn >> 4)  & 0x0700)
12133 		   | (insn	   & 0x00ff);
12134 	    signed_addend = (addend ^ 0x8000) - 0x8000;
12135 	  }
12136 
12137 	value += signed_addend;
12138 
12139 	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12140 	  value -= (input_section->output_section->vma
12141 		    + input_section->output_offset + rel->r_offset);
12142 
12143 	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12144 	  return bfd_reloc_overflow;
12145 
12146 	if (branch_type == ST_BRANCH_TO_THUMB)
12147 	  value |= 1;
12148 
12149 	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12150 	    || r_type == R_ARM_THM_MOVT_BREL)
12151 	  value >>= 16;
12152 
12153 	insn &= 0xfbf08f00;
12154 	insn |= (value & 0xf000) << 4;
12155 	insn |= (value & 0x0800) << 15;
12156 	insn |= (value & 0x0700) << 4;
12157 	insn |= (value & 0x00ff);
12158 
12159 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
12160 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12161       }
12162       return bfd_reloc_ok;
12163 
12164     case R_ARM_ALU_PC_G0_NC:
12165     case R_ARM_ALU_PC_G1_NC:
12166     case R_ARM_ALU_PC_G0:
12167     case R_ARM_ALU_PC_G1:
12168     case R_ARM_ALU_PC_G2:
12169     case R_ARM_ALU_SB_G0_NC:
12170     case R_ARM_ALU_SB_G1_NC:
12171     case R_ARM_ALU_SB_G0:
12172     case R_ARM_ALU_SB_G1:
12173     case R_ARM_ALU_SB_G2:
12174       {
12175 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12176 	bfd_vma pc = input_section->output_section->vma
12177 		     + input_section->output_offset + rel->r_offset;
12178 	/* sb is the origin of the *segment* containing the symbol.  */
12179 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12180 	bfd_vma residual;
12181 	bfd_vma g_n;
12182 	bfd_signed_vma signed_value;
12183 	int group = 0;
12184 
12185 	/* Determine which group of bits to select.  */
12186 	switch (r_type)
12187 	  {
12188 	  case R_ARM_ALU_PC_G0_NC:
12189 	  case R_ARM_ALU_PC_G0:
12190 	  case R_ARM_ALU_SB_G0_NC:
12191 	  case R_ARM_ALU_SB_G0:
12192 	    group = 0;
12193 	    break;
12194 
12195 	  case R_ARM_ALU_PC_G1_NC:
12196 	  case R_ARM_ALU_PC_G1:
12197 	  case R_ARM_ALU_SB_G1_NC:
12198 	  case R_ARM_ALU_SB_G1:
12199 	    group = 1;
12200 	    break;
12201 
12202 	  case R_ARM_ALU_PC_G2:
12203 	  case R_ARM_ALU_SB_G2:
12204 	    group = 2;
12205 	    break;
12206 
12207 	  default:
12208 	    abort ();
12209 	  }
12210 
12211 	/* If REL, extract the addend from the insn.  If RELA, it will
12212 	   have already been fetched for us.  */
12213 	if (globals->use_rel)
12214 	  {
12215 	    int negative;
12216 	    bfd_vma constant = insn & 0xff;
12217 	    bfd_vma rotation = (insn & 0xf00) >> 8;
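	    /* An ARM data-processing immediate is an 8-bit constant
	       rotated right by twice the 4-bit rotation field; e.g.
	       0xab00 is encoded as constant 0xab with a rotation field
	       of 12 (a right rotation by 24 bits).  */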
12218 
12219 	    if (rotation == 0)
12220 	      signed_addend = constant;
12221 	    else
12222 	      {
12223 		/* Compensate for the fact that in the instruction, the
12224 		   rotation is stored in multiples of 2 bits.  */
12225 		rotation *= 2;
12226 
12227 		/* Rotate "constant" right by "rotation" bits.  */
12228 		signed_addend = (constant >> rotation) |
12229 				(constant << (8 * sizeof (bfd_vma) - rotation));
12230 	      }
12231 
12232 	    /* Determine if the instruction is an ADD or a SUB.
12233 	       (For REL, this determines the sign of the addend.)  */
12234 	    negative = identify_add_or_sub (insn);
12235 	    if (negative == 0)
12236 	      {
12237 		_bfd_error_handler
12238 		  /* xgettext:c-format */
12239 		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12240 		     "are allowed for ALU group relocations"),
12241 		  input_bfd, input_section, (uint64_t) rel->r_offset);
12242 		return bfd_reloc_overflow;
12243 	      }
12244 
12245 	    signed_addend *= negative;
12246 	  }
12247 
12248 	/* Compute the value (X) to go in the place.  */
12249 	if (r_type == R_ARM_ALU_PC_G0_NC
12250 	    || r_type == R_ARM_ALU_PC_G1_NC
12251 	    || r_type == R_ARM_ALU_PC_G0
12252 	    || r_type == R_ARM_ALU_PC_G1
12253 	    || r_type == R_ARM_ALU_PC_G2)
12254 	  /* PC relative.  */
12255 	  signed_value = value - pc + signed_addend;
12256 	else
12257 	  /* Section base relative.  */
12258 	  signed_value = value - sb + signed_addend;
12259 
12260 	/* If the target symbol is a Thumb function, then set the
12261 	   Thumb bit in the address.  */
12262 	if (branch_type == ST_BRANCH_TO_THUMB)
12263 	  signed_value |= 1;
12264 
12265 	/* Calculate the value of the relevant G_n, in encoded
12266 	   constant-with-rotation format.  */
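	/* Group n is obtained by repeatedly removing the most significant
	   8-bit chunk of the remaining value (aligned so that it can be
	   expressed as a rotated 8-bit constant): G0 is the first such
	   chunk, G1 the next, and so on.  For the checked (non-_NC)
	   variants the residual left after the selected group must be
	   zero, which is verified below.  */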
12267 	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12268 					  group, &residual);
12269 
12270 	/* Check for overflow if required.  */
12271 	if ((r_type == R_ARM_ALU_PC_G0
12272 	     || r_type == R_ARM_ALU_PC_G1
12273 	     || r_type == R_ARM_ALU_PC_G2
12274 	     || r_type == R_ARM_ALU_SB_G0
12275 	     || r_type == R_ARM_ALU_SB_G1
12276 	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12277 	  {
12278 	    _bfd_error_handler
12279 	      /* xgettext:c-format */
12280 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12281 		 "splitting %#" PRIx64 " for group relocation %s"),
12282 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12283 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12284 	       howto->name);
12285 	    return bfd_reloc_overflow;
12286 	  }
12287 
12288 	/* Mask out the value and the ADD/SUB part of the opcode; take care
12289 	   not to destroy the S bit.  */
12290 	insn &= 0xff1ff000;
12291 
12292 	/* Set the opcode according to whether the value to go in the
12293 	   place is negative.  */
12294 	if (signed_value < 0)
12295 	  insn |= 1 << 22;
12296 	else
12297 	  insn |= 1 << 23;
12298 
12299 	/* Encode the offset.  */
12300 	insn |= g_n;
12301 
12302 	bfd_put_32 (input_bfd, insn, hit_data);
12303       }
12304       return bfd_reloc_ok;
12305 
12306     case R_ARM_LDR_PC_G0:
12307     case R_ARM_LDR_PC_G1:
12308     case R_ARM_LDR_PC_G2:
12309     case R_ARM_LDR_SB_G0:
12310     case R_ARM_LDR_SB_G1:
12311     case R_ARM_LDR_SB_G2:
12312       {
12313 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12314 	bfd_vma pc = input_section->output_section->vma
12315 		     + input_section->output_offset + rel->r_offset;
12316 	/* sb is the origin of the *segment* containing the symbol.  */
12317 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12318 	bfd_vma residual;
12319 	bfd_signed_vma signed_value;
12320 	int group = 0;
12321 
12322 	/* Determine which groups of bits to calculate.  */
12323 	switch (r_type)
12324 	  {
12325 	  case R_ARM_LDR_PC_G0:
12326 	  case R_ARM_LDR_SB_G0:
12327 	    group = 0;
12328 	    break;
12329 
12330 	  case R_ARM_LDR_PC_G1:
12331 	  case R_ARM_LDR_SB_G1:
12332 	    group = 1;
12333 	    break;
12334 
12335 	  case R_ARM_LDR_PC_G2:
12336 	  case R_ARM_LDR_SB_G2:
12337 	    group = 2;
12338 	    break;
12339 
12340 	  default:
12341 	    abort ();
12342 	  }
12343 
12344 	/* If REL, extract the addend from the insn.  If RELA, it will
12345 	   have already been fetched for us.  */
12346 	if (globals->use_rel)
12347 	  {
12348 	    int negative = (insn & (1 << 23)) ? 1 : -1;
12349 	    signed_addend = negative * (insn & 0xfff);
12350 	  }
12351 
12352 	/* Compute the value (X) to go in the place.  */
12353 	if (r_type == R_ARM_LDR_PC_G0
12354 	    || r_type == R_ARM_LDR_PC_G1
12355 	    || r_type == R_ARM_LDR_PC_G2)
12356 	  /* PC relative.  */
12357 	  signed_value = value - pc + signed_addend;
12358 	else
12359 	  /* Section base relative.  */
12360 	  signed_value = value - sb + signed_addend;
12361 
12362 	/* Calculate the value of the relevant G_{n-1} to obtain
12363 	   the residual at that stage.  */
12364 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12365 				    group - 1, &residual);
12366 
12367 	/* Check for overflow.  */
12368 	if (residual >= 0x1000)
12369 	  {
12370 	    _bfd_error_handler
12371 	      /* xgettext:c-format */
12372 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12373 		 "splitting %#" PRIx64 " for group relocation %s"),
12374 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12375 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12376 	       howto->name);
12377 	    return bfd_reloc_overflow;
12378 	  }
12379 
12380 	/* Mask out the value and U bit.  */
12381 	insn &= 0xff7ff000;
12382 
12383 	/* Set the U bit if the value to go in the place is non-negative.  */
12384 	if (signed_value >= 0)
12385 	  insn |= 1 << 23;
12386 
12387 	/* Encode the offset.  */
12388 	insn |= residual;
12389 
12390 	bfd_put_32 (input_bfd, insn, hit_data);
12391       }
12392       return bfd_reloc_ok;
12393 
12394     case R_ARM_LDRS_PC_G0:
12395     case R_ARM_LDRS_PC_G1:
12396     case R_ARM_LDRS_PC_G2:
12397     case R_ARM_LDRS_SB_G0:
12398     case R_ARM_LDRS_SB_G1:
12399     case R_ARM_LDRS_SB_G2:
12400       {
12401 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12402 	bfd_vma pc = input_section->output_section->vma
12403 		     + input_section->output_offset + rel->r_offset;
12404 	/* sb is the origin of the *segment* containing the symbol.  */
12405 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12406 	bfd_vma residual;
12407 	bfd_signed_vma signed_value;
12408 	int group = 0;
12409 
12410 	/* Determine which groups of bits to calculate.  */
12411 	switch (r_type)
12412 	  {
12413 	  case R_ARM_LDRS_PC_G0:
12414 	  case R_ARM_LDRS_SB_G0:
12415 	    group = 0;
12416 	    break;
12417 
12418 	  case R_ARM_LDRS_PC_G1:
12419 	  case R_ARM_LDRS_SB_G1:
12420 	    group = 1;
12421 	    break;
12422 
12423 	  case R_ARM_LDRS_PC_G2:
12424 	  case R_ARM_LDRS_SB_G2:
12425 	    group = 2;
12426 	    break;
12427 
12428 	  default:
12429 	    abort ();
12430 	  }
12431 
12432 	/* If REL, extract the addend from the insn.  If RELA, it will
12433 	   have already been fetched for us.  */
12434 	if (globals->use_rel)
12435 	  {
12436 	    int negative = (insn & (1 << 23)) ? 1 : -1;
12437 	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12438 	  }
12439 
12440 	/* Compute the value (X) to go in the place.  */
12441 	if (r_type == R_ARM_LDRS_PC_G0
12442 	    || r_type == R_ARM_LDRS_PC_G1
12443 	    || r_type == R_ARM_LDRS_PC_G2)
12444 	  /* PC relative.  */
12445 	  signed_value = value - pc + signed_addend;
12446 	else
12447 	  /* Section base relative.  */
12448 	  signed_value = value - sb + signed_addend;
12449 
12450 	/* Calculate the value of the relevant G_{n-1} to obtain
12451 	   the residual at that stage.  */
12452 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12453 				    group - 1, &residual);
12454 
12455 	/* Check for overflow.  */
12456 	if (residual >= 0x100)
12457 	  {
12458 	    _bfd_error_handler
12459 	      /* xgettext:c-format */
12460 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12461 		 "splitting %#" PRIx64 " for group relocation %s"),
12462 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12463 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12464 	       howto->name);
12465 	    return bfd_reloc_overflow;
12466 	  }
12467 
12468 	/* Mask out the value and U bit.  */
12469 	insn &= 0xff7ff0f0;
12470 
12471 	/* Set the U bit if the value to go in the place is non-negative.  */
12472 	if (signed_value >= 0)
12473 	  insn |= 1 << 23;
12474 
12475 	/* Encode the offset.  */
12476 	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12477 
12478 	bfd_put_32 (input_bfd, insn, hit_data);
12479       }
12480       return bfd_reloc_ok;
12481 
12482     case R_ARM_LDC_PC_G0:
12483     case R_ARM_LDC_PC_G1:
12484     case R_ARM_LDC_PC_G2:
12485     case R_ARM_LDC_SB_G0:
12486     case R_ARM_LDC_SB_G1:
12487     case R_ARM_LDC_SB_G2:
12488       {
12489 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12490 	bfd_vma pc = input_section->output_section->vma
12491 		     + input_section->output_offset + rel->r_offset;
12492 	/* sb is the origin of the *segment* containing the symbol.  */
12493 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12494 	bfd_vma residual;
12495 	bfd_signed_vma signed_value;
12496 	int group = 0;
12497 
12498 	/* Determine which groups of bits to calculate.  */
12499 	switch (r_type)
12500 	  {
12501 	  case R_ARM_LDC_PC_G0:
12502 	  case R_ARM_LDC_SB_G0:
12503 	    group = 0;
12504 	    break;
12505 
12506 	  case R_ARM_LDC_PC_G1:
12507 	  case R_ARM_LDC_SB_G1:
12508 	    group = 1;
12509 	    break;
12510 
12511 	  case R_ARM_LDC_PC_G2:
12512 	  case R_ARM_LDC_SB_G2:
12513 	    group = 2;
12514 	    break;
12515 
12516 	  default:
12517 	    abort ();
12518 	  }
12519 
12520 	/* If REL, extract the addend from the insn.  If RELA, it will
12521 	   have already been fetched for us.  */
12522 	if (globals->use_rel)
12523 	  {
12524 	    int negative = (insn & (1 << 23)) ? 1 : -1;
12525 	    signed_addend = negative * ((insn & 0xff) << 2);
12526 	  }
12527 
12528 	/* Compute the value (X) to go in the place.  */
12529 	if (r_type == R_ARM_LDC_PC_G0
12530 	    || r_type == R_ARM_LDC_PC_G1
12531 	    || r_type == R_ARM_LDC_PC_G2)
12532 	  /* PC relative.  */
12533 	  signed_value = value - pc + signed_addend;
12534 	else
12535 	  /* Section base relative.  */
12536 	  signed_value = value - sb + signed_addend;
12537 
12538 	/* Calculate the value of the relevant G_{n-1} to obtain
12539 	   the residual at that stage.  */
12540 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12541 				    group - 1, &residual);
12542 
12543 	/* Check for overflow.  (The absolute value to go in the place must be
12544 	   divisible by four and, after having been divided by four, must
12545 	   fit in eight bits.)  */
12546 	if ((residual & 0x3) != 0 || residual >= 0x400)
12547 	  {
12548 	    _bfd_error_handler
12549 	      /* xgettext:c-format */
12550 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12551 		 "splitting %#" PRIx64 " for group relocation %s"),
12552 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12553 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12554 	       howto->name);
12555 	    return bfd_reloc_overflow;
12556 	  }
12557 
12558 	/* Mask out the value and U bit.  */
12559 	insn &= 0xff7fff00;
12560 
12561 	/* Set the U bit if the value to go in the place is non-negative.  */
12562 	if (signed_value >= 0)
12563 	  insn |= 1 << 23;
12564 
12565 	/* Encode the offset.  */
12566 	insn |= residual >> 2;
12567 
12568 	bfd_put_32 (input_bfd, insn, hit_data);
12569       }
12570       return bfd_reloc_ok;
12571 
12572     case R_ARM_THM_ALU_ABS_G0_NC:
12573     case R_ARM_THM_ALU_ABS_G1_NC:
12574     case R_ARM_THM_ALU_ABS_G2_NC:
12575     case R_ARM_THM_ALU_ABS_G3_NC:
12576 	{
12577 	    const int shift_array[4] = {0, 8, 16, 24};
12578 	    bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12579 	    bfd_vma addr = value;
12580 	    int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
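	    /* Each of these relocations places one byte of the (possibly
	       Thumb-bit adjusted) address into the imm8 field of a 16-bit
	       Thumb instruction: G0 selects bits 7-0, G1 bits 15-8,
	       G2 bits 23-16 and G3 bits 31-24.  */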
12581 
12582 	    /* Compute address.  */
12583 	    if (globals->use_rel)
12584 		signed_addend = insn & 0xff;
12585 	    addr += signed_addend;
12586 	    if (branch_type == ST_BRANCH_TO_THUMB)
12587 		addr |= 1;
12588 	    /* Clean imm8 insn.  */
12589 	    insn &= 0xff00;
12590 	    /* And update with correct part of address.  */
12591 	    insn |= (addr >> shift) & 0xff;
12592 	    /* Update insn.  */
12593 	    bfd_put_16 (input_bfd, insn, hit_data);
12594 	}
12595 
12596 	*unresolved_reloc_p = false;
12597 	return bfd_reloc_ok;
12598 
12599     case R_ARM_GOTOFFFUNCDESC:
12600       {
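	/* FDPIC: the place receives the offset of the symbol's function
	   descriptor relative to the start of the GOT's output section;
	   the descriptor itself (and its R_ARM_FUNCDESC_VALUE relocation
	   or rofixups) is created by arm_elf_fill_funcdesc if that has
	   not been done yet.  */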
12601 	if (h == NULL)
12602 	  {
12603 	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12604 	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12605 
12606 	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
12607 	      {
12608 		* error_message = _("local symbol index too big");
12609 		return bfd_reloc_dangerous;
12610 	      }
12611 
12612 	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12613 	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12614 	    bfd_vma seg = -1;
12615 
12616 	    if (bfd_link_pic (info) && dynindx == 0)
12617 	      {
12618 		* error_message = _("no dynamic index information available");
12619 		return bfd_reloc_dangerous;
12620 	      }
12621 
12622 	    /* Resolve relocation.  */
12623 	    bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12624 			contents + rel->r_offset);
12625 	    /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12626 	       not done yet.  */
12627 	    arm_elf_fill_funcdesc (output_bfd, info,
12628 				   &local_fdpic_cnts[r_symndx].funcdesc_offset,
12629 				   dynindx, offset, addr, dynreloc_value, seg);
12630 	  }
12631 	else
12632 	  {
12633 	    int dynindx;
12634 	    int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12635 	    bfd_vma addr;
12636 	    bfd_vma seg = -1;
12637 
12638 	    /* For static binaries, sym_sec can be null.  */
12639 	    if (sym_sec)
12640 	      {
12641 		dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12642 		addr = dynreloc_value - sym_sec->output_section->vma;
12643 	      }
12644 	    else
12645 	      {
12646 		dynindx = 0;
12647 		addr = 0;
12648 	      }
12649 
12650 	    if (bfd_link_pic (info) && dynindx == 0)
12651 	      {
12652 		* error_message = _("no dynamic index information available");
12653 		return bfd_reloc_dangerous;
12654 	      }
12655 
12656 	    /* This case cannot occur: the funcdesc of a dynamic symbol is
12657 	       allocated by the dynamic loader, so the relocation cannot be
	       resolved here.  */
12658 	    if (h->dynindx != -1)
12659 	      {
12660 		* error_message = _("invalid dynamic index");
12661 		return bfd_reloc_dangerous;
12662 	      }
12663 
12664 	    /* Resolve relocation.  */
12665 	    bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12666 		        contents + rel->r_offset);
12667 	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12668 	    arm_elf_fill_funcdesc (output_bfd, info,
12669 				   &eh->fdpic_cnts.funcdesc_offset,
12670 				   dynindx, offset, addr, dynreloc_value, seg);
12671 	  }
12672       }
12673       *unresolved_reloc_p = false;
12674       return bfd_reloc_ok;
12675 
12676     case R_ARM_GOTFUNCDESC:
12677       {
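	/* FDPIC: the place receives the offset (relative to the start of
	   the GOT's output section) of a GOT slot which will hold the
	   address of the symbol's function descriptor; the slot is
	   initialised below by a rofixup or a dynamic relocation
	   (R_ARM_RELATIVE or R_ARM_FUNCDESC).  */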
12678 	if (h != NULL)
12679 	  {
12680 	    Elf_Internal_Rela outrel;
12681 
12682 	    /* Resolve relocation.  */
12683 	    bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12684 				     + sgot->output_offset),
12685 			contents + rel->r_offset);
12686 	    /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE.  */
12687 	    if (h->dynindx == -1)
12688 	      {
12689 		int dynindx;
12690 		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12691 		bfd_vma addr;
12692 		bfd_vma seg = -1;
12693 
12694 		/* For static binaries sym_sec can be null.  */
12695 		if (sym_sec)
12696 		  {
12697 		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12698 		    addr = dynreloc_value - sym_sec->output_section->vma;
12699 		  }
12700 		else
12701 		  {
12702 		    dynindx = 0;
12703 		    addr = 0;
12704 		  }
12705 
12706 		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12707 		arm_elf_fill_funcdesc (output_bfd, info,
12708 				       &eh->fdpic_cnts.funcdesc_offset,
12709 				       dynindx, offset, addr, dynreloc_value, seg);
12710 	      }
12711 
12712 	    /* Add a dynamic relocation on GOT entry if not already done.  */
12713 	    if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12714 	      {
12715 		if (h->dynindx == -1)
12716 		  {
12717 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12718 		    if (h->root.type == bfd_link_hash_undefweak)
12719 		      bfd_put_32 (output_bfd, 0, sgot->contents
12720 				  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12721 		    else
12722 		      bfd_put_32 (output_bfd, sgot->output_section->vma
12723 				  + sgot->output_offset
12724 				  + (eh->fdpic_cnts.funcdesc_offset & ~1),
12725 				  sgot->contents
12726 				  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12727 		  }
12728 		else
12729 		  {
12730 		    outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12731 		  }
12732 		outrel.r_offset = sgot->output_section->vma
12733 		  + sgot->output_offset
12734 		  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12735 		outrel.r_addend = 0;
12736 		if (h->dynindx == -1 && !bfd_link_pic (info))
12737 		  if (h->root.type == bfd_link_hash_undefweak)
12738 		    arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
12739 		  else
12740 		    arm_elf_add_rofixup (output_bfd, globals->srofixup,
12741 					 outrel.r_offset);
12742 		else
12743 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12744 		eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12745 	      }
12746 	  }
12747 	else
12748 	  {
12749 	    /* Such a relocation against a static function should not have
12750 	       been emitted by the compiler.  */
12751 	    return bfd_reloc_notsupported;
12752 	  }
12753       }
12754       *unresolved_reloc_p = false;
12755       return bfd_reloc_ok;
12756 
12757     case R_ARM_FUNCDESC:
12758       {
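	/* FDPIC: the place itself must hold the address of the symbol's
	   function descriptor, so the static relocation is replaced by an
	   R_ARM_RELATIVE dynamic relocation, a rofixup, or an
	   R_ARM_FUNCDESC relocation against the dynamic symbol.  */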
12759 	if (h == NULL)
12760 	  {
12761 	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12762 	    Elf_Internal_Rela outrel;
12763 	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12764 
12765 	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
12766 	      {
12767 		* error_message = _("local symbol index too big");
12768 		return bfd_reloc_dangerous;
12769 	      }
12770 
12771 	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12772 	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12773 	    bfd_vma seg = -1;
12774 
12775 	    if (bfd_link_pic (info) && dynindx == 0)
12776 	      {
12777 		* error_message = _("dynamic index information not available");
12778 		return bfd_reloc_dangerous;
12779 	      }
12780 
12781 	    /* Replace the static FUNCDESC relocation with an
12782 	       R_ARM_RELATIVE dynamic relocation, or with a rofixup for
12783 	       an executable.  */
12784 	    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12785 	    outrel.r_offset = input_section->output_section->vma
12786 	      + input_section->output_offset + rel->r_offset;
12787 	    outrel.r_addend = 0;
12788 	    if (bfd_link_pic (info))
12789 	      elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12790 	    else
12791 	      arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12792 
12793 	    bfd_put_32 (input_bfd, sgot->output_section->vma
12794 			+ sgot->output_offset + offset, hit_data);
12795 
12796 	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12797 	    arm_elf_fill_funcdesc (output_bfd, info,
12798 				   &local_fdpic_cnts[r_symndx].funcdesc_offset,
12799 				   dynindx, offset, addr, dynreloc_value, seg);
12800 	  }
12801 	else
12802 	  {
12803 	    if (h->dynindx == -1)
12804 	      {
12805 		int dynindx;
12806 		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12807 		bfd_vma addr;
12808 		bfd_vma seg = -1;
12809 		Elf_Internal_Rela outrel;
12810 
12811 		/* For static binaries sym_sec can be null.  */
12812 		if (sym_sec)
12813 		  {
12814 		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12815 		    addr = dynreloc_value - sym_sec->output_section->vma;
12816 		  }
12817 		else
12818 		  {
12819 		    dynindx = 0;
12820 		    addr = 0;
12821 		  }
12822 
12823 		if (bfd_link_pic (info) && dynindx == 0)
12824 		  abort ();
12825 
12826 		/* Replace the static FUNCDESC relocation with an
12827 		   R_ARM_RELATIVE dynamic relocation.  */
12828 		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12829 		outrel.r_offset = input_section->output_section->vma
12830 		  + input_section->output_offset + rel->r_offset;
12831 		outrel.r_addend = 0;
12832 		if (bfd_link_pic (info))
12833 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12834 		else
12835 		  arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12836 
12837 		bfd_put_32 (input_bfd, sgot->output_section->vma
12838 			    + sgot->output_offset + offset, hit_data);
12839 
12840 		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12841 		arm_elf_fill_funcdesc (output_bfd, info,
12842 				       &eh->fdpic_cnts.funcdesc_offset,
12843 				       dynindx, offset, addr, dynreloc_value, seg);
12844 	      }
12845 	    else
12846 	      {
12847 		Elf_Internal_Rela outrel;
12848 
12849 		/* Add a dynamic relocation.  */
12850 		outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12851 		outrel.r_offset = input_section->output_section->vma
12852 		  + input_section->output_offset + rel->r_offset;
12853 		outrel.r_addend = 0;
12854 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12855 	      }
12856 	  }
12857       }
12858       *unresolved_reloc_p = false;
12859       return bfd_reloc_ok;
12860 
12861     case R_ARM_THM_BF16:
12862       {
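	/* The branch offset is scattered over the instruction pair:
	   immA (upper halfword bits 4:0) holds offset bits 16:12,
	   immB (lower halfword bits 10:1) holds bits 11:2 and
	   immC (lower halfword bit 11) holds bit 1.  */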
12863 	bfd_vma relocation;
12864 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12865 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12866 
12867 	if (globals->use_rel)
12868 	  {
12869 	    bfd_vma immA  = (upper_insn & 0x001f);
12870 	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12871 	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12872 	    addend  = (immA << 12);
12873 	    addend |= (immB << 2);
12874 	    addend |= (immC << 1);
12875 	    addend |= 1;
12876 	    /* Sign extend.  */
12877 	    signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12878 	  }
12879 
12880 	relocation  = value + signed_addend;
12881 	relocation -= (input_section->output_section->vma
12882 		       + input_section->output_offset
12883 		       + rel->r_offset);
12884 
12885 	/* Put RELOCATION back into the insn.  */
12886 	{
12887 	  bfd_vma immA = (relocation & 0x0001f000) >> 12;
12888 	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12889 	  bfd_vma immC = (relocation & 0x00000002) >> 1;
12890 
12891 	  upper_insn = (upper_insn & 0xffe0) | immA;
12892 	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12893 	}
12894 
12895 	/* Put the relocated value back in the object file:  */
12896 	bfd_put_16 (input_bfd, upper_insn, hit_data);
12897 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12898 
12899 	return bfd_reloc_ok;
12900       }
12901 
12902     case R_ARM_THM_BF12:
12903       {
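	/* As for R_ARM_THM_BF16, except that immA is a single bit
	   (upper halfword bit 0) supplying offset bit 12.  */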
12904 	bfd_vma relocation;
12905 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12906 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12907 
12908 	if (globals->use_rel)
12909 	  {
12910 	    bfd_vma immA  = (upper_insn & 0x0001);
12911 	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12912 	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12913 	    addend  = (immA << 12);
12914 	    addend |= (immB << 2);
12915 	    addend |= (immC << 1);
12916 	    addend |= 1;
12917 	    /* Sign extend.  */
12918 	    addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12919 	    signed_addend = addend;
12920 	  }
12921 
12922 	relocation  = value + signed_addend;
12923 	relocation -= (input_section->output_section->vma
12924 		       + input_section->output_offset
12925 		       + rel->r_offset);
12926 
12927 	/* Put RELOCATION back into the insn.  */
12928 	{
12929 	  bfd_vma immA = (relocation & 0x00001000) >> 12;
12930 	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12931 	  bfd_vma immC = (relocation & 0x00000002) >> 1;
12932 
12933 	  upper_insn = (upper_insn & 0xfffe) | immA;
12934 	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12935 	}
12936 
12937 	/* Put the relocated value back in the object file:  */
12938 	bfd_put_16 (input_bfd, upper_insn, hit_data);
12939 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12940 
12941 	return bfd_reloc_ok;
12942       }
12943 
12944     case R_ARM_THM_BF18:
12945       {
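	/* As for R_ARM_THM_BF16, except that immA is seven bits wide
	   (upper halfword bits 6:0) supplying offset bits 18:12.  */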
12946 	bfd_vma relocation;
12947 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12948 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12949 
12950 	if (globals->use_rel)
12951 	  {
12952 	    bfd_vma immA  = (upper_insn & 0x007f);
12953 	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12954 	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12955 	    addend  = (immA << 12);
12956 	    addend |= (immB << 2);
12957 	    addend |= (immC << 1);
12958 	    addend |= 1;
12959 	    /* Sign extend.  */
12960 	    addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
12961 	    signed_addend = addend;
12962 	  }
12963 
12964 	relocation  = value + signed_addend;
12965 	relocation -= (input_section->output_section->vma
12966 		       + input_section->output_offset
12967 		       + rel->r_offset);
12968 
12969 	/* Put RELOCATION back into the insn.  */
12970 	{
12971 	  bfd_vma immA = (relocation & 0x0007f000) >> 12;
12972 	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12973 	  bfd_vma immC = (relocation & 0x00000002) >> 1;
12974 
12975 	  upper_insn = (upper_insn & 0xff80) | immA;
12976 	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12977 	}
12978 
12979 	/* Put the relocated value back in the object file:  */
12980 	bfd_put_16 (input_bfd, upper_insn, hit_data);
12981 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12982 
12983 	return bfd_reloc_ok;
12984       }
12985 
12986     default:
12987       return bfd_reloc_notsupported;
12988     }
12989 }
12990 
12991 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */
12992 static void
12993 arm_add_to_rel (bfd *		   abfd,
12994 		bfd_byte *	   address,
12995 		reloc_howto_type * howto,
12996 		bfd_signed_vma	   increment)
12997 {
12998   bfd_signed_vma addend;
12999 
13000   if (howto->type == R_ARM_THM_CALL
13001       || howto->type == R_ARM_THM_JUMP24)
13002     {
13003       int upper_insn, lower_insn;
13004       int upper, lower;
13005 
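      /* The Thumb BL/B.W addend is split across the instruction pair:
	 the low eleven bits of the first halfword hold offset bits 22:12
	 and the low eleven bits of the second halfword hold bits 11:1.  */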
13006       upper_insn = bfd_get_16 (abfd, address);
13007       lower_insn = bfd_get_16 (abfd, address + 2);
13008       upper = upper_insn & 0x7ff;
13009       lower = lower_insn & 0x7ff;
13010 
13011       addend = (upper << 12) | (lower << 1);
13012       addend += increment;
13013       addend >>= 1;
13014 
13015       upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13016       lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13017 
13018       bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13019       bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13020     }
13021   else
13022     {
13023       bfd_vma	     contents;
13024 
13025       contents = bfd_get_32 (abfd, address);
13026 
13027       /* Get the (signed) value from the instruction.  */
13028       addend = contents & howto->src_mask;
13029       if (addend & ((howto->src_mask + 1) >> 1))
13030 	{
13031 	  bfd_signed_vma mask;
13032 
13033 	  mask = -1;
13034 	  mask &= ~ howto->src_mask;
13035 	  addend |= mask;
13036 	}
13037 
13038       /* Add in the increment (which is a byte value).  */
13039       switch (howto->type)
13040 	{
13041 	default:
13042 	  addend += increment;
13043 	  break;
13044 
13045 	case R_ARM_PC24:
13046 	case R_ARM_PLT32:
13047 	case R_ARM_CALL:
13048 	case R_ARM_JUMP24:
13049 	  addend <<= howto->size;
13050 	  addend += increment;
13051 
13052 	  /* Should we check for overflow here ?  */
13053 
13054 	  /* Drop any undesired bits.  */
13055 	  addend >>= howto->rightshift;
13056 	  break;
13057 	}
13058 
13059       contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13060 
13061       bfd_put_32 (abfd, contents, address);
13062     }
13063 }
13064 
13065 #define IS_ARM_TLS_RELOC(R_TYPE)	\
13066   ((R_TYPE) == R_ARM_TLS_GD32		\
13067    || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
13068    || (R_TYPE) == R_ARM_TLS_LDO32	\
13069    || (R_TYPE) == R_ARM_TLS_LDM32	\
13070    || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
13071    || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
13072    || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
13073    || (R_TYPE) == R_ARM_TLS_TPOFF32	\
13074    || (R_TYPE) == R_ARM_TLS_LE32	\
13075    || (R_TYPE) == R_ARM_TLS_IE32	\
13076    || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
13077    || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13078 
13079 /* Specific set of relocations for the GNU TLS dialect.  */
13080 #define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
13081   ((R_TYPE) == R_ARM_TLS_GOTDESC	\
13082    || (R_TYPE) == R_ARM_TLS_CALL	\
13083    || (R_TYPE) == R_ARM_THM_TLS_CALL	\
13084    || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
13085    || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13086 
13087 /* Relocate an ARM ELF section.  */
13088 
13089 static int
13090 elf32_arm_relocate_section (bfd *		   output_bfd,
13091 			    struct bfd_link_info * info,
13092 			    bfd *		   input_bfd,
13093 			    asection *		   input_section,
13094 			    bfd_byte *		   contents,
13095 			    Elf_Internal_Rela *	   relocs,
13096 			    Elf_Internal_Sym *	   local_syms,
13097 			    asection **		   local_sections)
13098 {
13099   Elf_Internal_Shdr *symtab_hdr;
13100   struct elf_link_hash_entry **sym_hashes;
13101   Elf_Internal_Rela *rel;
13102   Elf_Internal_Rela *relend;
13103   const char *name;
13104   struct elf32_arm_link_hash_table * globals;
13105 
13106   globals = elf32_arm_hash_table (info);
13107   if (globals == NULL)
13108     return false;
13109 
13110   symtab_hdr = & elf_symtab_hdr (input_bfd);
13111   sym_hashes = elf_sym_hashes (input_bfd);
13112 
13113   rel = relocs;
13114   relend = relocs + input_section->reloc_count;
13115   for (; rel < relend; rel++)
13116     {
13117       int			   r_type;
13118       reloc_howto_type *	   howto;
13119       unsigned long		   r_symndx;
13120       Elf_Internal_Sym *	   sym;
13121       asection *		   sec;
13122       struct elf_link_hash_entry * h;
13123       bfd_vma			   relocation;
13124       bfd_reloc_status_type	   r;
13125       arelent			   bfd_reloc;
13126       char			   sym_type;
13127       bool			   unresolved_reloc = false;
13128       char *error_message = NULL;
13129 
13130       r_symndx = ELF32_R_SYM (rel->r_info);
13131       r_type   = ELF32_R_TYPE (rel->r_info);
13132       r_type   = arm_real_reloc_type (globals, r_type);
13133 
13134       if (   r_type == R_ARM_GNU_VTENTRY
13135 	  || r_type == R_ARM_GNU_VTINHERIT)
13136 	continue;
13137 
13138       howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13139 
13140       if (howto == NULL)
13141 	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13142 
13143       h = NULL;
13144       sym = NULL;
13145       sec = NULL;
13146 
13147       if (r_symndx < symtab_hdr->sh_info)
13148 	{
13149 	  sym = local_syms + r_symndx;
13150 	  sym_type = ELF32_ST_TYPE (sym->st_info);
13151 	  sec = local_sections[r_symndx];
13152 
13153 	  /* An object file might have a reference to a local
13154 	     undefined symbol.  This is a daft object file, but we
13155 	     should at least do something about it.  V4BX & NONE
13156 	     relocations do not use the symbol and are explicitly
13157 	     allowed to use the undefined symbol, so allow those.
13158 	     Likewise for relocations against STN_UNDEF.  */
13159 	  if (r_type != R_ARM_V4BX
13160 	      && r_type != R_ARM_NONE
13161 	      && r_symndx != STN_UNDEF
13162 	      && bfd_is_und_section (sec)
13163 	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13164 	    (*info->callbacks->undefined_symbol)
13165 	      (info, bfd_elf_string_from_elf_section
13166 	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
13167 	       input_bfd, input_section,
13168 	       rel->r_offset, true);
13169 
13170 	  if (globals->use_rel)
13171 	    {
13172 	      relocation = (sec->output_section->vma
13173 			    + sec->output_offset
13174 			    + sym->st_value);
13175 	      if (!bfd_link_relocatable (info)
13176 		  && (sec->flags & SEC_MERGE)
13177 		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13178 		{
13179 		  asection *msec;
13180 		  bfd_vma addend, value;
13181 
13182 		  switch (r_type)
13183 		    {
13184 		    case R_ARM_MOVW_ABS_NC:
13185 		    case R_ARM_MOVT_ABS:
13186 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13187 		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
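		      /* Sign-extend the reassembled 16-bit immediate.  */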
13188 		      addend = (addend ^ 0x8000) - 0x8000;
13189 		      break;
13190 
13191 		    case R_ARM_THM_MOVW_ABS_NC:
13192 		    case R_ARM_THM_MOVT_ABS:
13193 		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13194 			      << 16;
13195 		      value |= bfd_get_16 (input_bfd,
13196 					   contents + rel->r_offset + 2);
13197 		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13198 			       | ((value & 0x04000000) >> 15);
13199 		      addend = (addend ^ 0x8000) - 0x8000;
13200 		      break;
13201 
13202 		    default:
13203 		      if (howto->rightshift
13204 			  || (howto->src_mask & (howto->src_mask + 1)))
13205 			{
13206 			  _bfd_error_handler
13207 			    /* xgettext:c-format */
13208 			    (_("%pB(%pA+%#" PRIx64 "): "
13209 			       "%s relocation against SEC_MERGE section"),
13210 			     input_bfd, input_section,
13211 			     (uint64_t) rel->r_offset, howto->name);
13212 			  return false;
13213 			}
13214 
13215 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13216 
13217 		      /* Get the (signed) value from the instruction.  */
13218 		      addend = value & howto->src_mask;
13219 		      if (addend & ((howto->src_mask + 1) >> 1))
13220 			{
13221 			  bfd_signed_vma mask;
13222 
13223 			  mask = -1;
13224 			  mask &= ~ howto->src_mask;
13225 			  addend |= mask;
13226 			}
13227 		      break;
13228 		    }
13229 
13230 		  msec = sec;
13231 		  addend =
13232 		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13233 		    - relocation;
13234 		  addend += msec->output_section->vma + msec->output_offset;
13235 
13236 		  /* Cases here must match those in the preceding
13237 		     switch statement.  */
13238 		  switch (r_type)
13239 		    {
13240 		    case R_ARM_MOVW_ABS_NC:
13241 		    case R_ARM_MOVT_ABS:
13242 		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13243 			      | (addend & 0xfff);
13244 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13245 		      break;
13246 
13247 		    case R_ARM_THM_MOVW_ABS_NC:
13248 		    case R_ARM_THM_MOVT_ABS:
13249 		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13250 			      | (addend & 0xff) | ((addend & 0x0800) << 15);
13251 		      bfd_put_16 (input_bfd, value >> 16,
13252 				  contents + rel->r_offset);
13253 		      bfd_put_16 (input_bfd, value,
13254 				  contents + rel->r_offset + 2);
13255 		      break;
13256 
13257 		    default:
13258 		      value = (value & ~ howto->dst_mask)
13259 			      | (addend & howto->dst_mask);
13260 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13261 		      break;
13262 		    }
13263 		}
13264 	    }
13265 	  else
13266 	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13267 	}
13268       else
13269 	{
13270 	  bool warned, ignored;
13271 
13272 	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13273 				   r_symndx, symtab_hdr, sym_hashes,
13274 				   h, sec, relocation,
13275 				   unresolved_reloc, warned, ignored);
13276 
13277 	  sym_type = h->type;
13278 	}
13279 
13280       if (sec != NULL && discarded_section (sec))
13281 	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13282 					 rel, 1, relend, howto, 0, contents);
13283 
13284       if (bfd_link_relocatable (info))
13285 	{
13286 	  /* This is a relocatable link.  We don't have to change
13287 	     anything, unless the reloc is against a section symbol,
13288 	     in which case we have to adjust according to where the
13289 	     section symbol winds up in the output section.  */
13290 	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13291 	    {
13292 	      if (globals->use_rel)
13293 		arm_add_to_rel (input_bfd, contents + rel->r_offset,
13294 				howto, (bfd_signed_vma) sec->output_offset);
13295 	      else
13296 		rel->r_addend += sec->output_offset;
13297 	    }
13298 	  continue;
13299 	}
13300 
13301       if (h != NULL)
13302 	name = h->root.root.string;
13303       else
13304 	{
13305 	  name = (bfd_elf_string_from_elf_section
13306 		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
13307 	  if (name == NULL || *name == '\0')
13308 	    name = bfd_section_name (sec);
13309 	}
13310 
13311       if (r_symndx != STN_UNDEF
13312 	  && r_type != R_ARM_NONE
13313 	  && (h == NULL
13314 	      || h->root.type == bfd_link_hash_defined
13315 	      || h->root.type == bfd_link_hash_defweak)
13316 	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13317 	{
13318 	  _bfd_error_handler
13319 	    ((sym_type == STT_TLS
13320 	      /* xgettext:c-format */
13321 	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13322 	      /* xgettext:c-format */
13323 	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13324 	     input_bfd,
13325 	     input_section,
13326 	     (uint64_t) rel->r_offset,
13327 	     howto->name,
13328 	     name);
13329 	}
13330 
13331       /* We call elf32_arm_final_link_relocate unless we're completely
13332 	 done, i.e., the relaxation produced the final output we want,
13333 	 and we won't let anybody mess with it. Also, we have to do
13334 	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13335 	 both in relaxed and non-relaxed cases.  */
13336       if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13337 	  || (IS_ARM_TLS_GNU_RELOC (r_type)
13338 	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
13339 		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13340 		   & GOT_TLS_GDESC)))
13341 	{
13342 	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13343 				   contents, rel, h == NULL);
13344 	  /* This may have been marked unresolved because it came from
13345 	     a shared library.  But we've just dealt with that.  */
13346 	  unresolved_reloc = 0;
13347 	}
13348       else
13349 	r = bfd_reloc_continue;
13350 
13351       if (r == bfd_reloc_continue)
13352 	{
13353 	  unsigned char branch_type =
13354 	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13355 	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13356 
13357 	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13358 					     input_section, contents, rel,
13359 					     relocation, info, sec, name,
13360 					     sym_type, branch_type, h,
13361 					     &unresolved_reloc,
13362 					     &error_message);
13363 	}
13364 
13365       /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13366 	 because such sections are not SEC_ALLOC and thus ld.so will
13367 	 not process them.  */
13368       if (unresolved_reloc
13369 	  && !((input_section->flags & SEC_DEBUGGING) != 0
13370 	       && h->def_dynamic)
13371 	  && _bfd_elf_section_offset (output_bfd, info, input_section,
13372 				      rel->r_offset) != (bfd_vma) -1)
13373 	{
13374 	  _bfd_error_handler
13375 	    /* xgettext:c-format */
13376 	    (_("%pB(%pA+%#" PRIx64 "): "
13377 	       "unresolvable %s relocation against symbol `%s'"),
13378 	     input_bfd,
13379 	     input_section,
13380 	     (uint64_t) rel->r_offset,
13381 	     howto->name,
13382 	     h->root.root.string);
13383 	  return false;
13384 	}
13385 
13386       if (r != bfd_reloc_ok)
13387 	{
13388 	  switch (r)
13389 	    {
13390 	    case bfd_reloc_overflow:
13391 	      /* If the overflowing reloc was to an undefined symbol,
13392 		 we have already printed one error message and there
13393 		 is no point complaining again.  */
13394 	      if (!h || h->root.type != bfd_link_hash_undefined)
13395 		(*info->callbacks->reloc_overflow)
13396 		  (info, (h ? &h->root : NULL), name, howto->name,
13397 		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13398 	      break;
13399 
13400 	    case bfd_reloc_undefined:
13401 	      (*info->callbacks->undefined_symbol)
13402 		(info, name, input_bfd, input_section, rel->r_offset, true);
13403 	      break;
13404 
13405 	    case bfd_reloc_outofrange:
13406 	      error_message = _("out of range");
13407 	      goto common_error;
13408 
13409 	    case bfd_reloc_notsupported:
13410 	      error_message = _("unsupported relocation");
13411 	      goto common_error;
13412 
13413 	    case bfd_reloc_dangerous:
13414 	      /* error_message should already be set.  */
13415 	      goto common_error;
13416 
13417 	    default:
13418 	      error_message = _("unknown error");
13419 	      /* Fall through.  */
13420 
13421 	    common_error:
13422 	      BFD_ASSERT (error_message != NULL);
13423 	      (*info->callbacks->reloc_dangerous)
13424 		(info, error_message, input_bfd, input_section, rel->r_offset);
13425 	      break;
13426 	    }
13427 	}
13428     }
13429 
13430   return true;
13431 }
13432 
13433 /* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
13434    adds the edit to the start of the list.  (The list must be built in order of
13435    ascending TINDEX: the function's callers are primarily responsible for
13436    maintaining that condition).  */
13437 
13438 static void
13439 add_unwind_table_edit (arm_unwind_table_edit **head,
13440 		       arm_unwind_table_edit **tail,
13441 		       arm_unwind_edit_type type,
13442 		       asection *linked_section,
13443 		       unsigned int tindex)
13444 {
13445   arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13446       xmalloc (sizeof (arm_unwind_table_edit));
13447 
13448   new_edit->type = type;
13449   new_edit->linked_section = linked_section;
13450   new_edit->index = tindex;
13451 
13452   if (tindex > 0)
13453     {
13454       new_edit->next = NULL;
13455 
13456       if (*tail)
13457 	(*tail)->next = new_edit;
13458 
13459       (*tail) = new_edit;
13460 
13461       if (!*head)
13462 	(*head) = new_edit;
13463     }
13464   else
13465     {
13466       new_edit->next = *head;
13467 
13468       if (!*tail)
13469 	*tail = new_edit;
13470 
13471       *head = new_edit;
13472     }
13473 }
13474 
13475 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13476 
13477 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
13478 
13479 static void
13480 adjust_exidx_size (asection *exidx_sec, int adjust)
13481 {
13482   asection *out_sec;
13483 
13484   if (!exidx_sec->rawsize)
13485     exidx_sec->rawsize = exidx_sec->size;
13486 
13487   bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13488   out_sec = exidx_sec->output_section;
13489   /* Adjust size of output section.  */
13490   bfd_set_section_size (out_sec, out_sec->size + adjust);
13491 }
13492 
13493 /* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
13494 
13495 static void
13496 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
13497 {
13498   struct _arm_elf_section_data *exidx_arm_data;
13499 
13500   exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13501   add_unwind_table_edit
13502     (&exidx_arm_data->u.exidx.unwind_edit_list,
13503      &exidx_arm_data->u.exidx.unwind_edit_tail,
13504      INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13505 
13506   exidx_arm_data->additional_reloc_count++;
13507 
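  /* Each .ARM.exidx entry is two 32-bit words, so the section grows by
     eight bytes.  */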
13508   adjust_exidx_size (exidx_sec, 8);
13509 }
13510 
13511 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13512    made to those tables, such that:
13513 
13514      1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13515      2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13516 	codes which have been inlined into the index).
13517 
13518    If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
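
   For example, if two consecutive functions use the same inlined unwind
   opcodes, their index entries carry identical second words; the second
   entry can then be elided, since the first entry covers both functions.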
13519 
13520    The edits are applied when the tables are written
13521    (in elf32_arm_write_section).  */
13522 
13523 bool
13524 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13525 			      unsigned int num_text_sections,
13526 			      struct bfd_link_info *info,
13527 			      bool merge_exidx_entries)
13528 {
13529   bfd *inp;
13530   unsigned int last_second_word = 0, i;
13531   asection *last_exidx_sec = NULL;
13532   asection *last_text_sec = NULL;
13533   int last_unwind_type = -1;
13534 
13535   /* Walk over all EXIDX sections, and create backlinks from the corresponding
13536      text sections.  */
13537   for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13538     {
13539       asection *sec;
13540 
13541       for (sec = inp->sections; sec != NULL; sec = sec->next)
13542 	{
13543 	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13544 	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13545 
13546 	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13547 	    continue;
13548 
13549 	  if (elf_sec->linked_to)
13550 	    {
13551 	      Elf_Internal_Shdr *linked_hdr
13552 		= &elf_section_data (elf_sec->linked_to)->this_hdr;
13553 	      struct _arm_elf_section_data *linked_sec_arm_data
13554 		= get_arm_elf_section_data (linked_hdr->bfd_section);
13555 
13556 	      if (linked_sec_arm_data == NULL)
13557 		continue;
13558 
13559 	      /* Link this .ARM.exidx section back from the text section it
13560 		 describes.  */
13561 	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13562 	    }
13563 	}
13564     }
13565 
13566   /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
13567      index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13568      and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
13569 
13570   for (i = 0; i < num_text_sections; i++)
13571     {
13572       asection *sec = text_section_order[i];
13573       asection *exidx_sec;
13574       struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13575       struct _arm_elf_section_data *exidx_arm_data;
13576       bfd_byte *contents = NULL;
13577       int deleted_exidx_bytes = 0;
13578       bfd_vma j;
13579       arm_unwind_table_edit *unwind_edit_head = NULL;
13580       arm_unwind_table_edit *unwind_edit_tail = NULL;
13581       Elf_Internal_Shdr *hdr;
13582       bfd *ibfd;
13583 
13584       if (arm_data == NULL)
13585 	continue;
13586 
13587       exidx_sec = arm_data->u.text.arm_exidx_sec;
13588       if (exidx_sec == NULL)
13589 	{
13590 	  /* Section has no unwind data.  */
13591 	  if (last_unwind_type == 0 || !last_exidx_sec)
13592 	    continue;
13593 
13594 	  /* Ignore zero sized sections.  */
13595 	  if (sec->size == 0)
13596 	    continue;
13597 
13598 	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
13599 	  last_unwind_type = 0;
13600 	  continue;
13601 	}
13602 
13603       /* Skip /DISCARD/ sections.  */
13604       if (bfd_is_abs_section (exidx_sec->output_section))
13605 	continue;
13606 
13607       hdr = &elf_section_data (exidx_sec)->this_hdr;
13608       if (hdr->sh_type != SHT_ARM_EXIDX)
13609 	continue;
13610 
13611       exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13612       if (exidx_arm_data == NULL)
13613 	continue;
13614 
13615       ibfd = exidx_sec->owner;
13616 
13617       if (hdr->contents != NULL)
13618 	contents = hdr->contents;
13619       else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13620 	/* An error?  */
13621 	continue;
13622 
13623       if (last_unwind_type > 0)
13624 	{
13625 	  unsigned int first_word = bfd_get_32 (ibfd, contents);
13626 	  /* Add cantunwind if first unwind item does not match section
13627 	     start.  */
13628 	  if (first_word != sec->vma)
13629 	    {
13630 	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
13631 	      last_unwind_type = 0;
13632 	    }
13633 	}
13634 
13635       for (j = 0; j < hdr->sh_size; j += 8)
13636 	{
13637 	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13638 	  int unwind_type;
13639 	  int elide = 0;
13640 
13641 	  /* An EXIDX_CANTUNWIND entry.  */
13642 	  if (second_word == 1)
13643 	    {
13644 	      if (last_unwind_type == 0)
13645 		elide = 1;
13646 	      unwind_type = 0;
13647 	    }
13648 	  /* Inlined unwinding data.  Merge if equal to previous.  */
13649 	  else if ((second_word & 0x80000000) != 0)
13650 	    {
13651 	      if (merge_exidx_entries
13652 		   && last_second_word == second_word && last_unwind_type == 1)
13653 		elide = 1;
13654 	      unwind_type = 1;
13655 	      last_second_word = second_word;
13656 	    }
13657 	  /* Normal table entry.  In theory we could merge these too,
13658 	     but duplicate entries are likely to be much less common.  */
13659 	  else
13660 	    unwind_type = 2;
13661 
13662 	  if (elide && !bfd_link_relocatable (info))
13663 	    {
13664 	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13665 				     DELETE_EXIDX_ENTRY, NULL, j / 8);
13666 
13667 	      deleted_exidx_bytes += 8;
13668 	    }
13669 
13670 	  last_unwind_type = unwind_type;
13671 	}
13672 
13673       /* Free contents if we allocated it ourselves.  */
13674       if (contents != hdr->contents)
13675 	free (contents);
13676 
13677       /* Record edits to be applied later (in elf32_arm_write_section).  */
13678       exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13679       exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13680 
13681       if (deleted_exidx_bytes > 0)
13682 	adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);
13683 
13684       last_exidx_sec = exidx_sec;
13685       last_text_sec = sec;
13686     }
13687 
13688   /* Add terminating CANTUNWIND entry.  */
13689   if (!bfd_link_relocatable (info) && last_exidx_sec
13690       && last_unwind_type != 0)
13691     insert_cantunwind_after (last_text_sec, last_exidx_sec);
13692 
13693   return true;
13694 }
13695 
13696 static bool
13697 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13698 			       bfd *ibfd, const char *name)
13699 {
13700   asection *sec, *osec;
13701 
13702   sec = bfd_get_linker_section (ibfd, name);
13703   if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13704     return true;
13705 
13706   osec = sec->output_section;
13707   if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13708     return true;
13709 
13710   if (! bfd_set_section_contents (obfd, osec, sec->contents,
13711 				  sec->output_offset, sec->size))
13712     return false;
13713 
13714   return true;
13715 }
13716 
13717 static bool
13718 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13719 {
13720   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13721   asection *sec, *osec;
13722 
13723   if (globals == NULL)
13724     return false;
13725 
13726   /* Invoke the regular ELF backend linker to do all the work.  */
13727   if (!bfd_elf_final_link (abfd, info))
13728     return false;
13729 
13730   /* Process stub sections (e.g. BE8 encoding, ...).  */
13731   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13732   unsigned int i;
13733   for (i = 0; i < htab->top_id; i++)
13734     {
13735       sec = htab->stub_group[i].stub_sec;
13736       /* Only process it once, in its link_sec slot.  */
13737       if (sec && i == htab->stub_group[i].link_sec->id)
13738 	{
13739 	  osec = sec->output_section;
13740 	  elf32_arm_write_section (abfd, info, sec, sec->contents);
13741 	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
13742 					  sec->output_offset, sec->size))
13743 	    return false;
13744 	}
13745     }
13746 
13747   /* Write out any glue sections now that we have created all the
13748      stubs.  */
13749   if (globals->bfd_of_glue_owner != NULL)
13750     {
13751       if (! elf32_arm_output_glue_section (info, abfd,
13752 					   globals->bfd_of_glue_owner,
13753 					   ARM2THUMB_GLUE_SECTION_NAME))
13754 	return false;
13755 
13756       if (! elf32_arm_output_glue_section (info, abfd,
13757 					   globals->bfd_of_glue_owner,
13758 					   THUMB2ARM_GLUE_SECTION_NAME))
13759 	return false;
13760 
13761       if (! elf32_arm_output_glue_section (info, abfd,
13762 					   globals->bfd_of_glue_owner,
13763 					   VFP11_ERRATUM_VENEER_SECTION_NAME))
13764 	return false;
13765 
13766       if (! elf32_arm_output_glue_section (info, abfd,
13767 					   globals->bfd_of_glue_owner,
13768 					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13769 	return false;
13770 
13771       if (! elf32_arm_output_glue_section (info, abfd,
13772 					   globals->bfd_of_glue_owner,
13773 					   ARM_BX_GLUE_SECTION_NAME))
13774 	return false;
13775     }
13776 
13777   return true;
13778 }
13779 
13780 /* Return a best guess for the machine number based on the attributes.  */
13781 
13782 static unsigned int
13783 bfd_arm_get_mach_from_attributes (bfd * abfd)
13784 {
13785   int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13786 
13787   switch (arch)
13788     {
13789     case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13790     case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13791     case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13792     case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13793 
13794     case TAG_CPU_ARCH_V5TE:
13795       {
13796 	char * name;
13797 
13798 	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13799 	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13800 
13801 	if (name)
13802 	  {
13803 	    if (strcmp (name, "IWMMXT2") == 0)
13804 	      return bfd_mach_arm_iWMMXt2;
13805 
13806 	    if (strcmp (name, "IWMMXT") == 0)
13807 	      return bfd_mach_arm_iWMMXt;
13808 
13809 	    if (strcmp (name, "XSCALE") == 0)
13810 	      {
13811 		int wmmx;
13812 
13813 		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13814 		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13815 		switch (wmmx)
13816 		  {
13817 		  case 1: return bfd_mach_arm_iWMMXt;
13818 		  case 2: return bfd_mach_arm_iWMMXt2;
13819 		  default: return bfd_mach_arm_XScale;
13820 		  }
13821 	      }
13822 	  }
13823 
13824 	return bfd_mach_arm_5TE;
13825       }
13826 
13827     case TAG_CPU_ARCH_V5TEJ:
13828 	return bfd_mach_arm_5TEJ;
13829     case TAG_CPU_ARCH_V6:
13830 	return bfd_mach_arm_6;
13831     case TAG_CPU_ARCH_V6KZ:
13832 	return bfd_mach_arm_6KZ;
13833     case TAG_CPU_ARCH_V6T2:
13834 	return bfd_mach_arm_6T2;
13835     case TAG_CPU_ARCH_V6K:
13836 	return bfd_mach_arm_6K;
13837     case TAG_CPU_ARCH_V7:
13838 	return bfd_mach_arm_7;
13839     case TAG_CPU_ARCH_V6_M:
13840 	return bfd_mach_arm_6M;
13841     case TAG_CPU_ARCH_V6S_M:
13842 	return bfd_mach_arm_6SM;
13843     case TAG_CPU_ARCH_V7E_M:
13844 	return bfd_mach_arm_7EM;
13845     case TAG_CPU_ARCH_V8:
13846 	return bfd_mach_arm_8;
13847     case TAG_CPU_ARCH_V8R:
13848 	return bfd_mach_arm_8R;
13849     case TAG_CPU_ARCH_V8M_BASE:
13850 	return bfd_mach_arm_8M_BASE;
13851     case TAG_CPU_ARCH_V8M_MAIN:
13852 	return bfd_mach_arm_8M_MAIN;
13853     case TAG_CPU_ARCH_V8_1M_MAIN:
13854 	return bfd_mach_arm_8_1M_MAIN;
13855 
13856     default:
13857       /* Force entry to be added for any new known Tag_CPU_arch value.  */
13858       BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13859 
13860       /* Unknown Tag_CPU_arch value.  */
13861       return bfd_mach_arm_unknown;
13862     }
13863 }
13864 
13865 /* Set the right machine number.  */
13866 
13867 static bool
13868 elf32_arm_object_p (bfd *abfd)
13869 {
13870   unsigned int mach;
13871 
13872   mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13873 
13874   if (mach == bfd_mach_arm_unknown)
13875     {
13876       if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13877 	mach = bfd_mach_arm_ep9312;
13878       else
13879 	mach = bfd_arm_get_mach_from_attributes (abfd);
13880     }
13881 
13882   bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13883   return true;
13884 }
13885 
13886 /* Function to keep ARM specific flags in the ELF header.  */
13887 
13888 static bool
13889 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13890 {
13891   if (elf_flags_init (abfd)
13892       && elf_elfheader (abfd)->e_flags != flags)
13893     {
13894       if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13895 	{
13896 	  if (flags & EF_ARM_INTERWORK)
13897 	    _bfd_error_handler
13898 	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13899 	       abfd);
13900 	  else
13901 	    _bfd_error_handler
13902 	      (_("warning: clearing the interworking flag of %pB due to outside request"),
13903 	       abfd);
13904 	}
13905     }
13906   else
13907     {
13908       elf_elfheader (abfd)->e_flags = flags;
13909       elf_flags_init (abfd) = true;
13910     }
13911 
13912   return true;
13913 }
13914 
13915 /* Copy backend specific data from one object module to another.  */
13916 
13917 static bool
13918 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13919 {
13920   flagword in_flags;
13921   flagword out_flags;
13922 
13923   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13924     return true;
13925 
13926   in_flags  = elf_elfheader (ibfd)->e_flags;
13927   out_flags = elf_elfheader (obfd)->e_flags;
13928 
13929   if (elf_flags_init (obfd)
13930       && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13931       && in_flags != out_flags)
13932     {
13933       /* Cannot mix APCS26 and APCS32 code.  */
13934       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13935 	return false;
13936 
13937       /* Cannot mix float APCS and non-float APCS code.  */
13938       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13939 	return false;
13940 
13941       /* If the src and dest have different interworking flags
13942 	 then turn off the interworking bit.  */
13943       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13944 	{
13945 	  if (out_flags & EF_ARM_INTERWORK)
13946 	    _bfd_error_handler
13947 	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13948 	       obfd, ibfd);
13949 
13950 	  in_flags &= ~EF_ARM_INTERWORK;
13951 	}
13952 
13953       /* Likewise for PIC, though don't warn for this case.  */
13954       if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13955 	in_flags &= ~EF_ARM_PIC;
13956     }
13957 
13958   elf_elfheader (obfd)->e_flags = in_flags;
13959   elf_flags_init (obfd) = true;
13960 
13961   return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13962 }
13963 
13964 /* Values for Tag_ABI_PCS_R9_use.  */
13965 enum
13966 {
13967   AEABI_R9_V6,
13968   AEABI_R9_SB,
13969   AEABI_R9_TLS,
13970   AEABI_R9_unused
13971 };
13972 
13973 /* Values for Tag_ABI_PCS_RW_data.  */
13974 enum
13975 {
13976   AEABI_PCS_RW_data_absolute,
13977   AEABI_PCS_RW_data_PCrel,
13978   AEABI_PCS_RW_data_SBrel,
13979   AEABI_PCS_RW_data_unused
13980 };
13981 
13982 /* Values for Tag_ABI_enum_size.  */
13983 enum
13984 {
13985   AEABI_enum_unused,
13986   AEABI_enum_short,
13987   AEABI_enum_wide,
13988   AEABI_enum_forced_wide
13989 };
13990 
13991 /* Determine whether an object attribute tag takes an integer, a
13992    string or both.  */
13993 
13994 static int
13995 elf32_arm_obj_attrs_arg_type (int tag)
13996 {
13997   if (tag == Tag_compatibility)
13998     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13999   else if (tag == Tag_nodefaults)
14000     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14001   else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14002     return ATTR_TYPE_FLAG_STR_VAL;
14003   else if (tag < 32)
14004     return ATTR_TYPE_FLAG_INT_VAL;
14005   else
14006     return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14007 }
14008 
14009 /* The ABI defines that Tag_conformance should be emitted first, and that
14010    Tag_nodefaults should be second (if either is defined).  This sets those
14011    two positions, and bumps up the position of all the remaining tags to
14012    compensate.  */
14013 static int
14014 elf32_arm_obj_attrs_order (int num)
14015 {
14016   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14017     return Tag_conformance;
14018   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14019     return Tag_nodefaults;
14020   if ((num - 2) < Tag_nodefaults)
14021     return num - 2;
14022   if ((num - 1) < Tag_conformance)
14023     return num - 1;
14024   return num;
14025 }
14026 
14027 /* Attribute numbers >=64 (mod 128) can be safely ignored.  */
14028 static bool
14029 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14030 {
14031   if ((tag & 127) < 64)
14032     {
14033       _bfd_error_handler
14034 	(_("%pB: unknown mandatory EABI object attribute %d"),
14035 	 abfd, tag);
14036       bfd_set_error (bfd_error_bad_value);
14037       return false;
14038     }
14039   else
14040     {
14041       _bfd_error_handler
14042 	(_("warning: %pB: unknown EABI object attribute %d"),
14043 	 abfd, tag);
14044       return true;
14045     }
14046 }
14047 
14048 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14049    Returns -1 if no architecture could be read.  */
14050 
14051 static int
14052 get_secondary_compatible_arch (bfd *abfd)
14053 {
14054   obj_attribute *attr =
14055     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14056 
14057   /* Note: the tag and its argument below are uleb128 values, though
14058      currently-defined values fit in one byte for each.  */
14059   if (attr->s
14060       && attr->s[0] == Tag_CPU_arch
14061       && (attr->s[1] & 128) != 128
14062       && attr->s[2] == 0)
14063    return attr->s[1];
14064 
14065   /* This tag is "safely ignorable", so don't complain if it looks funny.  */
14066   return -1;
14067 }
14068 
14069 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14070    The tag is removed if ARCH is -1.  */
14071 
14072 static void
14073 set_secondary_compatible_arch (bfd *abfd, int arch)
14074 {
14075   obj_attribute *attr =
14076     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14077 
14078   if (arch == -1)
14079     {
14080       attr->s = NULL;
14081       return;
14082     }
14083 
14084   /* Note: the tag and its argument below are uleb128 values, though
14085      currently-defined values fit in one byte for each.  */
14086   if (!attr->s)
14087     attr->s = (char *) bfd_alloc (abfd, 3);
14088   attr->s[0] = Tag_CPU_arch;
14089   attr->s[1] = arch;
14090   attr->s[2] = '\0';
14091 }
14092 
14093 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14094    into account.  */
14095 
14096 static int
14097 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14098 		      int newtag, int secondary_compat)
14099 {
14100 #define T(X) TAG_CPU_ARCH_##X
14101   int tagl, tagh, result;
14102   const int v6t2[] =
14103     {
14104       T(V6T2),   /* PRE_V4.  */
14105       T(V6T2),   /* V4.  */
14106       T(V6T2),   /* V4T.  */
14107       T(V6T2),   /* V5T.  */
14108       T(V6T2),   /* V5TE.  */
14109       T(V6T2),   /* V5TEJ.  */
14110       T(V6T2),   /* V6.  */
14111       T(V7),     /* V6KZ.  */
14112       T(V6T2)    /* V6T2.  */
14113     };
14114   const int v6k[] =
14115     {
14116       T(V6K),    /* PRE_V4.  */
14117       T(V6K),    /* V4.  */
14118       T(V6K),    /* V4T.  */
14119       T(V6K),    /* V5T.  */
14120       T(V6K),    /* V5TE.  */
14121       T(V6K),    /* V5TEJ.  */
14122       T(V6K),    /* V6.  */
14123       T(V6KZ),   /* V6KZ.  */
14124       T(V7),     /* V6T2.  */
14125       T(V6K)     /* V6K.  */
14126     };
14127   const int v7[] =
14128     {
14129       T(V7),     /* PRE_V4.  */
14130       T(V7),     /* V4.  */
14131       T(V7),     /* V4T.  */
14132       T(V7),     /* V5T.  */
14133       T(V7),     /* V5TE.  */
14134       T(V7),     /* V5TEJ.  */
14135       T(V7),     /* V6.  */
14136       T(V7),     /* V6KZ.  */
14137       T(V7),     /* V6T2.  */
14138       T(V7),     /* V6K.  */
14139       T(V7)      /* V7.  */
14140     };
14141   const int v6_m[] =
14142     {
14143       -1,	 /* PRE_V4.  */
14144       -1,	 /* V4.  */
14145       T(V6K),    /* V4T.  */
14146       T(V6K),    /* V5T.  */
14147       T(V6K),    /* V5TE.  */
14148       T(V6K),    /* V5TEJ.  */
14149       T(V6K),    /* V6.  */
14150       T(V6KZ),   /* V6KZ.  */
14151       T(V7),     /* V6T2.  */
14152       T(V6K),    /* V6K.  */
14153       T(V7),     /* V7.  */
14154       T(V6_M)    /* V6_M.  */
14155     };
14156   const int v6s_m[] =
14157     {
14158       -1,	 /* PRE_V4.  */
14159       -1,	 /* V4.  */
14160       T(V6K),    /* V4T.  */
14161       T(V6K),    /* V5T.  */
14162       T(V6K),    /* V5TE.  */
14163       T(V6K),    /* V5TEJ.  */
14164       T(V6K),    /* V6.  */
14165       T(V6KZ),   /* V6KZ.  */
14166       T(V7),     /* V6T2.  */
14167       T(V6K),    /* V6K.  */
14168       T(V7),     /* V7.  */
14169       T(V6S_M),  /* V6_M.  */
14170       T(V6S_M)   /* V6S_M.  */
14171     };
14172   const int v7e_m[] =
14173     {
14174       -1,	 /* PRE_V4.  */
14175       -1,	 /* V4.  */
14176       T(V7E_M),  /* V4T.  */
14177       T(V7E_M),  /* V5T.  */
14178       T(V7E_M),  /* V5TE.  */
14179       T(V7E_M),  /* V5TEJ.  */
14180       T(V7E_M),  /* V6.  */
14181       T(V7E_M),  /* V6KZ.  */
14182       T(V7E_M),  /* V6T2.  */
14183       T(V7E_M),  /* V6K.  */
14184       T(V7E_M),  /* V7.  */
14185       T(V7E_M),  /* V6_M.  */
14186       T(V7E_M),  /* V6S_M.  */
14187       T(V7E_M)   /* V7E_M.  */
14188     };
14189   const int v8[] =
14190     {
14191       T(V8),		/* PRE_V4.  */
14192       T(V8),		/* V4.  */
14193       T(V8),		/* V4T.  */
14194       T(V8),		/* V5T.  */
14195       T(V8),		/* V5TE.  */
14196       T(V8),		/* V5TEJ.  */
14197       T(V8),		/* V6.  */
14198       T(V8),		/* V6KZ.  */
14199       T(V8),		/* V6T2.  */
14200       T(V8),		/* V6K.  */
14201       T(V8),		/* V7.  */
14202       T(V8),		/* V6_M.  */
14203       T(V8),		/* V6S_M.  */
14204       T(V8),		/* V7E_M.  */
14205       T(V8)		/* V8.  */
14206     };
14207   const int v8r[] =
14208     {
14209       T(V8R),		/* PRE_V4.  */
14210       T(V8R),		/* V4.  */
14211       T(V8R),		/* V4T.  */
14212       T(V8R),		/* V5T.  */
14213       T(V8R),		/* V5TE.  */
14214       T(V8R),		/* V5TEJ.  */
14215       T(V8R),		/* V6.  */
14216       T(V8R),		/* V6KZ.  */
14217       T(V8R),		/* V6T2.  */
14218       T(V8R),		/* V6K.  */
14219       T(V8R),		/* V7.  */
14220       T(V8R),		/* V6_M.  */
14221       T(V8R),		/* V6S_M.  */
14222       T(V8R),		/* V7E_M.  */
14223       T(V8),		/* V8.  */
14224       T(V8R),		/* V8R.  */
14225     };
14226   const int v8m_baseline[] =
14227     {
14228       -1,		/* PRE_V4.  */
14229       -1,		/* V4.  */
14230       -1,		/* V4T.  */
14231       -1,		/* V5T.  */
14232       -1,		/* V5TE.  */
14233       -1,		/* V5TEJ.  */
14234       -1,		/* V6.  */
14235       -1,		/* V6KZ.  */
14236       -1,		/* V6T2.  */
14237       -1,		/* V6K.  */
14238       -1,		/* V7.  */
14239       T(V8M_BASE),	/* V6_M.  */
14240       T(V8M_BASE),	/* V6S_M.  */
14241       -1,		/* V7E_M.  */
14242       -1,		/* V8.  */
14243       -1,		/* V8R.  */
14244       T(V8M_BASE)	/* V8-M BASELINE.  */
14245     };
14246   const int v8m_mainline[] =
14247     {
14248       -1,		/* PRE_V4.  */
14249       -1,		/* V4.  */
14250       -1,		/* V4T.  */
14251       -1,		/* V5T.  */
14252       -1,		/* V5TE.  */
14253       -1,		/* V5TEJ.  */
14254       -1,		/* V6.  */
14255       -1,		/* V6KZ.  */
14256       -1,		/* V6T2.  */
14257       -1,		/* V6K.  */
14258       T(V8M_MAIN),	/* V7.  */
14259       T(V8M_MAIN),	/* V6_M.  */
14260       T(V8M_MAIN),	/* V6S_M.  */
14261       T(V8M_MAIN),	/* V7E_M.  */
14262       -1,		/* V8.  */
14263       -1,		/* V8R.  */
14264       T(V8M_MAIN),	/* V8-M BASELINE.  */
14265       T(V8M_MAIN)	/* V8-M MAINLINE.  */
14266     };
14267   const int v8_1m_mainline[] =
14268     {
14269       -1,		/* PRE_V4.  */
14270       -1,		/* V4.  */
14271       -1,		/* V4T.  */
14272       -1,		/* V5T.  */
14273       -1,		/* V5TE.  */
14274       -1,		/* V5TEJ.  */
14275       -1,		/* V6.  */
14276       -1,		/* V6KZ.  */
14277       -1,		/* V6T2.  */
14278       -1,		/* V6K.  */
14279       T(V8_1M_MAIN),	/* V7.  */
14280       T(V8_1M_MAIN),	/* V6_M.  */
14281       T(V8_1M_MAIN),	/* V6S_M.  */
14282       T(V8_1M_MAIN),	/* V7E_M.  */
14283       -1,		/* V8.  */
14284       -1,		/* V8R.  */
14285       T(V8_1M_MAIN),	/* V8-M BASELINE.  */
14286       T(V8_1M_MAIN),	/* V8-M MAINLINE.  */
14287       -1,		/* Unused (18).  */
14288       -1,		/* Unused (19).  */
14289       -1,		/* Unused (20).  */
14290       T(V8_1M_MAIN)	/* V8.1-M MAINLINE.  */
14291     };
14292   const int v4t_plus_v6_m[] =
14293     {
14294       -1,		/* PRE_V4.  */
14295       -1,		/* V4.  */
14296       T(V4T),		/* V4T.  */
14297       T(V5T),		/* V5T.  */
14298       T(V5TE),		/* V5TE.  */
14299       T(V5TEJ),		/* V5TEJ.  */
14300       T(V6),		/* V6.  */
14301       T(V6KZ),		/* V6KZ.  */
14302       T(V6T2),		/* V6T2.  */
14303       T(V6K),		/* V6K.  */
14304       T(V7),		/* V7.  */
14305       T(V6_M),		/* V6_M.  */
14306       T(V6S_M),		/* V6S_M.  */
14307       T(V7E_M),		/* V7E_M.  */
14308       T(V8),		/* V8.  */
14309       -1,		/* V8R.  */
14310       T(V8M_BASE),	/* V8-M BASELINE.  */
14311       T(V8M_MAIN),	/* V8-M MAINLINE.  */
14312       -1,		/* Unused (18).  */
14313       -1,		/* Unused (19).  */
14314       -1,		/* Unused (20).  */
14315       T(V8_1M_MAIN),	/* V8.1-M MAINLINE.  */
14316       T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
14317     };
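  /* Combination table: rows are indexed by the newer Tag_CPU_arch value
     minus TAG_CPU_ARCH_V6T2, columns by the older value.  The NULL rows
     correspond to the unused tag values 18-20 noted above.  */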
14318   const int *comb[] =
14319     {
14320       v6t2,
14321       v6k,
14322       v7,
14323       v6_m,
14324       v6s_m,
14325       v7e_m,
14326       v8,
14327       v8r,
14328       v8m_baseline,
14329       v8m_mainline,
14330       NULL,
14331       NULL,
14332       NULL,
14333       v8_1m_mainline,
14334       /* Pseudo-architecture.  */
14335       v4t_plus_v6_m
14336     };
14337 
14338   /* Check we've not got a higher architecture than we know about.  */
14339 
14340   if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14341     {
14342       _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14343       return -1;
14344     }
14345 
14346   /* Override old tag if we have a Tag_also_compatible_with on the output.  */
14347 
14348   if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14349       || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14350     oldtag = T(V4T_PLUS_V6_M);
14351 
14352   /* And override the new tag if we have a Tag_also_compatible_with on the
14353      input.  */
14354 
14355   if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14356       || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14357     newtag = T(V4T_PLUS_V6_M);
14358 
14359   tagl = (oldtag < newtag) ? oldtag : newtag;
14360   result = tagh = (oldtag > newtag) ? oldtag : newtag;
14361 
14362   /* Architectures before V6KZ add features monotonically.  */
14363   if (tagh <= TAG_CPU_ARCH_V6KZ)
14364     return result;
14365 
14366   result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14367 
14368   /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14369      as the canonical version.  */
14370   if (result == T(V4T_PLUS_V6_M))
14371     {
14372       result = T(V4T);
14373       *secondary_compat_out = T(V6_M);
14374     }
14375   else
14376     *secondary_compat_out = -1;
14377 
14378   if (result == -1)
14379     {
14380       _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14381 			  ibfd, oldtag, newtag);
14382       return -1;
14383     }
14384 
14385   return result;
14386 #undef T
14387 }
14388 
14389 /* Query attributes object to see if integer divide instructions may be
14390    present in an object.  */
14391 static bool
14392 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14393 {
14394   int arch = attr[Tag_CPU_arch].i;
14395   int profile = attr[Tag_CPU_arch_profile].i;
14396 
14397   switch (attr[Tag_DIV_use].i)
14398     {
14399     case 0:
14400       /* Integer divide allowed if the instruction is contained in the architecture.  */
14401       if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14402 	return true;
14403       else if (arch >= TAG_CPU_ARCH_V7E_M)
14404 	return true;
14405       else
14406 	return false;
14407 
14408     case 1:
14409       /* Integer divide explicitly prohibited.  */
14410       return false;
14411 
14412     default:
14413       /* Unrecognised case - treat as allowing divide everywhere.  */
14414     case 2:
14415       /* Integer divide allowed in ARM state.  */
14416       return true;
14417     }
14418 }
14419 
14420 /* Query attributes object to see if integer divide instructions are
14421    forbidden in the object.  This is not the inverse of
14422    elf32_arm_attributes_accept_div.  */
14423 static bool
14424 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14425 {
14426   return attr[Tag_DIV_use].i == 1;
14427 }
14428 
14429 /* Merge EABI object attributes from IBFD into OBFD.  Raise an error if there
14430    are conflicting attributes.  */
14431 
14432 static bool
14433 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14434 {
14435   bfd *obfd = info->output_bfd;
14436   obj_attribute *in_attr;
14437   obj_attribute *out_attr;
14438   /* Some tags have 0 = don't care, 1 = strong requirement,
14439      2 = weak requirement.  */
14440   static const int order_021[3] = {0, 2, 1};
14441   int i;
14442   bool result = true;
14443   const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14444 
14445   /* Skip the linker stubs file.  This preserves previous behavior
14446      of accepting unknown attributes in the first input file - but
14447      is that a bug?  */
14448   if (ibfd->flags & BFD_LINKER_CREATED)
14449     return true;
14450 
14451   /* Skip any input that has no attribute section.
14452      This allows object files without an attribute section to be linked
14453      with any others.  */
14454   if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14455     return true;
14456 
14457   if (!elf_known_obj_attributes_proc (obfd)[0].i)
14458     {
14459       /* This is the first object.  Copy the attributes.  */
14460       _bfd_elf_copy_obj_attributes (ibfd, obfd);
14461 
14462       out_attr = elf_known_obj_attributes_proc (obfd);
14463 
14464       /* Use the Tag_null value to indicate the attributes have been
14465 	 initialized.  */
14466       out_attr[0].i = 1;
14467 
14468       /* We do not output objects with Tag_MPextension_use_legacy - we move
14469 	 the attribute's value to Tag_MPextension_use.  */
14470       if (out_attr[Tag_MPextension_use_legacy].i != 0)
14471 	{
14472 	  if (out_attr[Tag_MPextension_use].i != 0
14473 	      && out_attr[Tag_MPextension_use_legacy].i
14474 		!= out_attr[Tag_MPextension_use].i)
14475 	    {
14476 	      _bfd_error_handler
14477 		(_("Error: %pB has both the current and legacy "
14478 		   "Tag_MPextension_use attributes"), ibfd);
14479 	      result = false;
14480 	    }
14481 
14482 	  out_attr[Tag_MPextension_use] =
14483 	    out_attr[Tag_MPextension_use_legacy];
14484 	  out_attr[Tag_MPextension_use_legacy].type = 0;
14485 	  out_attr[Tag_MPextension_use_legacy].i = 0;
14486 	}
14487 
14488       return result;
14489     }
14490 
14491   in_attr = elf_known_obj_attributes_proc (ibfd);
14492   out_attr = elf_known_obj_attributes_proc (obfd);
14493   /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
14494   if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14495     {
14496       /* Ignore mismatches if the object doesn't use floating point or is
14497 	 floating point ABI independent.  */
14498       if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14499 	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14500 	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14501 	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14502       else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14503 	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14504 	{
14505 	  _bfd_error_handler
14506 	    (_("error: %pB uses VFP register arguments, %pB does not"),
14507 	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14508 	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14509 	  result = false;
14510 	}
14511     }
14512 
14513   for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14514     {
14515       /* Merge this attribute with existing attributes.  */
14516       switch (i)
14517 	{
14518 	case Tag_CPU_raw_name:
14519 	case Tag_CPU_name:
14520 	  /* These are merged after Tag_CPU_arch.  */
14521 	  break;
14522 
14523 	case Tag_ABI_optimization_goals:
14524 	case Tag_ABI_FP_optimization_goals:
14525 	  /* Use the first value seen.  */
14526 	  break;
14527 
14528 	case Tag_CPU_arch:
14529 	  {
14530 	    int secondary_compat = -1, secondary_compat_out = -1;
14531 	    unsigned int saved_out_attr = out_attr[i].i;
14532 	    int arch_attr;
14533 	    static const char *name_table[] =
14534 	      {
14535 		/* These aren't real CPU names, but we can't guess
14536 		   that from the architecture version alone.  */
14537 		"Pre v4",
14538 		"ARM v4",
14539 		"ARM v4T",
14540 		"ARM v5T",
14541 		"ARM v5TE",
14542 		"ARM v5TEJ",
14543 		"ARM v6",
14544 		"ARM v6KZ",
14545 		"ARM v6T2",
14546 		"ARM v6K",
14547 		"ARM v7",
14548 		"ARM v6-M",
14549 		"ARM v6S-M",
14550 		"ARM v8",
14551 		"",
14552 		"ARM v8-M.baseline",
14553 		"ARM v8-M.mainline",
14554 	    };
14555 
14556 	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
14557 	    secondary_compat = get_secondary_compatible_arch (ibfd);
14558 	    secondary_compat_out = get_secondary_compatible_arch (obfd);
14559 	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14560 					      &secondary_compat_out,
14561 					      in_attr[i].i,
14562 					      secondary_compat);
14563 
14564 	    /* Return with error if failed to merge.  */
14565 	    if (arch_attr == -1)
14566 	      return false;
14567 
14568 	    out_attr[i].i = arch_attr;
14569 
14570 	    set_secondary_compatible_arch (obfd, secondary_compat_out);
14571 
14572 	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
14573 	    if (out_attr[i].i == saved_out_attr)
14574 	      ; /* Leave the names alone.  */
14575 	    else if (out_attr[i].i == in_attr[i].i)
14576 	      {
14577 		/* The output architecture has been changed to match the
14578 		   input architecture.  Use the input names.  */
14579 		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14580 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14581 		  : NULL;
14582 		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14583 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14584 		  : NULL;
14585 	      }
14586 	    else
14587 	      {
14588 		out_attr[Tag_CPU_name].s = NULL;
14589 		out_attr[Tag_CPU_raw_name].s = NULL;
14590 	      }
14591 
14592 	    /* If we still don't have a value for Tag_CPU_name,
14593 	       make one up now.  Tag_CPU_raw_name remains blank.  */
14594 	    if (out_attr[Tag_CPU_name].s == NULL
14595 		&& out_attr[i].i < ARRAY_SIZE (name_table))
14596 	      out_attr[Tag_CPU_name].s =
14597 		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14598 	  }
14599 	  break;
14600 
14601 	case Tag_ARM_ISA_use:
14602 	case Tag_THUMB_ISA_use:
14603 	case Tag_WMMX_arch:
14604 	case Tag_Advanced_SIMD_arch:
14605 	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
14606 	case Tag_ABI_FP_rounding:
14607 	case Tag_ABI_FP_exceptions:
14608 	case Tag_ABI_FP_user_exceptions:
14609 	case Tag_ABI_FP_number_model:
14610 	case Tag_FP_HP_extension:
14611 	case Tag_CPU_unaligned_access:
14612 	case Tag_T2EE_use:
14613 	case Tag_MPextension_use:
14614 	case Tag_MVE_arch:
14615 	  /* Use the largest value specified.  */
14616 	  if (in_attr[i].i > out_attr[i].i)
14617 	    out_attr[i].i = in_attr[i].i;
14618 	  break;
14619 
14620 	case Tag_ABI_align_preserved:
14621 	case Tag_ABI_PCS_RO_data:
14622 	  /* Use the smallest value specified.  */
14623 	  if (in_attr[i].i < out_attr[i].i)
14624 	    out_attr[i].i = in_attr[i].i;
14625 	  break;
14626 
14627 	case Tag_ABI_align_needed:
14628 	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14629 	      && (in_attr[Tag_ABI_align_preserved].i == 0
14630 		  || out_attr[Tag_ABI_align_preserved].i == 0))
14631 	    {
14632 	      /* This error message should be enabled once all non-conformant
14633 		 binaries in the toolchain have had the attributes set
14634 		 properly.
14635 	      _bfd_error_handler
14636 		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
14637 		 obfd, ibfd);
14638 	      result = false; */
14639 	    }
14640 	  /* Fall through.  */
14641 	case Tag_ABI_FP_denormal:
14642 	case Tag_ABI_PCS_GOT_use:
14643 	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14644 	     value if greater than 2 (for future-proofing).  */
14645 	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14646 	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14647 		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14648 	    out_attr[i].i = in_attr[i].i;
14649 	  break;
14650 
14651 	case Tag_Virtualization_use:
14652 	  /* The virtualization tag effectively stores two bits of
14653 	     information: the intended use of TrustZone (in bit 0), and the
14654 	     intended use of Virtualization (in bit 1).  */
14655 	  if (out_attr[i].i == 0)
14656 	    out_attr[i].i = in_attr[i].i;
14657 	  else if (in_attr[i].i != 0
14658 		   && in_attr[i].i != out_attr[i].i)
14659 	    {
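	      /* Differing non-zero values: when both fit in the two defined
		 bits the union is 3 (both TrustZone and Virtualization
		 intended); larger values are unknown and cannot be merged.  */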
14660 	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14661 		out_attr[i].i = 3;
14662 	      else
14663 		{
14664 		  _bfd_error_handler
14665 		    (_("error: %pB: unable to merge virtualization attributes "
14666 		       "with %pB"),
14667 		     obfd, ibfd);
14668 		  result = false;
14669 		}
14670 	    }
14671 	  break;
14672 
14673 	case Tag_CPU_arch_profile:
14674 	  if (out_attr[i].i != in_attr[i].i)
14675 	    {
14676 	      /* 0 will merge with anything.
14677 		 'A' and 'S' merge to 'A'.
14678 		 'R' and 'S' merge to 'R'.
14679 		 'M' and 'A|R|S' is an error.  */
14680 	      if (out_attr[i].i == 0
14681 		  || (out_attr[i].i == 'S'
14682 		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14683 		out_attr[i].i = in_attr[i].i;
14684 	      else if (in_attr[i].i == 0
14685 		       || (in_attr[i].i == 'S'
14686 			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14687 		; /* Do nothing.  */
14688 	      else
14689 		{
14690 		  _bfd_error_handler
14691 		    (_("error: %pB: conflicting architecture profiles %c/%c"),
14692 		     ibfd,
14693 		     in_attr[i].i ? in_attr[i].i : '0',
14694 		     out_attr[i].i ? out_attr[i].i : '0');
14695 		  result = false;
14696 		}
14697 	    }
14698 	  break;
14699 
14700 	case Tag_DSP_extension:
14701 	  /* No need to change the output value if either:
14702 	     - the input architecture is pre (<=) ARMv5T (no DSP), or
14703 	     - the input has an M profile other than ARMv7E-M and no DSP.  */
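	  /* Note: 3 is the Tag_CPU_arch value for ARMv5T and 13 the value
	     for ARMv7E-M (see the architecture tables above).  */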
14704 	  if (in_attr[Tag_CPU_arch].i <= 3
14705 	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
14706 		  && in_attr[Tag_CPU_arch].i != 13
14707 		  && in_attr[i].i == 0))
14708 	    ; /* Do nothing.  */
14709 	  /* The output value should be 0 if DSP is part of the output
14710 	     architecture, i.e. a post (>=) ARMv5TE output architecture with
14711 	     an A, R or S profile, or an ARMv7E-M output architecture.  */
14712 	  else if (out_attr[Tag_CPU_arch].i >= 4
14713 		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
14714 		       || out_attr[Tag_CPU_arch_profile].i == 'R'
14715 		       || out_attr[Tag_CPU_arch_profile].i == 'S'
14716 		       || out_attr[Tag_CPU_arch].i == 13))
14717 	    out_attr[i].i = 0;
14718 	  /* Otherwise, DSP instructions are added and not part of output
14719 	     architecture.  */
14720 	  else
14721 	    out_attr[i].i = 1;
14722 	  break;
14723 
14724 	case Tag_FP_arch:
14725 	    {
14726 	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14727 		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14728 		 when it's 0.  It might mean absence of FP hardware if
14729 		 Tag_FP_arch is zero.  */
14730 
14731 #define VFP_VERSION_COUNT 9
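	      /* Indexed by Tag_FP_arch value: the FP ISA version and
		 register count implied by each defined value.  */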
14732 	      static const struct
14733 	      {
14734 		  int ver;
14735 		  int regs;
14736 	      } vfp_versions[VFP_VERSION_COUNT] =
14737 		{
14738 		  {0, 0},
14739 		  {1, 16},
14740 		  {2, 16},
14741 		  {3, 32},
14742 		  {3, 16},
14743 		  {4, 32},
14744 		  {4, 16},
14745 		  {8, 32},
14746 		  {8, 16}
14747 		};
14748 	      int ver;
14749 	      int regs;
14750 	      int newval;
14751 
14752 	      /* If the output has no requirement about FP hardware,
14753 		 follow the requirement of the input.  */
14754 	      if (out_attr[i].i == 0)
14755 		{
14756 		  /* This assert is still reasonable; we shouldn't
14757 		     produce the suspicious build attribute
14758 		     combination (see below for in_attr).  */
14759 		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14760 		  out_attr[i].i = in_attr[i].i;
14761 		  out_attr[Tag_ABI_HardFP_use].i
14762 		    = in_attr[Tag_ABI_HardFP_use].i;
14763 		  break;
14764 		}
14765 	      /* If the input has no requirement about FP hardware, do
14766 		 nothing.  */
14767 	      else if (in_attr[i].i == 0)
14768 		{
14769 		  /* We used to assert that Tag_ABI_HardFP_use was
14770 		     zero here, but we should never assert when
14771 		     consuming an object file that has suspicious
14772 		     build attributes.  The single precision variant
14773 		     of 'no FP architecture' is still 'no FP
14774 		     architecture', so we just ignore the tag in this
14775 		     case.  */
14776 		  break;
14777 		}
14778 
14779 	      /* Both the input and the output have nonzero Tag_FP_arch.
14780 		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */
14781 
14782 	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
14783 		 do nothing.  */
14784 	      if (in_attr[Tag_ABI_HardFP_use].i == 0
14785 		  && out_attr[Tag_ABI_HardFP_use].i == 0)
14786 		;
14787 	      /* If the input and the output have different Tag_ABI_HardFP_use,
14788 		 the combination of them is 0 (implied by Tag_FP_arch).  */
14789 	      else if (in_attr[Tag_ABI_HardFP_use].i
14790 		       != out_attr[Tag_ABI_HardFP_use].i)
14791 		out_attr[Tag_ABI_HardFP_use].i = 0;
14792 
14793 	      /* Now we can handle Tag_FP_arch.  */
14794 
14795 	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14796 		 pick the biggest.  */
14797 	      if (in_attr[i].i >= VFP_VERSION_COUNT
14798 		  && in_attr[i].i > out_attr[i].i)
14799 		{
14800 		  out_attr[i] = in_attr[i];
14801 		  break;
14802 		}
14803 	      /* The output uses the superset of input features
14804 		 (ISA version) and registers.  */
14805 	      ver = vfp_versions[in_attr[i].i].ver;
14806 	      if (ver < vfp_versions[out_attr[i].i].ver)
14807 		ver = vfp_versions[out_attr[i].i].ver;
14808 	      regs = vfp_versions[in_attr[i].i].regs;
14809 	      if (regs < vfp_versions[out_attr[i].i].regs)
14810 		regs = vfp_versions[out_attr[i].i].regs;
14811 	      /* This assumes all possible supersets are also valid
14812 		 options.  */
14813 	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14814 		{
14815 		  if (regs == vfp_versions[newval].regs
14816 		      && ver == vfp_versions[newval].ver)
14817 		    break;
14818 		}
14819 	      out_attr[i].i = newval;
14820 	    }
14821 	  break;
14822 	case Tag_PCS_config:
14823 	  if (out_attr[i].i == 0)
14824 	    out_attr[i].i = in_attr[i].i;
14825 	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14826 	    {
14827 	      /* It's sometimes ok to mix different configs, so this is only
14828 		 a warning.  */
14829 	      _bfd_error_handler
14830 		(_("warning: %pB: conflicting platform configuration"), ibfd);
14831 	    }
14832 	  break;
14833 	case Tag_ABI_PCS_R9_use:
14834 	  if (in_attr[i].i != out_attr[i].i
14835 	      && out_attr[i].i != AEABI_R9_unused
14836 	      && in_attr[i].i != AEABI_R9_unused)
14837 	    {
14838 	      _bfd_error_handler
14839 		(_("error: %pB: conflicting use of R9"), ibfd);
14840 	      result = false;
14841 	    }
14842 	  if (out_attr[i].i == AEABI_R9_unused)
14843 	    out_attr[i].i = in_attr[i].i;
14844 	  break;
14845 	case Tag_ABI_PCS_RW_data:
14846 	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14847 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14848 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14849 	    {
14850 	      _bfd_error_handler
14851 		(_("error: %pB: SB relative addressing conflicts with use of R9"),
14852 		 ibfd);
14853 	      result = false;
14854 	    }
14855 	  /* Use the smallest value specified.  */
14856 	  if (in_attr[i].i < out_attr[i].i)
14857 	    out_attr[i].i = in_attr[i].i;
14858 	  break;
14859 	case Tag_ABI_PCS_wchar_t:
14860 	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14861 	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14862 	    {
14863 	      _bfd_error_handler
14864 		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14865 		 ibfd, in_attr[i].i, out_attr[i].i);
14866 	    }
14867 	  else if (in_attr[i].i && !out_attr[i].i)
14868 	    out_attr[i].i = in_attr[i].i;
14869 	  break;
14870 	case Tag_ABI_enum_size:
14871 	  if (in_attr[i].i != AEABI_enum_unused)
14872 	    {
14873 	      if (out_attr[i].i == AEABI_enum_unused
14874 		  || out_attr[i].i == AEABI_enum_forced_wide)
14875 		{
14876 		  /* The existing object is compatible with anything.
14877 		     Use whatever requirements the new object has.  */
14878 		  out_attr[i].i = in_attr[i].i;
14879 		}
14880 	      else if (in_attr[i].i != AEABI_enum_forced_wide
14881 		       && out_attr[i].i != in_attr[i].i
14882 		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
14883 		{
14884 		  static const char *aeabi_enum_names[] =
14885 		    { "", "variable-size", "32-bit", "" };
14886 		  const char *in_name =
14887 		    in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14888 		    ? aeabi_enum_names[in_attr[i].i]
14889 		    : "<unknown>";
14890 		  const char *out_name =
14891 		    out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14892 		    ? aeabi_enum_names[out_attr[i].i]
14893 		    : "<unknown>";
14894 		  _bfd_error_handler
14895 		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14896 		     ibfd, in_name, out_name);
14897 		}
14898 	    }
14899 	  break;
14900 	case Tag_ABI_VFP_args:
14901 	  /* Already done.  */
14902 	  break;
14903 	case Tag_ABI_WMMX_args:
14904 	  if (in_attr[i].i != out_attr[i].i)
14905 	    {
14906 	      _bfd_error_handler
14907 		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
14908 		 ibfd, obfd);
14909 	      result = false;
14910 	    }
14911 	  break;
14912 	case Tag_compatibility:
14913 	  /* Merged in target-independent code.  */
14914 	  break;
14915 	case Tag_ABI_HardFP_use:
14916 	  /* This is handled along with Tag_FP_arch.  */
14917 	  break;
14918 	case Tag_ABI_FP_16bit_format:
14919 	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
14920 	    {
14921 	      if (in_attr[i].i != out_attr[i].i)
14922 		{
14923 		  _bfd_error_handler
14924 		    (_("error: fp16 format mismatch between %pB and %pB"),
14925 		     ibfd, obfd);
14926 		  result = false;
14927 		}
14928 	    }
14929 	  if (in_attr[i].i != 0)
14930 	    out_attr[i].i = in_attr[i].i;
14931 	  break;
14932 
14933 	case Tag_DIV_use:
14934 	  /* A value of zero on input means that the divide instruction may
14935 	     be used if available in the base architecture as specified via
14936 	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
14937 	     the user did not want divide instructions.  A value of 2
14938 	     explicitly means that divide instructions were allowed in ARM
14939 	     and Thumb state.  */
14940 	  if (in_attr[i].i == out_attr[i].i)
14941 	    /* Do nothing.  */ ;
14942 	  else if (elf32_arm_attributes_forbid_div (in_attr)
14943 		   && !elf32_arm_attributes_accept_div (out_attr))
14944 	    out_attr[i].i = 1;
14945 	  else if (elf32_arm_attributes_forbid_div (out_attr)
14946 		   && elf32_arm_attributes_accept_div (in_attr))
14947 	    out_attr[i].i = in_attr[i].i;
14948 	  else if (in_attr[i].i == 2)
14949 	    out_attr[i].i = in_attr[i].i;
14950 	  break;
14951 
14952 	case Tag_MPextension_use_legacy:
14953 	  /* We don't output objects with Tag_MPextension_use_legacy - we
14954 	     move the value to Tag_MPextension_use.  */
14955 	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14956 	    {
14957 	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14958 		{
14959 		  _bfd_error_handler
14960 		    (_("%pB has both the current and legacy "
14961 		       "Tag_MPextension_use attributes"),
14962 		     ibfd);
14963 		  result = false;
14964 		}
14965 	    }
14966 
14967 	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14968 	    out_attr[Tag_MPextension_use] = in_attr[i];
14969 
14970 	  break;
14971 
14972 	case Tag_nodefaults:
14973 	  /* This tag is set if it exists, but the value is unused (and is
14974 	     typically zero).  We don't actually need to do anything here -
14975 	     the merge happens automatically when the type flags are merged
14976 	     below.  */
14977 	  break;
14978 	case Tag_also_compatible_with:
14979 	  /* Already done in Tag_CPU_arch.  */
14980 	  break;
14981 	case Tag_conformance:
14982 	  /* Keep the attribute if it matches.  Throw it away otherwise.
14983 	     No attribute means no claim to conform.  */
14984 	  if (!in_attr[i].s || !out_attr[i].s
14985 	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14986 	    out_attr[i].s = NULL;
14987 	  break;
14988 
14989 	default:
14990 	  result
14991 	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14992 	}
14993 
14994       /* If out_attr was copied from in_attr then it won't have a type yet.  */
14995       if (in_attr[i].type && !out_attr[i].type)
14996 	out_attr[i].type = in_attr[i].type;
14997     }
14998 
14999   /* Merge Tag_compatibility attributes and any common GNU ones.  */
15000   if (!_bfd_elf_merge_object_attributes (ibfd, info))
15001     return false;
15002 
15003   /* Check for any attributes not known on ARM.  */
15004   result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15005 
15006   return result;
15007 }
15008 
15009 
15010 /* Return TRUE if the two EABI versions are incompatible.  */
15011 
15012 static bool
15013 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15014 {
15015   /* v4 and v5 are the same spec before and after it was released,
15016      so allow mixing them.  */
15017   if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15018       || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15019     return true;
15020 
15021   return (iver == over);
15022 }
15023 
15024 /* Merge backend specific data from an object file to the output
15025    object file when linking.  */
15026 
15027 static bool
15028 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15029 
15030 /* Display the flags field.  */
15031 
15032 static bool
15033 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15034 {
15035   FILE * file = (FILE *) ptr;
15036   unsigned long flags;
15037 
15038   BFD_ASSERT (abfd != NULL && ptr != NULL);
15039 
15040   /* Print normal ELF private data.  */
15041   _bfd_elf_print_private_bfd_data (abfd, ptr);
15042 
15043   flags = elf_elfheader (abfd)->e_flags;
15044   /* Ignore init flag - it may not be set, despite the flags field
15045      containing valid data.  */
15046 
15047   fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
15048 
15049   switch (EF_ARM_EABI_VERSION (flags))
15050     {
15051     case EF_ARM_EABI_UNKNOWN:
15052       /* The following flag bits are GNU extensions and not part of the
15053 	 official ARM ELF extended ABI.  Hence they are only decoded if
15054 	 the EABI version is not set.  */
15055       if (flags & EF_ARM_INTERWORK)
15056 	fprintf (file, _(" [interworking enabled]"));
15057 
15058       if (flags & EF_ARM_APCS_26)
15059 	fprintf (file, " [APCS-26]");
15060       else
15061 	fprintf (file, " [APCS-32]");
15062 
15063       if (flags & EF_ARM_VFP_FLOAT)
15064 	fprintf (file, _(" [VFP float format]"));
15065       else if (flags & EF_ARM_MAVERICK_FLOAT)
15066 	fprintf (file, _(" [Maverick float format]"));
15067       else
15068 	fprintf (file, _(" [FPA float format]"));
15069 
15070       if (flags & EF_ARM_APCS_FLOAT)
15071 	fprintf (file, _(" [floats passed in float registers]"));
15072 
15073       if (flags & EF_ARM_PIC)
15074 	fprintf (file, _(" [position independent]"));
15075 
15076       if (flags & EF_ARM_NEW_ABI)
15077 	fprintf (file, _(" [new ABI]"));
15078 
15079       if (flags & EF_ARM_OLD_ABI)
15080 	fprintf (file, _(" [old ABI]"));
15081 
15082       if (flags & EF_ARM_SOFT_FLOAT)
15083 	fprintf (file, _(" [software FP]"));
15084 
15085       flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15086 		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15087 		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
15088 		 | EF_ARM_MAVERICK_FLOAT);
15089       break;
15090 
15091     case EF_ARM_EABI_VER1:
15092       fprintf (file, _(" [Version1 EABI]"));
15093 
15094       if (flags & EF_ARM_SYMSARESORTED)
15095 	fprintf (file, _(" [sorted symbol table]"));
15096       else
15097 	fprintf (file, _(" [unsorted symbol table]"));
15098 
15099       flags &= ~ EF_ARM_SYMSARESORTED;
15100       break;
15101 
15102     case EF_ARM_EABI_VER2:
15103       fprintf (file, _(" [Version2 EABI]"));
15104 
15105       if (flags & EF_ARM_SYMSARESORTED)
15106 	fprintf (file, _(" [sorted symbol table]"));
15107       else
15108 	fprintf (file, _(" [unsorted symbol table]"));
15109 
15110       if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15111 	fprintf (file, _(" [dynamic symbols use segment index]"));
15112 
15113       if (flags & EF_ARM_MAPSYMSFIRST)
15114 	fprintf (file, _(" [mapping symbols precede others]"));
15115 
15116       flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15117 		 | EF_ARM_MAPSYMSFIRST);
15118       break;
15119 
15120     case EF_ARM_EABI_VER3:
15121       fprintf (file, _(" [Version3 EABI]"));
15122       break;
15123 
15124     case EF_ARM_EABI_VER4:
15125       fprintf (file, _(" [Version4 EABI]"));
15126       goto eabi;
15127 
15128     case EF_ARM_EABI_VER5:
15129       fprintf (file, _(" [Version5 EABI]"));
15130 
15131       if (flags & EF_ARM_ABI_FLOAT_SOFT)
15132 	fprintf (file, _(" [soft-float ABI]"));
15133 
15134       if (flags & EF_ARM_ABI_FLOAT_HARD)
15135 	fprintf (file, _(" [hard-float ABI]"));
15136 
15137       flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15138 
15139     eabi:
15140       if (flags & EF_ARM_BE8)
15141 	fprintf (file, _(" [BE8]"));
15142 
15143       if (flags & EF_ARM_LE8)
15144 	fprintf (file, _(" [LE8]"));
15145 
15146       flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15147       break;
15148 
15149     default:
15150       fprintf (file, _(" <EABI version unrecognised>"));
15151       break;
15152     }
15153 
15154   flags &= ~ EF_ARM_EABIMASK;
15155 
15156   if (flags & EF_ARM_RELEXEC)
15157     fprintf (file, _(" [relocatable executable]"));
15158 
15159   if (flags & EF_ARM_PIC)
15160     fprintf (file, _(" [position independent]"));
15161 
15162   if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15163     fprintf (file, _(" [FDPIC ABI supplement]"));
15164 
15165   flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15166 
15167   if (flags)
15168     fprintf (file, _(" <Unrecognised flag bits set>"));
15169 
15170   fputc ('\n', file);
15171 
15172   return true;
15173 }
15174 
15175 static int
15176 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15177 {
15178   switch (ELF_ST_TYPE (elf_sym->st_info))
15179     {
15180     case STT_ARM_TFUNC:
15181       return ELF_ST_TYPE (elf_sym->st_info);
15182 
15183     case STT_ARM_16BIT:
15184       /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15185 	 This allows us to distinguish between data used by Thumb instructions
15186 	 and non-data (which is probably code) inside Thumb regions of an
15187 	 executable.  */
15188       if (type != STT_OBJECT && type != STT_TLS)
15189 	return ELF_ST_TYPE (elf_sym->st_info);
15190       break;
15191 
15192     default:
15193       break;
15194     }
15195 
15196   return type;
15197 }
15198 
15199 static asection *
15200 elf32_arm_gc_mark_hook (asection *sec,
15201 			struct bfd_link_info *info,
15202 			Elf_Internal_Rela *rel,
15203 			struct elf_link_hash_entry *h,
15204 			Elf_Internal_Sym *sym)
15205 {
15206   if (h != NULL)
15207     switch (ELF32_R_TYPE (rel->r_info))
15208       {
15209       case R_ARM_GNU_VTINHERIT:
15210       case R_ARM_GNU_VTENTRY:
15211 	return NULL;
15212       }
15213 
15214   return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15215 }
15216 
15217 /* Look through the relocs for a section during the first phase.  */
15218 
15219 static bool
15220 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15221 			asection *sec, const Elf_Internal_Rela *relocs)
15222 {
15223   Elf_Internal_Shdr *symtab_hdr;
15224   struct elf_link_hash_entry **sym_hashes;
15225   const Elf_Internal_Rela *rel;
15226   const Elf_Internal_Rela *rel_end;
15227   bfd *dynobj;
15228   asection *sreloc;
15229   struct elf32_arm_link_hash_table *htab;
15230   bool call_reloc_p;
15231   bool may_become_dynamic_p;
15232   bool may_need_local_target_p;
15233   unsigned long nsyms;
15234 
15235   if (bfd_link_relocatable (info))
15236     return true;
15237 
15238   BFD_ASSERT (is_arm_elf (abfd));
15239 
15240   htab = elf32_arm_hash_table (info);
15241   if (htab == NULL)
15242     return false;
15243 
15244   sreloc = NULL;
15245 
15246   /* Create dynamic sections for relocatable executables so that we can
15247      copy relocations.  */
15248   if (htab->root.is_relocatable_executable
15249       && ! htab->root.dynamic_sections_created)
15250     {
15251       if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15252 	return false;
15253     }
15254 
15255   if (htab->root.dynobj == NULL)
15256     htab->root.dynobj = abfd;
15257   if (!create_ifunc_sections (info))
15258     return false;
15259 
15260   dynobj = htab->root.dynobj;
15261 
15262   symtab_hdr = & elf_symtab_hdr (abfd);
15263   sym_hashes = elf_sym_hashes (abfd);
15264   nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15265 
15266   rel_end = relocs + sec->reloc_count;
15267   for (rel = relocs; rel < rel_end; rel++)
15268     {
15269       Elf_Internal_Sym *isym;
15270       struct elf_link_hash_entry *h;
15271       struct elf32_arm_link_hash_entry *eh;
15272       unsigned int r_symndx;
15273       int r_type;
15274 
15275       r_symndx = ELF32_R_SYM (rel->r_info);
15276       r_type = ELF32_R_TYPE (rel->r_info);
15277       r_type = arm_real_reloc_type (htab, r_type);
15278 
15279       if (r_symndx >= nsyms
15280 	  /* PR 9934: It is possible to have relocations that do not
15281 	     refer to symbols, thus it is also possible to have an
15282 	     object file containing relocations but no symbol table.  */
15283 	  && (r_symndx > STN_UNDEF || nsyms > 0))
15284 	{
15285 	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15286 			      r_symndx);
15287 	  return false;
15288 	}
15289 
15290       h = NULL;
15291       isym = NULL;
15292       if (nsyms > 0)
15293 	{
15294 	  if (r_symndx < symtab_hdr->sh_info)
15295 	    {
15296 	      /* A local symbol.  */
15297 	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15298 					    abfd, r_symndx);
15299 	      if (isym == NULL)
15300 		return false;
15301 	    }
15302 	  else
15303 	    {
15304 	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15305 	      while (h->root.type == bfd_link_hash_indirect
15306 		     || h->root.type == bfd_link_hash_warning)
15307 		h = (struct elf_link_hash_entry *) h->root.u.i.link;
15308 	    }
15309 	}
15310 
15311       eh = (struct elf32_arm_link_hash_entry *) h;
15312 
15313       call_reloc_p = false;
15314       may_become_dynamic_p = false;
15315       may_need_local_target_p = false;
15316 
15317       /* Could be done earlier, if h were already available.  */
15318       r_type = elf32_arm_tls_transition (info, r_type, h);
15319       switch (r_type)
15320 	{
15321 	case R_ARM_GOTOFFFUNCDESC:
15322 	  {
15323 	    if (h == NULL)
15324 	      {
15325 		if (!elf32_arm_allocate_local_sym_info (abfd))
15326 		  return false;
15327 		if (r_symndx >= elf32_arm_num_entries (abfd))
15328 		  return false;
15329 		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
15330 		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15331 	      }
15332 	    else
15333 	      {
15334 		eh->fdpic_cnts.gotofffuncdesc_cnt++;
15335 	      }
15336 	  }
15337 	  break;
15338 
15339 	case R_ARM_GOTFUNCDESC:
15340 	  {
15341 	    if (h == NULL)
15342 	      {
15343 		/* Such a relocation is not supposed to be generated
15344 		   by GCC on a static function; it could be handled
15345 		   here if that ever became necessary.  */
15346 		return false;
15347 	      }
15348 	    else
15349 	      {
15350 		eh->fdpic_cnts.gotfuncdesc_cnt++;
15351 	      }
15352 	  }
15353 	  break;
15354 
15355 	case R_ARM_FUNCDESC:
15356 	  {
15357 	    if (h == NULL)
15358 	      {
15359 		if (!elf32_arm_allocate_local_sym_info (abfd))
15360 		  return false;
15361 		if (r_symndx >= elf32_arm_num_entries (abfd))
15362 		  return false;
15363 		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
15364 		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15365 	      }
15366 	    else
15367 	      {
15368 		eh->fdpic_cnts.funcdesc_cnt++;
15369 	      }
15370 	  }
15371 	  break;
15372 
15373 	  case R_ARM_GOT32:
15374 	  case R_ARM_GOT_PREL:
15375 	  case R_ARM_TLS_GD32:
15376 	  case R_ARM_TLS_GD32_FDPIC:
15377 	  case R_ARM_TLS_IE32:
15378 	  case R_ARM_TLS_IE32_FDPIC:
15379 	  case R_ARM_TLS_GOTDESC:
15380 	  case R_ARM_TLS_DESCSEQ:
15381 	  case R_ARM_THM_TLS_DESCSEQ:
15382 	  case R_ARM_TLS_CALL:
15383 	  case R_ARM_THM_TLS_CALL:
15384 	    /* This symbol requires a global offset table entry.  */
15385 	    {
15386 	      int tls_type, old_tls_type;
15387 
15388 	      switch (r_type)
15389 		{
15390 		case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15391 		case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15392 
15393 		case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15394 		case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15395 
15396 		case R_ARM_TLS_GOTDESC:
15397 		case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15398 		case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15399 		  tls_type = GOT_TLS_GDESC; break;
15400 
15401 		default: tls_type = GOT_NORMAL; break;
15402 		}
15403 
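	      /* An initial-exec style access from a shared object requires
		 static TLS space, so record that for the dynamic loader.  */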
15404 	      if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15405 		info->flags |= DF_STATIC_TLS;
15406 
15407 	      if (h != NULL)
15408 		{
15409 		  h->got.refcount++;
15410 		  old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15411 		}
15412 	      else
15413 		{
15414 		  /* This is a global offset table entry for a local symbol.  */
15415 		  if (!elf32_arm_allocate_local_sym_info (abfd))
15416 		    return false;
15417 		  if (r_symndx >= elf32_arm_num_entries (abfd))
15418 		    {
15419 		      _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15420 					  r_symndx);
15421 		      return false;
15422 		    }
15423 
15424 		  elf_local_got_refcounts (abfd)[r_symndx] += 1;
15425 		  old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15426 		}
15427 
15428 	      /* If a variable is accessed with both tls methods, two
15429 		 slots may be created.  */
15430 	      if (GOT_TLS_GD_ANY_P (old_tls_type)
15431 		  && GOT_TLS_GD_ANY_P (tls_type))
15432 		tls_type |= old_tls_type;
15433 
15434 	      /* We will already have issued an error message if there
15435 		 is a TLS/non-TLS mismatch, based on the symbol
15436 		 type.  So just combine any TLS types needed.  */
15437 	      if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15438 		  && tls_type != GOT_NORMAL)
15439 		tls_type |= old_tls_type;
15440 
15441 	      /* If the symbol is accessed in both IE and GDESC
15442 		 method, we're able to relax. Turn off the GDESC flag,
15443 		 without messing up with any other kind of tls types
15444 		 that may be involved.  */
15445 	      if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15446 		tls_type &= ~GOT_TLS_GDESC;
15447 
15448 	      if (old_tls_type != tls_type)
15449 		{
15450 		  if (h != NULL)
15451 		    elf32_arm_hash_entry (h)->tls_type = tls_type;
15452 		  else
15453 		    elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15454 		}
15455 	    }
15456 	    /* Fall through.  */
15457 
15458 	  case R_ARM_TLS_LDM32:
15459 	  case R_ARM_TLS_LDM32_FDPIC:
15460 	    if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15461 		htab->tls_ldm_got.refcount++;
15462 	    /* Fall through.  */
15463 
15464 	  case R_ARM_GOTOFF32:
15465 	  case R_ARM_GOTPC:
15466 	    if (htab->root.sgot == NULL
15467 		&& !create_got_section (htab->root.dynobj, info))
15468 	      return false;
15469 	    break;
15470 
15471 	  case R_ARM_PC24:
15472 	  case R_ARM_PLT32:
15473 	  case R_ARM_CALL:
15474 	  case R_ARM_JUMP24:
15475 	  case R_ARM_PREL31:
15476 	  case R_ARM_THM_CALL:
15477 	  case R_ARM_THM_JUMP24:
15478 	  case R_ARM_THM_JUMP19:
15479 	    call_reloc_p = true;
15480 	    may_need_local_target_p = true;
15481 	    break;
15482 
15483 	  case R_ARM_ABS12:
15484 	    /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15485 	       ldr __GOTT_INDEX__ offsets.  */
15486 	    if (htab->root.target_os != is_vxworks)
15487 	      {
15488 		may_need_local_target_p = true;
15489 		break;
15490 	      }
15491 	    else goto jump_over;
15492 
15493 	    /* Fall through.  */
15494 
15495 	  case R_ARM_MOVW_ABS_NC:
15496 	  case R_ARM_MOVT_ABS:
15497 	  case R_ARM_THM_MOVW_ABS_NC:
15498 	  case R_ARM_THM_MOVT_ABS:
15499 	    if (bfd_link_pic (info))
15500 	      {
15501 		_bfd_error_handler
15502 		  (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15503 		   abfd, elf32_arm_howto_table_1[r_type].name,
15504 		   (h) ? h->root.root.string : "a local symbol");
15505 		bfd_set_error (bfd_error_bad_value);
15506 		return false;
15507 	      }
15508 
15509 	    /* Fall through.  */
15510 	  case R_ARM_ABS32:
15511 	  case R_ARM_ABS32_NOI:
15512 	jump_over:
15513 	    if (h != NULL && bfd_link_executable (info))
15514 	      {
15515 		h->pointer_equality_needed = 1;
15516 	      }
15517 	    /* Fall through.  */
15518 	  case R_ARM_REL32:
15519 	  case R_ARM_REL32_NOI:
15520 	  case R_ARM_MOVW_PREL_NC:
15521 	  case R_ARM_MOVT_PREL:
15522 	  case R_ARM_THM_MOVW_PREL_NC:
15523 	  case R_ARM_THM_MOVT_PREL:
15524 
15525 	    /* Should the interworking branches be listed here?  */
15526 	    if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15527 		 || htab->fdpic_p)
15528 		&& (sec->flags & SEC_ALLOC) != 0)
15529 	      {
15530 		if (h == NULL
15531 		    && elf32_arm_howto_from_type (r_type)->pc_relative)
15532 		  {
15533 		    /* In shared libraries and relocatable executables,
15534 		       we treat local relative references as calls;
15535 		       see the related SYMBOL_CALLS_LOCAL code in
15536 		       allocate_dynrelocs.  */
15537 		    call_reloc_p = true;
15538 		    may_need_local_target_p = true;
15539 		  }
15540 		else
15541 		  /* We are creating a shared library or relocatable
15542 		     executable, and this is a reloc against a global symbol,
15543 		     or a non-PC-relative reloc against a local symbol.
15544 		     We may need to copy the reloc into the output.  */
15545 		  may_become_dynamic_p = true;
15546 	      }
15547 	    else
15548 	      may_need_local_target_p = true;
15549 	    break;
15550 
15551 	/* This relocation describes the C++ object vtable hierarchy.
15552 	   Reconstruct it for later use during GC.  */
15553 	case R_ARM_GNU_VTINHERIT:
15554 	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15555 	    return false;
15556 	  break;
15557 
15558 	/* This relocation describes which C++ vtable entries are actually
15559 	   used.  Record for later use during GC.  */
15560 	case R_ARM_GNU_VTENTRY:
15561 	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15562 	    return false;
15563 	  break;
15564 	}
15565 
15566       if (h != NULL)
15567 	{
15568 	  if (call_reloc_p)
15569 	    /* We may need a .plt entry if the function this reloc
15570 	       refers to is in a different object, regardless of the
15571 	       symbol's type.  We can't tell for sure yet, because
15572 	       something later might force the symbol local.  */
15573 	    h->needs_plt = 1;
15574 	  else if (may_need_local_target_p)
15575 	    /* If this reloc is in a read-only section, we might
15576 	       need a copy reloc.  We can't check reliably at this
15577 	       stage whether the section is read-only, as input
15578 	       sections have not yet been mapped to output sections.
15579 	       Tentatively set the flag for now, and correct in
15580 	       adjust_dynamic_symbol.  */
15581 	    h->non_got_ref = 1;
15582 	}
15583 
15584       if (may_need_local_target_p
15585 	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15586 	{
15587 	  union gotplt_union *root_plt;
15588 	  struct arm_plt_info *arm_plt;
15589 	  struct arm_local_iplt_info *local_iplt;
15590 
15591 	  if (h != NULL)
15592 	    {
15593 	      root_plt = &h->plt;
15594 	      arm_plt = &eh->plt;
15595 	    }
15596 	  else
15597 	    {
15598 	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15599 	      if (local_iplt == NULL)
15600 		return false;
15601 	      root_plt = &local_iplt->root;
15602 	      arm_plt = &local_iplt->arm;
15603 	    }
15604 
15605 	  /* If the symbol is a function that doesn't bind locally,
15606 	     this relocation will need a PLT entry.  */
15607 	  if (root_plt->refcount != -1)
15608 	    root_plt->refcount += 1;
15609 
15610 	  if (!call_reloc_p)
15611 	    arm_plt->noncall_refcount++;
15612 
15613 	  /* It's too early to use htab->use_blx here, so we have to
15614 	     record possible blx references separately from
15615 	     relocs that definitely need a thumb stub.  */
15616 
15617 	  if (r_type == R_ARM_THM_CALL)
15618 	    arm_plt->maybe_thumb_refcount += 1;
15619 
15620 	  if (r_type == R_ARM_THM_JUMP24
15621 	      || r_type == R_ARM_THM_JUMP19)
15622 	    arm_plt->thumb_refcount += 1;
15623 	}
15624 
15625       if (may_become_dynamic_p)
15626 	{
15627 	  struct elf_dyn_relocs *p, **head;
15628 
15629 	  /* Create a reloc section in dynobj.  */
15630 	  if (sreloc == NULL)
15631 	    {
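	      /* The third argument is the section alignment (as a power of
		 two); the final argument selects a RELA rather than a REL
		 section.  */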
15632 	      sreloc = _bfd_elf_make_dynamic_reloc_section
15633 		(sec, dynobj, 2, abfd, ! htab->use_rel);
15634 
15635 	      if (sreloc == NULL)
15636 		return false;
15637 	    }
15638 
15639 	  /* If this is a global symbol, count the number of
15640 	     relocations we need for this symbol.  */
15641 	  if (h != NULL)
15642 	    head = &h->dyn_relocs;
15643 	  else
15644 	    {
15645 	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15646 	      if (head == NULL)
15647 		return false;
15648 	    }
15649 
15650 	  p = *head;
15651 	  if (p == NULL || p->sec != sec)
15652 	    {
15653 	      size_t amt = sizeof *p;
15654 
15655 	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15656 	      if (p == NULL)
15657 		return false;
15658 	      p->next = *head;
15659 	      *head = p;
15660 	      p->sec = sec;
15661 	      p->count = 0;
15662 	      p->pc_count = 0;
15663 	    }
15664 
15665 	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
15666 	    p->pc_count += 1;
15667 	  p->count += 1;
15668 	  if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
15669 	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
15670 	    {
15671 	      /* Only R_ARM_ABS32 and R_ARM_ABS32_NOI are supported here,
15672 		 because we assume that all such relocations will become
15673 		 rofixups.  */
15674 	      _bfd_error_handler
15675 		(_("FDPIC does not yet support %s relocation"
15676 		   " to become dynamic for executable"),
15677 		 elf32_arm_howto_table_1[r_type].name);
15678 	      abort ();
15679 	    }
15680 	}
15681     }
15682 
15683   return true;
15684 }
15685 
15686 static void
15687 elf32_arm_update_relocs (asection *o,
15688 			 struct bfd_elf_section_reloc_data *reldata)
15689 {
15690   void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15691   void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15692   const struct elf_backend_data *bed;
15693   _arm_elf_section_data *eado;
15694   struct bfd_link_order *p;
15695   bfd_byte *erela_head, *erela;
15696   Elf_Internal_Rela *irela_head, *irela;
15697   Elf_Internal_Shdr *rel_hdr;
15698   bfd *abfd;
15699   unsigned int count;
15700 
15701   eado = get_arm_elf_section_data (o);
15702 
15703   if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15704     return;
15705 
15706   abfd = o->owner;
15707   bed = get_elf_backend_data (abfd);
15708   rel_hdr = reldata->hdr;
15709 
15710   if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15711     {
15712       swap_in = bed->s->swap_reloc_in;
15713       swap_out = bed->s->swap_reloc_out;
15714     }
15715   else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15716     {
15717       swap_in = bed->s->swap_reloca_in;
15718       swap_out = bed->s->swap_reloca_out;
15719     }
15720   else
15721     abort ();
15722 
15723   erela_head = rel_hdr->contents;
15724   irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15725     ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
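  /* The extra element leaves room for the R_ARM_PREL31 relocation added
     when a CANTUNWIND entry is appended at the end (see below).  */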
15726 
15727   erela = erela_head;
15728   irela = irela_head;
15729   count = 0;
15730 
15731   for (p = o->map_head.link_order; p; p = p->next)
15732     {
15733       if (p->type == bfd_section_reloc_link_order
15734 	  || p->type == bfd_symbol_reloc_link_order)
15735 	{
15736 	  (*swap_in) (abfd, erela, irela);
15737 	  erela += rel_hdr->sh_entsize;
15738 	  irela++;
15739 	  count++;
15740 	}
15741       else if (p->type == bfd_indirect_link_order)
15742 	{
15743 	  struct bfd_elf_section_reloc_data *input_reldata;
15744 	  arm_unwind_table_edit *edit_list, *edit_tail;
15745 	  _arm_elf_section_data *eadi;
15746 	  bfd_size_type j;
15747 	  bfd_vma offset;
15748 	  asection *i;
15749 
15750 	  i = p->u.indirect.section;
15751 
15752 	  eadi = get_arm_elf_section_data (i);
15753 	  edit_list = eadi->u.exidx.unwind_edit_list;
15754 	  edit_tail = eadi->u.exidx.unwind_edit_tail;
15755 	  offset = i->output_offset;
15756 
15757 	  if (eadi->elf.rel.hdr &&
15758 	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15759 	    input_reldata = &eadi->elf.rel;
15760 	  else if (eadi->elf.rela.hdr &&
15761 		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15762 	    input_reldata = &eadi->elf.rela;
15763 	  else
15764 	    abort ();
15765 
15766 	  if (edit_list)
15767 	    {
15768 	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15769 		{
15770 		  arm_unwind_table_edit *edit_node, *edit_next;
15771 		  bfd_vma bias;
15772 		  bfd_vma reloc_index;
15773 
15774 		  (*swap_in) (abfd, erela, irela);
15775 		  reloc_index = (irela->r_offset - offset) / 8;
15776 
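		  /* Count the unwind-table edits that apply at or before
		     this entry; each one shifts the entry by 8 bytes (one
		     EXIDX table entry) in the edited table.  */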
15777 		  bias = 0;
15778 		  edit_node = edit_list;
15779 		  for (edit_next = edit_list;
15780 		       edit_next && edit_next->index <= reloc_index;
15781 		       edit_next = edit_node->next)
15782 		    {
15783 		      bias++;
15784 		      edit_node = edit_next;
15785 		    }
15786 
15787 		  if (edit_node->type != DELETE_EXIDX_ENTRY
15788 		      || edit_node->index != reloc_index)
15789 		    {
15790 		      irela->r_offset -= bias * 8;
15791 		      irela++;
15792 		      count++;
15793 		    }
15794 
15795 		  erela += rel_hdr->sh_entsize;
15796 		}
15797 
15798 	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15799 		{
15800 		  /* New relocation entity.  */
15801 		  asection *text_sec = edit_tail->linked_section;
15802 		  asection *text_out = text_sec->output_section;
15803 		  bfd_vma exidx_offset = offset + i->size - 8;
15804 
15805 		  irela->r_addend = 0;
15806 		  irela->r_offset = exidx_offset;
15807 		  irela->r_info = ELF32_R_INFO
15808 		    (text_out->target_index, R_ARM_PREL31);
15809 		  irela++;
15810 		  count++;
15811 		}
15812 	    }
15813 	  else
15814 	    {
15815 	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15816 		{
15817 		  (*swap_in) (abfd, erela, irela);
15818 		  erela += rel_hdr->sh_entsize;
15819 		  irela++;
15820 		}
15821 
15822 	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15823 	    }
15824 	}
15825     }
15826 
15827   reldata->count = count;
15828   rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15829 
15830   erela = erela_head;
15831   irela = irela_head;
15832   while (count > 0)
15833     {
15834       (*swap_out) (abfd, irela, erela);
15835       erela += rel_hdr->sh_entsize;
15836       irela++;
15837       count--;
15838     }
15839 
15840   free (irela_head);
15841 
15842   /* Hashes are no longer valid.  */
15843   free (reldata->hashes);
15844   reldata->hashes = NULL;
15845 }
15846 
15847 /* Unwinding tables are not referenced directly.  This pass marks them as
15848    required if the corresponding code section is marked.  Similarly, ARMv8-M
15849    secure entry functions can only be referenced by SG veneers which are
15850    created after the GC process. They need to be marked in case they reside in
15851    their own section (as would be the case if code was compiled with
15852    -ffunction-sections).  */
15853 
15854 static bool
15855 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15856 				  elf_gc_mark_hook_fn gc_mark_hook)
15857 {
15858   bfd *sub;
15859   Elf_Internal_Shdr **elf_shdrp;
15860   asection *cmse_sec;
15861   obj_attribute *out_attr;
15862   Elf_Internal_Shdr *symtab_hdr;
15863   unsigned i, sym_count, ext_start;
15864   const struct elf_backend_data *bed;
15865   struct elf_link_hash_entry **sym_hashes;
15866   struct elf32_arm_link_hash_entry *cmse_hash;
15867   bool again, is_v8m, first_bfd_browse = true;
15868   bool debug_sec_need_to_be_marked = false;
15869   asection *isec;
15870 
15871   _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15872 
15873   out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15874   is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15875 	   && out_attr[Tag_CPU_arch_profile].i == 'M';
15876 
15877   /* Marking EH data may cause additional code sections to be marked,
15878      requiring multiple passes.  */
15879   again = true;
15880   while (again)
15881     {
15882       again = false;
15883       for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15884 	{
15885 	  asection *o;
15886 
15887 	  if (! is_arm_elf (sub))
15888 	    continue;
15889 
15890 	  elf_shdrp = elf_elfsections (sub);
15891 	  for (o = sub->sections; o != NULL; o = o->next)
15892 	    {
15893 	      Elf_Internal_Shdr *hdr;
15894 
15895 	      hdr = &elf_section_data (o)->this_hdr;
15896 	      if (hdr->sh_type == SHT_ARM_EXIDX
15897 		  && hdr->sh_link
15898 		  && hdr->sh_link < elf_numsections (sub)
15899 		  && !o->gc_mark
15900 		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15901 		{
15902 		  again = true;
15903 		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15904 		    return false;
15905 		}
15906 	    }
15907 
15908 	  /* Mark the sections holding ARMv8-M secure entry functions.  We mark
15909 	     all of them, so there is no need for a second pass.  */
15910 	  if (is_v8m && first_bfd_browse)
15911 	    {
15912 	      sym_hashes = elf_sym_hashes (sub);
15913 	      bed = get_elf_backend_data (sub);
15914 	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15915 	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15916 	      ext_start = symtab_hdr->sh_info;
15917 
15918 	      /* Scan symbols.  */
15919 	      for (i = ext_start; i < sym_count; i++)
15920 		{
15921 		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15922 
15923 		  /* Assume it is a special symbol.  If not, cmse_scan will
15924 		     warn about it and the user can do something about it.  */
15925 		  if (startswith (cmse_hash->root.root.root.string,
15926 				    CMSE_PREFIX))
15927 		    {
15928 		      cmse_sec = cmse_hash->root.root.u.def.section;
15929 		      if (!cmse_sec->gc_mark
15930 			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
15931 			return false;
15932 		      /* The debug sections related to these secure entry
15933 			 functions are marked when the flag below is set.  */
15934 		      debug_sec_need_to_be_marked = true;
15935 		    }
15936 		}
15937 
15938 	      if (debug_sec_need_to_be_marked)
15939 		{
15940 		  /* Loop over all the sections of the object file containing
15941 		     Armv8-M secure entry functions and mark all of its debug
15942 		     sections.  */
15943 		  for (isec = sub->sections; isec != NULL; isec = isec->next)
15944 		    {
15945 		      /* Mark any debug section that is not already marked.  */
15946 		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
15947 			isec->gc_mark = 1;
15948 		    }
15949 		  debug_sec_need_to_be_marked = false;
15950 		}
15951 	    }
15952 	}
15953       first_bfd_browse = false;
15954     }
15955 
15956   return true;
15957 }
15958 
15959 /* Treat mapping symbols as special target symbols.  */
15960 
15961 static bool
15962 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15963 {
15964   return bfd_is_arm_special_symbol_name (sym->name,
15965 					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15966 }
15967 
15968 /* If the ELF symbol SYM might be a function in SEC, return the
15969    function size and set *CODE_OFF to the function's entry point,
15970    otherwise return zero.  */
15971 
15972 static bfd_size_type
15973 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
15974 			      bfd_vma *code_off)
15975 {
15976   bfd_size_type size;
15977   elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
15978 
15979   if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
15980 		     | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
15981       || sym->section != sec)
15982     return 0;
15983 
15984   size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
15985 
15986   if (!(sym->flags & BSF_SYNTHETIC))
15987     switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
15988       {
15989 	case STT_NOTYPE:
15990 	  /* Ignore symbols created by the annobin plugin for gcc and clang.
15991 	     These symbols are hidden, local, notype and have a size of 0.  */
15992 	  if (size == 0
15993 	      && sym->flags & BSF_LOCAL
15994 	      && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
15995 	    return 0;
15996 	  /* Fall through.  */
15997 	case STT_FUNC:
15998 	case STT_ARM_TFUNC:
15999 	  /* FIXME: Allow STT_GNU_IFUNC as well ?  */
16000 	  break;
16001 	default:
16002 	  return 0;
16003       }
16004 
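  /* Local ARM special symbols, such as the mapping symbols $a, $t and $d,
     never denote functions.  */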
16005   if ((sym->flags & BSF_LOCAL)
16006       && bfd_is_arm_special_symbol_name (sym->name,
16007 					 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16008     return 0;
16009 
16010   *code_off = sym->value;
16011 
16012   /* Do not return 0 for the function's size.  */
16013   return size ? size : 1;
16014 
16015 }
16016 
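/* Find the source file, function and line number of the call site of an
   inlined function, using the DWARF2 debug information in ABFD.  */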
16017 static bool
16018 elf32_arm_find_inliner_info (bfd *	    abfd,
16019 			     const char **  filename_ptr,
16020 			     const char **  functionname_ptr,
16021 			     unsigned int * line_ptr)
16022 {
16023   bool found;
16024   found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16025 					 functionname_ptr, line_ptr,
16026 					 & elf_tdata (abfd)->dwarf2_find_line_info);
16027   return found;
16028 }
16029 
16030 /* Adjust a symbol defined by a dynamic object and referenced by a
16031    regular object.  The current definition is in some section of the
16032    dynamic object, but we're not including those sections.  We have to
16033    change the definition to something the rest of the link can
16034    understand.  */
16035 
16036 static bool
16037 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16038 				 struct elf_link_hash_entry * h)
16039 {
16040   bfd * dynobj;
16041   asection *s, *srel;
16042   struct elf32_arm_link_hash_entry * eh;
16043   struct elf32_arm_link_hash_table *globals;
16044 
16045   globals = elf32_arm_hash_table (info);
16046   if (globals == NULL)
16047     return false;
16048 
16049   dynobj = elf_hash_table (info)->dynobj;
16050 
16051   /* Make sure we know what is going on here.  */
16052   BFD_ASSERT (dynobj != NULL
16053 	      && (h->needs_plt
16054 		  || h->type == STT_GNU_IFUNC
16055 		  || h->is_weakalias
16056 		  || (h->def_dynamic
16057 		      && h->ref_regular
16058 		      && !h->def_regular)));
16059 
16060   eh = (struct elf32_arm_link_hash_entry *) h;
16061 
16062   /* If this is a function, put it in the procedure linkage table.  We
16063      will fill in the contents of the procedure linkage table later,
16064      when we know the address of the .got section.  */
16065   if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16066     {
16067       /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16068 	 symbol binds locally.  */
16069       if (h->plt.refcount <= 0
16070 	  || (h->type != STT_GNU_IFUNC
16071 	      && (SYMBOL_CALLS_LOCAL (info, h)
16072 		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16073 		      && h->root.type == bfd_link_hash_undefweak))))
16074 	{
16075 	  /* This case can occur if we saw a PLT32 reloc in an input
16076 	     file, but the symbol was never referred to by a dynamic
16077 	     object, or if all references were garbage collected.  In
16078 	     such a case, we don't actually need to build a procedure
16079 	     linkage table, and we can just do a PC24 reloc instead.  */
16080 	  h->plt.offset = (bfd_vma) -1;
16081 	  eh->plt.thumb_refcount = 0;
16082 	  eh->plt.maybe_thumb_refcount = 0;
16083 	  eh->plt.noncall_refcount = 0;
16084 	  h->needs_plt = 0;
16085 	}
16086 
16087       return true;
16088     }
16089   else
16090     {
16091       /* It's possible that we incorrectly decided a .plt reloc was
16092 	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16093 	 in check_relocs.  We can't decide accurately between function
16094 	 and non-function syms in check_relocs; objects loaded later in
16095 	 the link may change h->type.  So fix it now.  */
16096       h->plt.offset = (bfd_vma) -1;
16097       eh->plt.thumb_refcount = 0;
16098       eh->plt.maybe_thumb_refcount = 0;
16099       eh->plt.noncall_refcount = 0;
16100     }
16101 
16102   /* If this is a weak symbol, and there is a real definition, the
16103      processor independent code will have arranged for us to see the
16104      real definition first, and we can just use the same value.  */
16105   if (h->is_weakalias)
16106     {
16107       struct elf_link_hash_entry *def = weakdef (h);
16108       BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16109       h->root.u.def.section = def->root.u.def.section;
16110       h->root.u.def.value = def->root.u.def.value;
16111       return true;
16112     }
16113 
16114   /* If there are no non-GOT references, we do not need a copy
16115      relocation.  */
16116   if (!h->non_got_ref)
16117     return true;
16118 
16119   /* This is a reference to a symbol defined by a dynamic object which
16120      is not a function.  */
16121 
16122   /* If we are creating a shared library, we must presume that the
16123      only references to the symbol are via the global offset table.
16124      For such cases we need not do anything here; the relocations will
16125      be handled correctly by relocate_section.  Relocatable executables
16126      can reference data in shared objects directly, so we don't need to
16127      do anything here.  */
16128   if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
16129     return true;
16130 
16131   /* We must allocate the symbol in our .dynbss section, which will
16132      become part of the .bss section of the executable.  There will be
16133      an entry for this symbol in the .dynsym section.  The dynamic
16134      object will contain position independent code, so all references
16135      from the dynamic object to this symbol will go through the global
16136      offset table.  The dynamic linker will use the .dynsym entry to
16137      determine the address it must put in the global offset table, so
16138      both the dynamic object and the regular object will refer to the
16139      same memory location for the variable.  */
16140   /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16141      linker to copy the initial value out of the dynamic object and into
16142      the runtime process image.  We need to remember the offset into the
16143      .rel(a).bss section we are going to use.  */
16144   if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16145     {
16146       s = globals->root.sdynrelro;
16147       srel = globals->root.sreldynrelro;
16148     }
16149   else
16150     {
16151       s = globals->root.sdynbss;
16152       srel = globals->root.srelbss;
16153     }
16154   if (info->nocopyreloc == 0
16155       && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16156       && h->size != 0)
16157     {
16158       elf32_arm_allocate_dynrelocs (info, srel, 1);
16159       h->needs_copy = 1;
16160     }
16161 
16162   return _bfd_elf_adjust_dynamic_copy (info, h, s);
16163 }
16164 
16165 /* Allocate space in .plt, .got and associated reloc sections for
16166    dynamic relocs.  */
16167 
16168 static bool
16169 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16170 {
16171   struct bfd_link_info *info;
16172   struct elf32_arm_link_hash_table *htab;
16173   struct elf32_arm_link_hash_entry *eh;
16174   struct elf_dyn_relocs *p;
16175 
16176   if (h->root.type == bfd_link_hash_indirect)
16177     return true;
16178 
16179   eh = (struct elf32_arm_link_hash_entry *) h;
16180 
16181   info = (struct bfd_link_info *) inf;
16182   htab = elf32_arm_hash_table (info);
16183   if (htab == NULL)
16184     return false;
16185 
16186   if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16187       && h->plt.refcount > 0)
16188     {
16189       /* Make sure this symbol is output as a dynamic symbol.
16190 	 Undefined weak syms won't yet be marked as dynamic.  */
16191       if (h->dynindx == -1 && !h->forced_local
16192 	  && h->root.type == bfd_link_hash_undefweak)
16193 	{
16194 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16195 	    return false;
16196 	}
16197 
16198       /* If the call in the PLT entry binds locally, the associated
16199 	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16200 	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
16201 	 than the .plt section.  */
16202       if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16203 	{
16204 	  eh->is_iplt = 1;
16205 	  if (eh->plt.noncall_refcount == 0
16206 	      && SYMBOL_REFERENCES_LOCAL (info, h))
16207 	    /* All non-call references can be resolved directly.
16208 	       This means that they can (and in some cases, must)
16209 	       resolve directly to the run-time target, rather than
16210 	       to the PLT.  That in turn means that any .got entry
16211 	       would be equal to the .igot.plt entry, so there's
16212 	       no point having both.  */
16213 	    h->got.refcount = 0;
16214 	}
16215 
16216       if (bfd_link_pic (info)
16217 	  || eh->is_iplt
16218 	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16219 	{
16220 	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16221 
16222 	  /* If this symbol is not defined in a regular file, and we are
16223 	     not generating a shared library, then set the symbol to this
16224 	     location in the .plt.  This is required to make function
16225 	     pointers compare as equal between the normal executable and
16226 	     the shared library.  */
16227 	  if (! bfd_link_pic (info)
16228 	      && !h->def_regular)
16229 	    {
16230 	      h->root.u.def.section = htab->root.splt;
16231 	      h->root.u.def.value = h->plt.offset;
16232 
16233 	      /* Make sure the function is not marked as Thumb, in case
16234 		 it is the target of an ABS32 relocation, which will
16235 		 point to the PLT entry.  */
16236 	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16237 	    }
16238 
16239 	  /* VxWorks executables have a second set of relocations for
16240 	     each PLT entry.  They go in a separate relocation section,
16241 	     which is processed by the kernel loader.  */
16242 	  if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16243 	    {
16244 	      /* There is a relocation for the initial PLT entry:
16245 		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
16246 	      if (h->plt.offset == htab->plt_header_size)
16247 		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16248 
16249 	      /* There are two extra relocations for each subsequent
16250 		 PLT entry: an R_ARM_32 relocation for the GOT entry,
16251 		 and an R_ARM_32 relocation for the PLT entry.  */
16252 	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16253 	    }
16254 	}
16255       else
16256 	{
16257 	  h->plt.offset = (bfd_vma) -1;
16258 	  h->needs_plt = 0;
16259 	}
16260     }
16261   else
16262     {
16263       h->plt.offset = (bfd_vma) -1;
16264       h->needs_plt = 0;
16265     }
16266 
16267   eh = (struct elf32_arm_link_hash_entry *) h;
16268   eh->tlsdesc_got = (bfd_vma) -1;
16269 
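  /* Reserve the GOT slot (or slots, for TLS) that this symbol needs, along
     with any dynamic relocations or rofixups against them.  */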
16270   if (h->got.refcount > 0)
16271     {
16272       asection *s;
16273       bool dyn;
16274       int tls_type = elf32_arm_hash_entry (h)->tls_type;
16275       int indx;
16276 
16277       /* Make sure this symbol is output as a dynamic symbol.
16278 	 Undefined weak syms won't yet be marked as dynamic.  */
16279       if (htab->root.dynamic_sections_created
16280 	  && h->dynindx == -1
16281 	  && !h->forced_local
16282 	  && h->root.type == bfd_link_hash_undefweak)
16283 	{
16284 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16285 	    return false;
16286 	}
16287 
16288       s = htab->root.sgot;
16289       h->got.offset = s->size;
16290 
16291       if (tls_type == GOT_UNKNOWN)
16292 	abort ();
16293 
16294       if (tls_type == GOT_NORMAL)
16295 	/* Non-TLS symbols need one GOT slot.  */
16296 	s->size += 4;
16297       else
16298 	{
16299 	  if (tls_type & GOT_TLS_GDESC)
16300 	    {
16301 	      /* R_ARM_TLS_DESC needs 2 GOT slots.  */
16302 	      eh->tlsdesc_got
16303 		= (htab->root.sgotplt->size
16304 		   - elf32_arm_compute_jump_table_size (htab));
16305 	      htab->root.sgotplt->size += 8;
16306 	      h->got.offset = (bfd_vma) -2;
16307 	      /* plt.got_offset needs to know there's a TLS_DESC
16308 		 reloc in the middle of .got.plt.  */
16309 	      htab->num_tls_desc++;
16310 	    }
16311 
16312 	  if (tls_type & GOT_TLS_GD)
16313 	    {
16314 	      /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16315 		 consecutive GOT slots.  If the symbol is both GD
16316 		 and GDESC, got.offset may have been
16317 		 overwritten.  */
16318 	      h->got.offset = s->size;
16319 	      s->size += 8;
16320 	    }
16321 
16322 	  if (tls_type & GOT_TLS_IE)
16323 	    /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16324 	       slot.  */
16325 	    s->size += 4;
16326 	}
16327 
16328       dyn = htab->root.dynamic_sections_created;
16329 
16330       indx = 0;
16331       if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16332 	  && (!bfd_link_pic (info)
16333 	      || !SYMBOL_REFERENCES_LOCAL (info, h)))
16334 	indx = h->dynindx;
16335 
16336       if (tls_type != GOT_NORMAL
16337 	  && (bfd_link_dll (info) || indx != 0)
16338 	  && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16339 	      || h->root.type != bfd_link_hash_undefweak))
16340 	{
16341 	  if (tls_type & GOT_TLS_IE)
16342 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16343 
16344 	  if (tls_type & GOT_TLS_GD)
16345 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16346 
16347 	  if (tls_type & GOT_TLS_GDESC)
16348 	    {
16349 	      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16350 	      /* GDESC needs a trampoline to jump to.  */
16351 	      htab->tls_trampoline = -1;
16352 	    }
16353 
16354 	  /* Only GD needs it.  GDESC just emits one relocation per
16355 	     2 entries.  */
16356 	  if ((tls_type & GOT_TLS_GD) && indx != 0)
16357 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16358 	}
16359       else if (((indx != -1) || htab->fdpic_p)
16360 	       && !SYMBOL_REFERENCES_LOCAL (info, h))
16361 	{
16362 	  if (htab->root.dynamic_sections_created)
16363 	    /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
16364 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16365 	}
16366       else if (h->type == STT_GNU_IFUNC
16367 	       && eh->plt.noncall_refcount == 0)
16368 	/* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16369 	   they all resolve dynamically instead.  Reserve room for the
16370 	   GOT entry's R_ARM_IRELATIVE relocation.  */
16371 	elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16372       else if (bfd_link_pic (info)
16373 	       && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16374 	/* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
16375 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16376       else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16377 	/* Reserve room for rofixup for FDPIC executable.  */
16378 	/* TLS relocs do not need space since they are completely
16379 	   resolved.  */
16380 	htab->srofixup->size += 4;
16381     }
16382   else
16383     h->got.offset = (bfd_vma) -1;
16384 
16385   /* FDPIC support.  */
16386   if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16387     {
16388       /* Symbol mustn't be exported.  */
16389       if (h->dynindx != -1)
16390 	abort ();
16391 
16392       /* We only allocate one function descriptor with its associated
16393 	 relocation.  */
16394       if (eh->fdpic_cnts.funcdesc_offset == -1)
16395 	{
16396 	  asection *s = htab->root.sgot;
16397 
16398 	  eh->fdpic_cnts.funcdesc_offset = s->size;
16399 	  s->size += 8;
16400 	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16401 	  if (bfd_link_pic (info))
16402 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16403 	  else
16404 	    htab->srofixup->size += 8;
16405 	}
16406     }
16407 
16408   if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16409     {
16410       asection *s = htab->root.sgot;
16411 
16412       if (htab->root.dynamic_sections_created && h->dynindx == -1
16413 	  && !h->forced_local)
16414 	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16415 	  return false;
16416 
16417       if (h->dynindx == -1)
16418 	{
16419 	  /* We only allocate one function descriptor with its
16420 	     associated relocation.  */
16421 	  if (eh->fdpic_cnts.funcdesc_offset == -1)
16422 	    {
16423 
16424 	      eh->fdpic_cnts.funcdesc_offset = s->size;
16425 	      s->size += 8;
16426 	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16427 		 rofixups.  */
16428 	      if (bfd_link_pic (info))
16429 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16430 	      else
16431 		htab->srofixup->size += 8;
16432 	    }
16433 	}
16434 
16435       /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16436 	 R_ARM_RELATIVE/rofixup relocation on it.  */
16437       eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16438       s->size += 4;
16439       if (h->dynindx == -1 && !bfd_link_pic (info))
16440 	htab->srofixup->size += 4;
16441       else
16442 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16443     }
16444 
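  /* R_ARM_FUNCDESC references: for local symbols make sure a function
     descriptor exists, and reserve one dynamic relocation or rofixup per
     reference.  */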
16445   if (eh->fdpic_cnts.funcdesc_cnt > 0)
16446     {
16447       if (htab->root.dynamic_sections_created && h->dynindx == -1
16448 	  && !h->forced_local)
16449 	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16450 	  return false;
16451 
16452       if (h->dynindx == -1)
16453 	{
16454 	  /* We only allocate one function descriptor with its
16455 	     associated relocation.  */
16456 	  if (eh->fdpic_cnts.funcdesc_offset == -1)
16457 	    {
16458 	      asection *s = htab->root.sgot;
16459 
16460 	      eh->fdpic_cnts.funcdesc_offset = s->size;
16461 	      s->size += 8;
16462 	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16463 		 rofixups.  */
16464 	      if (bfd_link_pic (info))
16465 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16466 	      else
16467 		htab->srofixup->size += 8;
16468 	    }
16469 	}
16470       if (h->dynindx == -1 && !bfd_link_pic (info))
16471 	{
16472 	  /* For an FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
16473 	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16474 	}
16475       else
16476 	{
16477 	  /* We will need one dynamic reloc per reference; it will be either
16478 	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
16479 	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16480 					eh->fdpic_cnts.funcdesc_cnt);
16481 	}
16482     }
16483 
16484   /* Allocate stubs for exported Thumb functions on v4t.  */
16485   if (!htab->use_blx && h->dynindx != -1
16486       && h->def_regular
16487       && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16488       && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16489     {
16490       struct elf_link_hash_entry * th;
16491       struct bfd_link_hash_entry * bh;
16492       struct elf_link_hash_entry * myh;
16493       char name[1024];
16494       asection *s;
16495       bh = NULL;
16496       /* Create a new symbol to register the real location of the function.  */
16497       s = h->root.u.def.section;
16498       sprintf (name, "__real_%s", h->root.root.string);
16499       _bfd_generic_link_add_one_symbol (info, s->owner,
16500 					name, BSF_GLOBAL, s,
16501 					h->root.u.def.value,
16502 					NULL, true, false, &bh);
16503 
16504       myh = (struct elf_link_hash_entry *) bh;
16505       myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16506       myh->forced_local = 1;
16507       ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16508       eh->export_glue = myh;
16509       th = record_arm_to_thumb_glue (info, h);
16510       /* Point the symbol at the stub.  */
16511       h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16512       ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16513       h->root.u.def.section = th->root.u.def.section;
16514       h->root.u.def.value = th->root.u.def.value & ~1;
16515     }
16516 
16517   if (h->dyn_relocs == NULL)
16518     return true;
16519 
16520   /* In the shared -Bsymbolic case, discard space allocated for
16521      dynamic pc-relative relocs against symbols which turn out to be
16522      defined in regular objects.  For the normal shared case, discard
16523      space for pc-relative relocs that have become local due to symbol
16524      visibility changes.  */
16525 
16526   if (bfd_link_pic (info)
16527       || htab->root.is_relocatable_executable
16528       || htab->fdpic_p)
16529     {
16530       /* Relocs that use pc_count are PC-relative forms, which will appear
16531 	 on something like ".long foo - ." or "movw REG, foo - .".  We want
16532 	 calls to protected symbols to resolve directly to the function
16533 	 rather than going via the plt.  If people want function pointer
16534 	 comparisons to work as expected then they should avoid writing
16535 	 assembly like ".long foo - .".  */
16536       if (SYMBOL_CALLS_LOCAL (info, h))
16537 	{
16538 	  struct elf_dyn_relocs **pp;
16539 
16540 	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16541 	    {
16542 	      p->count -= p->pc_count;
16543 	      p->pc_count = 0;
16544 	      if (p->count == 0)
16545 		*pp = p->next;
16546 	      else
16547 		pp = &p->next;
16548 	    }
16549 	}
16550 
16551       if (htab->root.target_os == is_vxworks)
16552 	{
16553 	  struct elf_dyn_relocs **pp;
16554 
16555 	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16556 	    {
16557 	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16558 		*pp = p->next;
16559 	      else
16560 		pp = &p->next;
16561 	    }
16562 	}
16563 
16564       /* Also discard relocs on undefined weak syms with non-default
16565 	 visibility.  */
16566       if (h->dyn_relocs != NULL
16567 	  && h->root.type == bfd_link_hash_undefweak)
16568 	{
16569 	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16570 	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16571 	    h->dyn_relocs = NULL;
16572 
16573 	  /* Make sure undefined weak symbols are output as a dynamic
16574 	     symbol in PIEs.  */
16575 	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
16576 		   && !h->forced_local)
16577 	    {
16578 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16579 		return false;
16580 	    }
16581 	}
16582 
16583       else if (htab->root.is_relocatable_executable && h->dynindx == -1
16584 	       && h->root.type == bfd_link_hash_new)
16585 	{
16586 	  /* Output absolute symbols so that we can create relocations
16587 	     against them.  For normal symbols we output a relocation
16588 	     against the section that contains them.  */
16589 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16590 	    return false;
16591 	}
16592 
16593     }
16594   else
16595     {
16596       /* For the non-shared case, discard space for relocs against
16597 	 symbols which turn out to need copy relocs or are not
16598 	 dynamic.  */
16599 
16600       if (!h->non_got_ref
16601 	  && ((h->def_dynamic
16602 	       && !h->def_regular)
16603 	      || (htab->root.dynamic_sections_created
16604 		  && (h->root.type == bfd_link_hash_undefweak
16605 		      || h->root.type == bfd_link_hash_undefined))))
16606 	{
16607 	  /* Make sure this symbol is output as a dynamic symbol.
16608 	     Undefined weak syms won't yet be marked as dynamic.  */
16609 	  if (h->dynindx == -1 && !h->forced_local
16610 	      && h->root.type == bfd_link_hash_undefweak)
16611 	    {
16612 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16613 		return false;
16614 	    }
16615 
16616 	  /* If that succeeded, we know we'll be keeping all the
16617 	     relocs.  */
16618 	  if (h->dynindx != -1)
16619 	    goto keep;
16620 	}
16621 
16622       h->dyn_relocs = NULL;
16623 
16624     keep: ;
16625     }
16626 
16627   /* Finally, allocate space.  */
16628   for (p = h->dyn_relocs; p != NULL; p = p->next)
16629     {
16630       asection *sreloc = elf_section_data (p->sec)->sreloc;
16631 
16632       if (h->type == STT_GNU_IFUNC
16633 	  && eh->plt.noncall_refcount == 0
16634 	  && SYMBOL_REFERENCES_LOCAL (info, h))
16635 	elf32_arm_allocate_irelocs (info, sreloc, p->count);
16636       else if (h->dynindx != -1
16637 	       && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
16638 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16639       else if (htab->fdpic_p && !bfd_link_pic (info))
16640 	htab->srofixup->size += 4 * p->count;
16641       else
16642 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16643     }
16644 
16645   return true;
16646 }
16647 
16648 void
16649 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16650 				 int byteswap_code)
16651 {
16652   struct elf32_arm_link_hash_table *globals;
16653 
16654   globals = elf32_arm_hash_table (info);
16655   if (globals == NULL)
16656     return;
16657 
16658   globals->byteswap_code = byteswap_code;
16659 }
16660 
16661 /* Set the sizes of the dynamic sections.  */
16662 
16663 static bool
16664 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16665 				 struct bfd_link_info * info)
16666 {
16667   bfd * dynobj;
16668   asection * s;
16669   bool relocs;
16670   bfd *ibfd;
16671   struct elf32_arm_link_hash_table *htab;
16672 
16673   htab = elf32_arm_hash_table (info);
16674   if (htab == NULL)
16675     return false;
16676 
16677   dynobj = elf_hash_table (info)->dynobj;
16678   BFD_ASSERT (dynobj != NULL);
16679   check_use_blx (htab);
16680 
16681   if (elf_hash_table (info)->dynamic_sections_created)
16682     {
16683       /* Set the contents of the .interp section to the interpreter.  */
16684       if (bfd_link_executable (info) && !info->nointerp)
16685 	{
16686 	  s = bfd_get_linker_section (dynobj, ".interp");
16687 	  BFD_ASSERT (s != NULL);
16688 	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16689 	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16690 	}
16691     }
16692 
16693   /* Set up .got offsets for local syms, and space for local dynamic
16694      relocs.  */
16695   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16696     {
16697       bfd_signed_vma *local_got;
16698       bfd_signed_vma *end_local_got;
16699       struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16700       char *local_tls_type;
16701       bfd_vma *local_tlsdesc_gotent;
16702       bfd_size_type locsymcount;
16703       Elf_Internal_Shdr *symtab_hdr;
16704       asection *srel;
16705       unsigned int symndx;
16706       struct fdpic_local *local_fdpic_cnts;
16707 
16708       if (! is_arm_elf (ibfd))
16709 	continue;
16710 
16711       for (s = ibfd->sections; s != NULL; s = s->next)
16712 	{
16713 	  struct elf_dyn_relocs *p;
16714 
16715 	  for (p = (struct elf_dyn_relocs *)
16716 		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16717 	    {
16718 	      if (!bfd_is_abs_section (p->sec)
16719 		  && bfd_is_abs_section (p->sec->output_section))
16720 		{
16721 		  /* Input section has been discarded, either because
16722 		     it is a copy of a linkonce section or due to
16723 		     linker script /DISCARD/, so we'll be discarding
16724 		     the relocs too.  */
16725 		}
16726 	      else if (htab->root.target_os == is_vxworks
16727 		       && strcmp (p->sec->output_section->name,
16728 				  ".tls_vars") == 0)
16729 		{
16730 		  /* Relocations in vxworks .tls_vars sections are
16731 		     handled specially by the loader.  */
16732 		}
16733 	      else if (p->count != 0)
16734 		{
16735 		  srel = elf_section_data (p->sec)->sreloc;
16736 		  if (htab->fdpic_p && !bfd_link_pic (info))
16737 		    htab->srofixup->size += 4 * p->count;
16738 		  else
16739 		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
16740 		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16741 		    info->flags |= DF_TEXTREL;
16742 		}
16743 	    }
16744 	}
16745 
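      /* Now size the GOT, iplt and function-descriptor entries needed by
	 this bfd's local symbols.  */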
16746       local_got = elf_local_got_refcounts (ibfd);
16747       if (local_got == NULL)
16748 	continue;
16749 
16750       symtab_hdr = & elf_symtab_hdr (ibfd);
16751       locsymcount = symtab_hdr->sh_info;
16752       end_local_got = local_got + locsymcount;
16753       local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16754       local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16755       local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16756       local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16757       symndx = 0;
16758       s = htab->root.sgot;
16759       srel = htab->root.srelgot;
16760       for (; local_got < end_local_got;
16761 	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
16762 	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16763 	{
16764 	  if (symndx >= elf32_arm_num_entries (ibfd))
16765 	    return false;
16766 
16767 	  *local_tlsdesc_gotent = (bfd_vma) -1;
16768 	  local_iplt = *local_iplt_ptr;
16769 
16770 	  /* FDPIC support.  */
16771 	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16772 	    {
16773 	      if (local_fdpic_cnts->funcdesc_offset == -1)
16774 		{
16775 		  local_fdpic_cnts->funcdesc_offset = s->size;
16776 		  s->size += 8;
16777 
16778 		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16779 		  if (bfd_link_pic (info))
16780 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16781 		  else
16782 		    htab->srofixup->size += 8;
16783 		}
16784 	    }
16785 
16786 	  if (local_fdpic_cnts->funcdesc_cnt > 0)
16787 	    {
16788 	      if (local_fdpic_cnts->funcdesc_offset == -1)
16789 		{
16790 		  local_fdpic_cnts->funcdesc_offset = s->size;
16791 		  s->size += 8;
16792 
16793 		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16794 		  if (bfd_link_pic (info))
16795 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16796 		  else
16797 		    htab->srofixup->size += 8;
16798 		}
16799 
16800 	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
16801 	      if (bfd_link_pic (info))
16802 		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16803 	      else
16804 		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16805 	    }
16806 
16807 	  if (local_iplt != NULL)
16808 	    {
16809 	      struct elf_dyn_relocs *p;
16810 
16811 	      if (local_iplt->root.refcount > 0)
16812 		{
16813 		  elf32_arm_allocate_plt_entry (info, true,
16814 						&local_iplt->root,
16815 						&local_iplt->arm);
16816 		  if (local_iplt->arm.noncall_refcount == 0)
16817 		    /* All references to the PLT are calls, so all
16818 		       non-call references can resolve directly to the
16819 		       run-time target.  This means that the .got entry
16820 		       would be the same as the .igot.plt entry, so there's
16821 		       no point creating both.  */
16822 		    *local_got = 0;
16823 		}
16824 	      else
16825 		{
16826 		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16827 		  local_iplt->root.offset = (bfd_vma) -1;
16828 		}
16829 
16830 	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16831 		{
16832 		  asection *psrel;
16833 
16834 		  psrel = elf_section_data (p->sec)->sreloc;
16835 		  if (local_iplt->arm.noncall_refcount == 0)
16836 		    elf32_arm_allocate_irelocs (info, psrel, p->count);
16837 		  else
16838 		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16839 		}
16840 	    }
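	  /* Size the GOT entry or entries this local symbol needs, according
	     to its TLS type, and reserve any relocations against them.  */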
16841 	  if (*local_got > 0)
16842 	    {
16843 	      Elf_Internal_Sym *isym;
16844 
16845 	      *local_got = s->size;
16846 	      if (*local_tls_type & GOT_TLS_GD)
16847 		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
16848 		s->size += 8;
16849 	      if (*local_tls_type & GOT_TLS_GDESC)
16850 		{
16851 		  *local_tlsdesc_gotent = htab->root.sgotplt->size
16852 		    - elf32_arm_compute_jump_table_size (htab);
16853 		  htab->root.sgotplt->size += 8;
16854 		  *local_got = (bfd_vma) -2;
16855 		  /* plt.got_offset needs to know there's a TLS_DESC
16856 		     reloc in the middle of .got.plt.  */
16857 		  htab->num_tls_desc++;
16858 		}
16859 	      if (*local_tls_type & GOT_TLS_IE)
16860 		s->size += 4;
16861 
16862 	      if (*local_tls_type & GOT_NORMAL)
16863 		{
16864 		  /* If the symbol is both GD and GDESC, *local_got
16865 		     may have been overwritten.  */
16866 		  *local_got = s->size;
16867 		  s->size += 4;
16868 		}
16869 
16870 	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
16871 					    symndx);
16872 	      if (isym == NULL)
16873 		return false;
16874 
16875 	      /* If all references to an STT_GNU_IFUNC PLT are calls,
16876 		 then all non-call references, including this GOT entry,
16877 		 resolve directly to the run-time target.  */
16878 	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16879 		  && (local_iplt == NULL
16880 		      || local_iplt->arm.noncall_refcount == 0))
16881 		elf32_arm_allocate_irelocs (info, srel, 1);
16882 	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16883 		{
16884 		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16885 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16886 		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16887 		    htab->srofixup->size += 4;
16888 
16889 		  if ((bfd_link_pic (info) || htab->fdpic_p)
16890 		      && *local_tls_type & GOT_TLS_GDESC)
16891 		    {
16892 		      elf32_arm_allocate_dynrelocs (info,
16893 						    htab->root.srelplt, 1);
16894 		      htab->tls_trampoline = -1;
16895 		    }
16896 		}
16897 	    }
16898 	  else
16899 	    *local_got = (bfd_vma) -1;
16900 	}
16901     }
16902 
16903   if (htab->tls_ldm_got.refcount > 0)
16904     {
16905       /* Allocate two GOT entries and one dynamic relocation (if necessary)
16906 	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
16907       htab->tls_ldm_got.offset = htab->root.sgot->size;
16908       htab->root.sgot->size += 8;
16909       if (bfd_link_pic (info))
16910 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16911     }
16912   else
16913     htab->tls_ldm_got.offset = -1;
16914 
16915   /* At the very end of the .rofixup section is a pointer to the GOT;
16916      reserve space for it.  */
16917   if (htab->fdpic_p && htab->srofixup != NULL)
16918     htab->srofixup->size += 4;
16919 
16920   /* Allocate global sym .plt and .got entries, and space for global
16921      sym dynamic relocs.  */
16922   elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16923 
16924   /* Here we rummage through the found bfds to collect glue information.  */
16925   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16926     {
16927       if (! is_arm_elf (ibfd))
16928 	continue;
16929 
16930       /* Initialise mapping tables for code/data.  */
16931       bfd_elf32_arm_init_maps (ibfd);
16932 
16933       if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
16934 	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
16935 	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
16936 	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
16937     }
16938 
16939   /* Allocate space for the glue sections now that we've sized them.  */
16940   bfd_elf32_arm_allocate_interworking_sections (info);
16941 
16942   /* For every jump slot reserved in the sgotplt, reloc_count is
16943      incremented.  However, when we reserve space for TLS descriptors,
16944      it's not incremented, so in order to compute the space reserved
16945      for them, it suffices to multiply the reloc count by the jump
16946      slot size.  */
16947   if (htab->root.srelplt)
16948     htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
16949 
16950   if (htab->tls_trampoline)
16951     {
16952       if (htab->root.splt->size == 0)
16953 	htab->root.splt->size += htab->plt_header_size;
16954 
16955       htab->tls_trampoline = htab->root.splt->size;
16956       htab->root.splt->size += htab->plt_entry_size;
16957 
16958       /* If we're not using lazy TLS relocations, don't generate the
16959 	 PLT and GOT entries they require.  */
16960       if ((info->flags & DF_BIND_NOW))
16961 	htab->root.tlsdesc_plt = 0;
16962       else
16963 	{
16964 	  htab->root.tlsdesc_got = htab->root.sgot->size;
16965 	  htab->root.sgot->size += 4;
16966 
16967 	  htab->root.tlsdesc_plt = htab->root.splt->size;
16968 	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
16969 	}
16970     }
16971 
16972   /* The check_relocs and adjust_dynamic_symbol entry points have
16973      determined the sizes of the various dynamic sections.  Allocate
16974      memory for them.  */
16975   relocs = false;
16976   for (s = dynobj->sections; s != NULL; s = s->next)
16977     {
16978       const char * name;
16979 
16980       if ((s->flags & SEC_LINKER_CREATED) == 0)
16981 	continue;
16982 
16983       /* It's OK to base decisions on the section name, because none
16984 	 of the dynobj section names depend upon the input files.  */
16985       name = bfd_section_name (s);
16986 
16987       if (s == htab->root.splt)
16988 	{
16989 	  /* Remember whether there is a PLT.  */
16990 	  ;
16991 	}
16992       else if (startswith (name, ".rel"))
16993 	{
16994 	  if (s->size != 0)
16995 	    {
16996 	      /* Remember whether there are any reloc sections other
16997 		 than .rel(a).plt and .rela.plt.unloaded.  */
16998 	      if (s != htab->root.srelplt && s != htab->srelplt2)
16999 		relocs = true;
17000 
17001 	      /* We use the reloc_count field as a counter if we need
17002 		 to copy relocs into the output file.  */
17003 	      s->reloc_count = 0;
17004 	    }
17005 	}
17006       else if (s != htab->root.sgot
17007 	       && s != htab->root.sgotplt
17008 	       && s != htab->root.iplt
17009 	       && s != htab->root.igotplt
17010 	       && s != htab->root.sdynbss
17011 	       && s != htab->root.sdynrelro
17012 	       && s != htab->srofixup)
17013 	{
17014 	  /* It's not one of our sections, so don't allocate space.  */
17015 	  continue;
17016 	}
17017 
17018       if (s->size == 0)
17019 	{
17020 	  /* If we don't need this section, strip it from the
17021 	     output file.  This is mostly to handle .rel(a).bss and
17022 	     .rel(a).plt.  We must create both sections in
17023 	     create_dynamic_sections, because they must be created
17024 	     before the linker maps input sections to output
17025 	     sections.  The linker does that before
17026 	     adjust_dynamic_symbol is called, and it is that
17027 	     function which decides whether anything needs to go
17028 	     into these sections.  */
17029 	  s->flags |= SEC_EXCLUDE;
17030 	  continue;
17031 	}
17032 
17033       if ((s->flags & SEC_HAS_CONTENTS) == 0)
17034 	continue;
17035 
17036       /* Allocate memory for the section contents.  */
17037       s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17038       if (s->contents == NULL)
17039 	return false;
17040     }
17041 
17042   return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
17043 						  relocs);
17044 }
17045 
17046 /* Size sections even though they're not dynamic.  We use this hook to
17047    set up _TLS_MODULE_BASE_, if needed.  */
17048 
17049 static bool
17050 elf32_arm_always_size_sections (bfd *output_bfd,
17051 				struct bfd_link_info *info)
17052 {
17053   asection *tls_sec;
17054   struct elf32_arm_link_hash_table *htab;
17055 
17056   htab = elf32_arm_hash_table (info);
17057 
17058   if (bfd_link_relocatable (info))
17059     return true;
17060 
17061   tls_sec = elf_hash_table (info)->tls_sec;
17062 
17063   if (tls_sec)
17064     {
17065       struct elf_link_hash_entry *tlsbase;
17066 
17067       tlsbase = elf_link_hash_lookup
17068 	(elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);
17069 
17070       if (tlsbase)
17071 	{
17072 	  struct bfd_link_hash_entry *bh = NULL;
17073 	  const struct elf_backend_data *bed
17074 	    = get_elf_backend_data (output_bfd);
17075 
17076 	  if (!(_bfd_generic_link_add_one_symbol
17077 		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17078 		 tls_sec, 0, NULL, false,
17079 		 bed->collect, &bh)))
17080 	    return false;
17081 
17082 	  tlsbase->type = STT_TLS;
17083 	  tlsbase = (struct elf_link_hash_entry *)bh;
17084 	  tlsbase->def_regular = 1;
17085 	  tlsbase->other = STV_HIDDEN;
17086 	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
17087 	}
17088     }
17089 
17090   if (htab->fdpic_p && !bfd_link_relocatable (info)
17091       && !bfd_elf_stack_segment_size (output_bfd, info,
17092 				      "__stacksize", DEFAULT_STACK_SIZE))
17093     return false;
17094 
17095   return true;
17096 }
17097 
17098 /* Finish up dynamic symbol handling.  We set the contents of various
17099    dynamic sections here.  */
17100 
17101 static bool
17102 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17103 				 struct bfd_link_info * info,
17104 				 struct elf_link_hash_entry * h,
17105 				 Elf_Internal_Sym * sym)
17106 {
17107   struct elf32_arm_link_hash_table *htab;
17108   struct elf32_arm_link_hash_entry *eh;
17109 
17110   htab = elf32_arm_hash_table (info);
17111   if (htab == NULL)
17112     return false;
17113 
17114   eh = (struct elf32_arm_link_hash_entry *) h;
17115 
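  /* If the symbol has a PLT entry, emit its contents (for ordinary .plt
     entries) and adjust how the symbol will appear in the dynamic symbol
     table.  */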
17116   if (h->plt.offset != (bfd_vma) -1)
17117     {
17118       if (!eh->is_iplt)
17119 	{
17120 	  BFD_ASSERT (h->dynindx != -1);
17121 	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17122 					      h->dynindx, 0))
17123 	    return false;
17124 	}
17125 
17126       if (!h->def_regular)
17127 	{
17128 	  /* Mark the symbol as undefined, rather than as defined in
17129 	     the .plt section.  */
17130 	  sym->st_shndx = SHN_UNDEF;
17131 	  /* If the symbol is weak we need to clear the value.
17132 	     Otherwise, the PLT entry would provide a definition for
17133 	     the symbol even if the symbol wasn't defined anywhere,
17134 	     and so the symbol would never be NULL.  Leave the value if
17135 	     there were any relocations where pointer equality matters
17136 	     (this is a clue for the dynamic linker, to make function
17137 	     pointer comparisons work between an application and shared
17138 	     library).  */
17139 	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17140 	    sym->st_value = 0;
17141 	}
17142       else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17143 	{
17144 	  /* At least one non-call relocation references this .iplt entry,
17145 	     so the .iplt entry is the function's canonical address.  */
17146 	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17147 	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17148 	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
17149 			   (output_bfd, htab->root.iplt->output_section));
17150 	  sym->st_value = (h->plt.offset
17151 			   + htab->root.iplt->output_section->vma
17152 			   + htab->root.iplt->output_offset);
17153 	}
17154     }
17155 
17156   if (h->needs_copy)
17157     {
17158       asection * s;
17159       Elf_Internal_Rela rel;
17160 
17161       /* This symbol needs a copy reloc.  Set it up.  */
17162       BFD_ASSERT (h->dynindx != -1
17163 		  && (h->root.type == bfd_link_hash_defined
17164 		      || h->root.type == bfd_link_hash_defweak));
17165 
17166       rel.r_addend = 0;
17167       rel.r_offset = (h->root.u.def.value
17168 		      + h->root.u.def.section->output_section->vma
17169 		      + h->root.u.def.section->output_offset);
17170       rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17171       if (h->root.u.def.section == htab->root.sdynrelro)
17172 	s = htab->root.sreldynrelro;
17173       else
17174 	s = htab->root.srelbss;
17175       elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17176     }
17177 
17178   /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
17179      and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17180      it is relative to the ".got" section.  */
17181   if (h == htab->root.hdynamic
17182       || (!htab->fdpic_p
17183 	  && htab->root.target_os != is_vxworks
17184 	  && h == htab->root.hgot))
17185     sym->st_shndx = SHN_ABS;
17186 
17187   return true;
17188 }
17189 
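/* Copy COUNT instructions from TEMPLATE into CONTENTS, rewriting any
   "bx rX" into "mov pc, rX" when HTAB->fix_v4bx == 1.  */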
17190 static void
17191 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17192 		    void *contents,
17193 		    const unsigned long *template, unsigned count)
17194 {
17195   unsigned ix;
17196 
17197   for (ix = 0; ix != count; ix++)
17198     {
17199       unsigned long insn = template[ix];
17200 
17201       /* Emit mov pc,rx if bx is not permitted.  */
17202       if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17203 	insn = (insn & 0xf000000f) | 0x01a0f000;
17204       put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17205     }
17206 }
17207 
17208 /* Install the special first PLT entry for elf32-arm-nacl.  Unlike
17209    other variants, NaCl needs this entry in a static executable's
17210    .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
17211    zero.  For .iplt really only the last bundle is useful, and .iplt
17212    could have a shorter first entry, with each individual PLT entry's
17213    relative branch calculated differently so it targets the last
17214    bundle instead of the instruction before it (labelled .Lplt_tail
17215    above).  But it's simpler to keep the size and layout of PLT0
17216    consistent with the dynamic case, at the cost of some dead code at
17217    the start of .iplt and the one dead store to the stack at the start
17218    of .Lplt_tail.  */
17219 static void
17220 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17221 		   asection *plt, bfd_vma got_displacement)
17222 {
17223   unsigned int i;
17224 
17225   put_arm_insn (htab, output_bfd,
17226 		elf32_arm_nacl_plt0_entry[0]
17227 		| arm_movw_immediate (got_displacement),
17228 		plt->contents + 0);
17229   put_arm_insn (htab, output_bfd,
17230 		elf32_arm_nacl_plt0_entry[1]
17231 		| arm_movt_immediate (got_displacement),
17232 		plt->contents + 4);
17233 
17234   for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17235     put_arm_insn (htab, output_bfd,
17236 		  elf32_arm_nacl_plt0_entry[i],
17237 		  plt->contents + (i * 4));
17238 }
17239 
17240 /* Finish up the dynamic sections.  */
17241 
17242 static bool
17243 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17244 {
17245   bfd * dynobj;
17246   asection * sgot;
17247   asection * sdyn;
17248   struct elf32_arm_link_hash_table *htab;
17249 
17250   htab = elf32_arm_hash_table (info);
17251   if (htab == NULL)
17252     return false;
17253 
17254   dynobj = elf_hash_table (info)->dynobj;
17255 
17256   sgot = htab->root.sgotplt;
17257   /* A broken linker script might have discarded the dynamic sections.
17258      Catch this here so that we do not seg-fault later on.  */
17259   if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17260     return false;
17261   sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17262 
17263   if (elf_hash_table (info)->dynamic_sections_created)
17264     {
17265       asection *splt;
17266       Elf32_External_Dyn *dyncon, *dynconend;
17267 
17268       splt = htab->root.splt;
17269       BFD_ASSERT (splt != NULL && sdyn != NULL);
17270       BFD_ASSERT (sgot != NULL);
17271 
17272       dyncon = (Elf32_External_Dyn *) sdyn->contents;
17273       dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17274 
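      /* Walk the .dynamic section and fill in the tags whose values depend
	 on the final layout of the output sections.  */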
17275       for (; dyncon < dynconend; dyncon++)
17276 	{
17277 	  Elf_Internal_Dyn dyn;
17278 	  const char * name;
17279 	  asection * s;
17280 
17281 	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17282 
17283 	  switch (dyn.d_tag)
17284 	    {
17285 	    default:
17286 	      if (htab->root.target_os == is_vxworks
17287 		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17288 		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17289 	      break;
17290 
17291 	    case DT_HASH:
17292 	    case DT_STRTAB:
17293 	    case DT_SYMTAB:
17294 	    case DT_VERSYM:
17295 	    case DT_VERDEF:
17296 	    case DT_VERNEED:
17297 	      break;
17298 
17299 	    case DT_PLTGOT:
17300 	      name = ".got.plt";
17301 	      goto get_vma;
17302 	    case DT_JMPREL:
17303 	      name = RELOC_SECTION (htab, ".plt");
17304 	    get_vma:
17305 	      s = bfd_get_linker_section (dynobj, name);
17306 	      if (s == NULL)
17307 		{
17308 		  _bfd_error_handler
17309 		    (_("could not find section %s"), name);
17310 		  bfd_set_error (bfd_error_invalid_operation);
17311 		  return false;
17312 		}
17313 	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17314 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17315 	      break;
17316 
17317 	    case DT_PLTRELSZ:
17318 	      s = htab->root.srelplt;
17319 	      BFD_ASSERT (s != NULL);
17320 	      dyn.d_un.d_val = s->size;
17321 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17322 	      break;
17323 
17324 	    case DT_RELSZ:
17325 	    case DT_RELASZ:
17326 	    case DT_REL:
17327 	    case DT_RELA:
17328 	      break;
17329 
17330 	    case DT_TLSDESC_PLT:
17331 	      s = htab->root.splt;
17332 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17333 				+ htab->root.tlsdesc_plt);
17334 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17335 	      break;
17336 
17337 	    case DT_TLSDESC_GOT:
17338 	      s = htab->root.sgot;
17339 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17340 				+ htab->root.tlsdesc_got);
17341 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17342 	      break;
17343 
17344 	      /* Set the bottom bit of DT_INIT/FINI if the
17345 		 corresponding function is Thumb.  */
17346 	    case DT_INIT:
17347 	      name = info->init_function;
17348 	      goto get_sym;
17349 	    case DT_FINI:
17350 	      name = info->fini_function;
17351 	    get_sym:
17352 	      /* If it wasn't set by elf_bfd_final_link
17353 		 then there is nothing to adjust.  */
17354 	      if (dyn.d_un.d_val != 0)
17355 		{
17356 		  struct elf_link_hash_entry * eh;
17357 
17358 		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
17359 					     false, false, true);
17360 		  if (eh != NULL
17361 		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17362 			 == ST_BRANCH_TO_THUMB)
17363 		    {
17364 		      dyn.d_un.d_val |= 1;
17365 		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17366 		    }
17367 		}
17368 	      break;
17369 	    }
17370 	}
17371 
17372       /* Fill in the first entry in the procedure linkage table.  */
17373       if (splt->size > 0 && htab->plt_header_size)
17374 	{
17375 	  const bfd_vma *plt0_entry;
17376 	  bfd_vma got_address, plt_address, got_displacement;
17377 
17378 	  /* Calculate the addresses of the GOT and PLT.  */
17379 	  got_address = sgot->output_section->vma + sgot->output_offset;
17380 	  plt_address = splt->output_section->vma + splt->output_offset;
17381 
17382 	  if (htab->root.target_os == is_vxworks)
17383 	    {
17384 	      /* The VxWorks GOT is relocated by the dynamic linker.
17385 		 Therefore, we must emit relocations rather than simply
17386 		 computing the values now.  */
17387 	      Elf_Internal_Rela rel;
17388 
17389 	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17390 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17391 			    splt->contents + 0);
17392 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17393 			    splt->contents + 4);
17394 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17395 			    splt->contents + 8);
17396 	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17397 
17398 	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
17399 	      rel.r_offset = plt_address + 12;
17400 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17401 	      rel.r_addend = 0;
17402 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17403 				     htab->srelplt2->contents);
17404 	    }
17405 	  else if (htab->root.target_os == is_nacl)
17406 	    arm_nacl_put_plt0 (htab, output_bfd, splt,
17407 			       got_address + 8 - (plt_address + 16));
17408 	  else if (using_thumb_only (htab))
17409 	    {
17410 	      got_displacement = got_address - (plt_address + 12);
17411 
17412 	      plt0_entry = elf32_thumb2_plt0_entry;
17413 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17414 			    splt->contents + 0);
17415 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17416 			    splt->contents + 4);
17417 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17418 			    splt->contents + 8);
17419 
17420 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17421 	    }
17422 	  else
17423 	    {
17424 	      got_displacement = got_address - (plt_address + 16);
17425 
17426 	      plt0_entry = elf32_arm_plt0_entry;
17427 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17428 			    splt->contents + 0);
17429 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17430 			    splt->contents + 4);
17431 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17432 			    splt->contents + 8);
17433 	      put_arm_insn (htab, output_bfd, plt0_entry[3],
17434 			    splt->contents + 12);
17435 
17436 #ifdef FOUR_WORD_PLT
17437 	      /* The displacement value goes in the otherwise-unused
17438 		 last word of the second entry.  */
17439 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17440 #else
17441 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17442 #endif
17443 	    }
17444 	}
17445 
17446       /* UnixWare sets the entsize of .plt to 4, although that doesn't
17447 	 really seem like the right value.  */
17448       if (splt->output_section->owner == output_bfd)
17449 	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17450 
17451       if (htab->root.tlsdesc_plt)
17452 	{
17453 	  bfd_vma got_address
17454 	    = sgot->output_section->vma + sgot->output_offset;
17455 	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17456 				    + htab->root.sgot->output_offset);
17457 	  bfd_vma plt_address
17458 	    = splt->output_section->vma + splt->output_offset;
17459 
17460 	  arm_put_trampoline (htab, output_bfd,
17461 			      splt->contents + htab->root.tlsdesc_plt,
17462 			      dl_tlsdesc_lazy_trampoline, 6);
17463 
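	  /* Patch the trampoline's two trailing data words: the PC-relative
	     offset to the GOT slot recorded in root.tlsdesc_got and the
	     PC-relative offset to the start of the GOT, each adjusted by the
	     bias held in words 6 and 7 of the template.  */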
17464 	  bfd_put_32 (output_bfd,
17465 		      gotplt_address + htab->root.tlsdesc_got
17466 		      - (plt_address + htab->root.tlsdesc_plt)
17467 		      - dl_tlsdesc_lazy_trampoline[6],
17468 		      splt->contents + htab->root.tlsdesc_plt + 24);
17469 	  bfd_put_32 (output_bfd,
17470 		      got_address - (plt_address + htab->root.tlsdesc_plt)
17471 		      - dl_tlsdesc_lazy_trampoline[7],
17472 		      splt->contents + htab->root.tlsdesc_plt + 24 + 4);
17473 	}
17474 
17475       if (htab->tls_trampoline)
17476 	{
17477 	  arm_put_trampoline (htab, output_bfd,
17478 			      splt->contents + htab->tls_trampoline,
17479 			      tls_trampoline, 3);
17480 #ifdef FOUR_WORD_PLT
17481 	  bfd_put_32 (output_bfd, 0x00000000,
17482 		      splt->contents + htab->tls_trampoline + 12);
17483 #endif
17484 	}
17485 
17486       if (htab->root.target_os == is_vxworks
17487 	  && !bfd_link_pic (info)
17488 	  && htab->root.splt->size > 0)
17489 	{
17490 	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
17491 	     incorrect symbol indexes.  */
17492 	  int num_plts;
17493 	  unsigned char *p;
17494 
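	  /* Skip the relocation describing the PLT header's reference to
	     _GLOBAL_OFFSET_TABLE_ (emitted above); each PLT entry then owns
	     the next two relocations, whose symbol indexes are fixed up in
	     pairs below.  */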
17495 	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
17496 		      / htab->plt_entry_size);
17497 	  p = htab->srelplt2->contents + RELOC_SIZE (htab);
17498 
17499 	  for (; num_plts; num_plts--)
17500 	    {
17501 	      Elf_Internal_Rela rel;
17502 
17503 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17504 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17505 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17506 	      p += RELOC_SIZE (htab);
17507 
17508 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17509 	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17510 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17511 	      p += RELOC_SIZE (htab);
17512 	    }
17513 	}
17514     }
17515 
17516   if (htab->root.target_os == is_nacl
17517       && htab->root.iplt != NULL
17518       && htab->root.iplt->size > 0)
17519     /* NaCl uses a special first entry in .iplt too.  */
17520     arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17521 
17522   /* Fill in the first three entries in the global offset table.  */
17523   if (sgot)
17524     {
17525       if (sgot->size > 0)
17526 	{
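	  /* Word 0 of the GOT holds the address of the dynamic section (or 0
	     if there is none); words 1 and 2 are cleared here and are filled
	     in at run time by the dynamic linker.  */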
17527 	  if (sdyn == NULL)
17528 	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17529 	  else
17530 	    bfd_put_32 (output_bfd,
17531 			sdyn->output_section->vma + sdyn->output_offset,
17532 			sgot->contents);
17533 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17534 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17535 	}
17536 
17537       elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17538     }
17539 
17540   /* At the very end of the .rofixup section is a pointer to the GOT.  */
17541   if (htab->fdpic_p && htab->srofixup != NULL)
17542     {
17543       struct elf_link_hash_entry *hgot = htab->root.hgot;
17544 
17545       bfd_vma got_value = hgot->root.u.def.value
17546 	+ hgot->root.u.def.section->output_section->vma
17547 	+ hgot->root.u.def.section->output_offset;
17548 
17549       arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);
17550 
17551       /* Make sure we allocated and generated the same number of fixups.  */
17552       BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17553     }
17554 
17555   return true;
17556 }
17557 
17558 static bool
17559 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17560 {
17561   Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
17562   struct elf32_arm_link_hash_table *globals;
17563   struct elf_segment_map *m;
17564 
17565   if (!_bfd_elf_init_file_header (abfd, link_info))
17566     return false;
17567 
17568   i_ehdrp = elf_elfheader (abfd);
17569 
17570   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17571     i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17572   i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17573 
17574   if (link_info)
17575     {
17576       globals = elf32_arm_hash_table (link_info);
17577       if (globals != NULL && globals->byteswap_code)
17578 	i_ehdrp->e_flags |= EF_ARM_BE8;
17579 
17580       if (globals->fdpic_p)
17581 	i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17582     }
17583 
17584   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17585       && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17586     {
17587       int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17588       if (abi == AEABI_VFP_args_vfp)
17589 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17590       else
17591 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17592     }
17593 
17594   /* Scan each segment and set its p_flags attribute if it contains only
17595      sections with the SHF_ARM_PURECODE flag.  */
17596   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17597     {
17598       unsigned int j;
17599 
17600       if (m->count == 0)
17601 	continue;
17602       for (j = 0; j < m->count; j++)
17603 	{
17604 	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17605 	    break;
17606 	}
17607       if (j == m->count)
17608 	{
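	  /* Every section in this segment is execute-only, so restrict the
	     segment to PF_X.  */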
17609 	  m->p_flags = PF_X;
17610 	  m->p_flags_valid = 1;
17611 	}
17612     }
17613   return true;
17614 }
17615 
17616 static enum elf_reloc_type_class
17617 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17618 			    const asection *rel_sec ATTRIBUTE_UNUSED,
17619 			    const Elf_Internal_Rela *rela)
17620 {
17621   switch ((int) ELF32_R_TYPE (rela->r_info))
17622     {
17623     case R_ARM_RELATIVE:
17624       return reloc_class_relative;
17625     case R_ARM_JUMP_SLOT:
17626       return reloc_class_plt;
17627     case R_ARM_COPY:
17628       return reloc_class_copy;
17629     case R_ARM_IRELATIVE:
17630       return reloc_class_ifunc;
17631     default:
17632       return reloc_class_normal;
17633     }
17634 }
17635 
17636 static void
17637 arm_final_write_processing (bfd *abfd)
17638 {
17639   bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17640 }
17641 
17642 static bool
17643 elf32_arm_final_write_processing (bfd *abfd)
17644 {
17645   arm_final_write_processing (abfd);
17646   return _bfd_elf_final_write_processing (abfd);
17647 }
17648 
17649 /* Return TRUE if this is an unwinding table entry.  */
17650 
17651 static bool
17652 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17653 {
17654   return (startswith (name, ELF_STRING_ARM_unwind)
17655 	  || startswith (name, ELF_STRING_ARM_unwind_once));
17656 }
17657 
17658 
17659 /* Set the type and flags for an ARM section.  We do this by
17660    the section name, which is a hack, but ought to work.  */
17661 
17662 static bool
17663 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17664 {
17665   const char * name;
17666 
17667   name = bfd_section_name (sec);
17668 
17669   if (is_arm_elf_unwind_section_name (abfd, name))
17670     {
17671       hdr->sh_type = SHT_ARM_EXIDX;
17672       hdr->sh_flags |= SHF_LINK_ORDER;
17673     }
17674 
17675   if (sec->flags & SEC_ELF_PURECODE)
17676     hdr->sh_flags |= SHF_ARM_PURECODE;
17677 
17678   return true;
17679 }
17680 
17681 /* Handle an ARM specific section when reading an object file.  This is
17682    called when bfd_section_from_shdr finds a section with an unknown
17683    type.  */
17684 
17685 static bool
17686 elf32_arm_section_from_shdr (bfd *abfd,
17687 			     Elf_Internal_Shdr * hdr,
17688 			     const char *name,
17689 			     int shindex)
17690 {
17691   /* There ought to be a place to keep ELF backend specific flags, but
17692      at the moment there isn't one.  We just keep track of the
17693      sections by their name, instead.  Fortunately, the ABI gives
17694      names for all the ARM specific sections, so we will probably get
17695      away with this.  */
17696   switch (hdr->sh_type)
17697     {
17698     case SHT_ARM_EXIDX:
17699     case SHT_ARM_PREEMPTMAP:
17700     case SHT_ARM_ATTRIBUTES:
17701       break;
17702 
17703     default:
17704       return false;
17705     }
17706 
17707   if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17708     return false;
17709 
17710   return true;
17711 }
17712 
17713 static _arm_elf_section_data *
17714 get_arm_elf_section_data (asection * sec)
17715 {
17716   if (sec && sec->owner && is_arm_elf (sec->owner))
17717     return elf32_arm_section_data (sec);
17718   else
17719     return NULL;
17720 }
17721 
17722 typedef struct
17723 {
17724   void *flaginfo;
17725   struct bfd_link_info *info;
17726   asection *sec;
17727   int sec_shndx;
17728   int (*func) (void *, const char *, Elf_Internal_Sym *,
17729 	       asection *, struct elf_link_hash_entry *);
17730 } output_arch_syminfo;
17731 
17732 enum map_symbol_type
17733 {
17734   ARM_MAP_ARM,
17735   ARM_MAP_THUMB,
17736   ARM_MAP_DATA
17737 };
17738 
17739 
17740 /* Output a single mapping symbol.  */
17741 
17742 static bool
17743 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17744 			  enum map_symbol_type type,
17745 			  bfd_vma offset)
17746 {
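  /* The ARM ELF ABI defines the mapping symbols "$a", "$t" and "$d" to mark
     the start of Arm code, Thumb code and data respectively; names[type][1]
     below supplies the letter recorded in the section's map.  */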
17747   static const char *names[3] = {"$a", "$t", "$d"};
17748   Elf_Internal_Sym sym;
17749 
17750   sym.st_value = osi->sec->output_section->vma
17751 		 + osi->sec->output_offset
17752 		 + offset;
17753   sym.st_size = 0;
17754   sym.st_other = 0;
17755   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17756   sym.st_shndx = osi->sec_shndx;
17757   sym.st_target_internal = 0;
17758   elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17759   return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17760 }
17761 
17762 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17763    IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */
17764 
17765 static bool
17766 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17767 			    bool is_iplt_entry_p,
17768 			    union gotplt_union *root_plt,
17769 			    struct arm_plt_info *arm_plt)
17770 {
17771   struct elf32_arm_link_hash_table *htab;
17772   bfd_vma addr, plt_header_size;
17773 
17774   if (root_plt->offset == (bfd_vma) -1)
17775     return true;
17776 
17777   htab = elf32_arm_hash_table (osi->info);
17778   if (htab == NULL)
17779     return false;
17780 
17781   if (is_iplt_entry_p)
17782     {
17783       osi->sec = htab->root.iplt;
17784       plt_header_size = 0;
17785     }
17786   else
17787     {
17788       osi->sec = htab->root.splt;
17789       plt_header_size = htab->plt_header_size;
17790     }
17791   osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17792 		    (osi->info->output_bfd, osi->sec->output_section));
17793 
17794   addr = root_plt->offset & -2;
17795   if (htab->root.target_os == is_vxworks)
17796     {
17797       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17798 	return false;
17799       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17800 	return false;
17801       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17802 	return false;
17803       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17804 	return false;
17805     }
17806   else if (htab->root.target_os == is_nacl)
17807     {
17808       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17809 	return false;
17810     }
17811   else if (htab->fdpic_p)
17812     {
17813       enum map_symbol_type type = using_thumb_only (htab)
17814 	? ARM_MAP_THUMB
17815 	: ARM_MAP_ARM;
17816 
17817       if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17818 	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17819 	  return false;
17820       if (!elf32_arm_output_map_sym (osi, type, addr))
17821 	return false;
17822       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
17823 	return false;
17824       if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
17825 	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17826 	  return false;
17827     }
17828   else if (using_thumb_only (htab))
17829     {
17830       if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17831 	return false;
17832     }
17833   else
17834     {
17835       bool thumb_stub_p;
17836 
17837       thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
17838       if (thumb_stub_p)
17839 	{
17840 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17841 	    return false;
17842 	}
17843 #ifdef FOUR_WORD_PLT
17844       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17845 	return false;
17846       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
17847 	return false;
17848 #else
17849       /* A three-word PLT with no Thumb thunk contains only Arm code,
17850 	 so we only need to output a mapping symbol for the first PLT entry
17851 	 and for entries with Thumb thunks.  */
17852       if (thumb_stub_p || addr == plt_header_size)
17853 	{
17854 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17855 	    return false;
17856 	}
17857 #endif
17858     }
17859 
17860   return true;
17861 }
17862 
17863 /* Output mapping symbols for PLT entries associated with H.  */
17864 
17865 static bool
17866 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17867 {
17868   output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17869   struct elf32_arm_link_hash_entry *eh;
17870 
17871   if (h->root.type == bfd_link_hash_indirect)
17872     return true;
17873 
17874   if (h->root.type == bfd_link_hash_warning)
17875     /* When warning symbols are created, they **replace** the "real"
17876        entry in the hash table, thus we never get to see the real
17877        symbol in a hash traversal.  So look at it now.  */
17878     h = (struct elf_link_hash_entry *) h->root.u.i.link;
17879 
17880   eh = (struct elf32_arm_link_hash_entry *) h;
17881   return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17882 				     &h->plt, &eh->plt);
17883 }
17884 
17885 /* Bind a veneered symbol to its veneer identified by its hash entry
17886    STUB_ENTRY.  The veneered location thus loses its symbol.  */
17887 
17888 static void
17889 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17890 {
17891   struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17892 
17893   BFD_ASSERT (hash);
17894   hash->root.root.u.def.section = stub_entry->stub_sec;
17895   hash->root.root.u.def.value = stub_entry->stub_offset;
17896   hash->root.size = stub_entry->stub_size;
17897 }
17898 
17899 /* Output a single local symbol for a generated stub.  */
17900 
17901 static bool
17902 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17903 			   bfd_vma offset, bfd_vma size)
17904 {
17905   Elf_Internal_Sym sym;
17906 
17907   sym.st_value = osi->sec->output_section->vma
17908 		 + osi->sec->output_offset
17909 		 + offset;
17910   sym.st_size = size;
17911   sym.st_other = 0;
17912   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17913   sym.st_shndx = osi->sec_shndx;
17914   sym.st_target_internal = 0;
17915   return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17916 }
17917 
17918 static bool
17919 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
17920 		  void * in_arg)
17921 {
17922   struct elf32_arm_stub_hash_entry *stub_entry;
17923   asection *stub_sec;
17924   bfd_vma addr;
17925   char *stub_name;
17926   output_arch_syminfo *osi;
17927   const insn_sequence *template_sequence;
17928   enum stub_insn_type prev_type;
17929   int size;
17930   int i;
17931   enum map_symbol_type sym_type;
17932 
17933   /* Massage our args to the form they really have.  */
17934   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
17935   osi = (output_arch_syminfo *) in_arg;
17936 
17937   stub_sec = stub_entry->stub_sec;
17938 
17939   /* Ensure this stub is attached to the current section being
17940      processed.  */
17941   if (stub_sec != osi->sec)
17942     return true;
17943 
17944   addr = (bfd_vma) stub_entry->stub_offset;
17945   template_sequence = stub_entry->stub_template;
17946 
17947   if (arm_stub_sym_claimed (stub_entry->stub_type))
17948     arm_stub_claim_sym (stub_entry);
17949   else
17950     {
17951       stub_name = stub_entry->output_name;
17952       switch (template_sequence[0].type)
17953 	{
17954 	case ARM_TYPE:
17955 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
17956 					  stub_entry->stub_size))
17957 	    return false;
17958 	  break;
17959 	case THUMB16_TYPE:
17960 	case THUMB32_TYPE:
17961 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
17962 					  stub_entry->stub_size))
17963 	    return false;
17964 	  break;
17965 	default:
17966 	  BFD_FAIL ();
17967 	  return 0;
17968 	}
17969     }
17970 
17971   prev_type = DATA_TYPE;
17972   size = 0;
17973   for (i = 0; i < stub_entry->stub_template_size; i++)
17974     {
17975       switch (template_sequence[i].type)
17976 	{
17977 	case ARM_TYPE:
17978 	  sym_type = ARM_MAP_ARM;
17979 	  break;
17980 
17981 	case THUMB16_TYPE:
17982 	case THUMB32_TYPE:
17983 	  sym_type = ARM_MAP_THUMB;
17984 	  break;
17985 
17986 	case DATA_TYPE:
17987 	  sym_type = ARM_MAP_DATA;
17988 	  break;
17989 
17990 	default:
17991 	  BFD_FAIL ();
17992 	  return false;
17993 	}
17994 
17995       if (template_sequence[i].type != prev_type)
17996 	{
17997 	  prev_type = template_sequence[i].type;
17998 	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
17999 	    return false;
18000 	}
18001 
18002       switch (template_sequence[i].type)
18003 	{
18004 	case ARM_TYPE:
18005 	case THUMB32_TYPE:
18006 	  size += 4;
18007 	  break;
18008 
18009 	case THUMB16_TYPE:
18010 	  size += 2;
18011 	  break;
18012 
18013 	case DATA_TYPE:
18014 	  size += 4;
18015 	  break;
18016 
18017 	default:
18018 	  BFD_FAIL ();
18019 	  return false;
18020 	}
18021     }
18022 
18023   return true;
18024 }
18025 
18026 /* Output mapping symbols for linker generated sections,
18027    and for those data-only sections that do not have a
18028    $d.  */
18029 
18030 static bool
18031 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18032 				  struct bfd_link_info *info,
18033 				  void *flaginfo,
18034 				  int (*func) (void *, const char *,
18035 					       Elf_Internal_Sym *,
18036 					       asection *,
18037 					       struct elf_link_hash_entry *))
18038 {
18039   output_arch_syminfo osi;
18040   struct elf32_arm_link_hash_table *htab;
18041   bfd_vma offset;
18042   bfd_size_type size;
18043   bfd *input_bfd;
18044 
18045   htab = elf32_arm_hash_table (info);
18046   if (htab == NULL)
18047     return false;
18048 
18049   check_use_blx (htab);
18050 
18051   osi.flaginfo = flaginfo;
18052   osi.info = info;
18053   osi.func = func;
18054 
18055   /* Add a $d mapping symbol to data-only sections that
18056      don't have any mapping symbol.  This may result in (harmless) redundant
18057      mapping symbols.  */
18058   for (input_bfd = info->input_bfds;
18059        input_bfd != NULL;
18060        input_bfd = input_bfd->link.next)
18061     {
18062       if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18063 	for (osi.sec = input_bfd->sections;
18064 	     osi.sec != NULL;
18065 	     osi.sec = osi.sec->next)
18066 	  {
18067 	    if (osi.sec->output_section != NULL
18068 		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18069 		    != 0)
18070 		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18071 		   == SEC_HAS_CONTENTS
18072 		&& get_arm_elf_section_data (osi.sec) != NULL
18073 		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
18074 		&& osi.sec->size > 0
18075 		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
18076 	      {
18077 		osi.sec_shndx = _bfd_elf_section_from_bfd_section
18078 		  (output_bfd, osi.sec->output_section);
18079 		if (osi.sec_shndx != (int)SHN_BAD)
18080 		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18081 	      }
18082 	  }
18083     }
18084 
18085   /* ARM->Thumb glue.  */
18086   if (htab->arm_glue_size > 0)
18087     {
18088       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18089 					ARM2THUMB_GLUE_SECTION_NAME);
18090 
18091       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18092 	  (output_bfd, osi.sec->output_section);
18093       if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18094 	  || htab->pic_veneer)
18095 	size = ARM2THUMB_PIC_GLUE_SIZE;
18096       else if (htab->use_blx)
18097 	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18098       else
18099 	size = ARM2THUMB_STATIC_GLUE_SIZE;
18100 
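      /* Each Arm-to-Thumb veneer ends with a literal word, so mark the code
	 start with $a and the final word with $d.  */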
18101       for (offset = 0; offset < htab->arm_glue_size; offset += size)
18102 	{
18103 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18104 	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18105 	}
18106     }
18107 
18108   /* Thumb->ARM glue.  */
18109   if (htab->thumb_glue_size > 0)
18110     {
18111       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18112 					THUMB2ARM_GLUE_SECTION_NAME);
18113 
18114       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18115 	  (output_bfd, osi.sec->output_section);
18116       size = THUMB2ARM_GLUE_SIZE;
18117 
18118       for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18119 	{
18120 	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18121 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18122 	}
18123     }
18124 
18125   /* ARMv4 BX veneers.  */
18126   if (htab->bx_glue_size > 0)
18127     {
18128       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18129 					ARM_BX_GLUE_SECTION_NAME);
18130 
18131       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18132 	  (output_bfd, osi.sec->output_section);
18133 
18134       elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18135     }
18136 
18137   /* Long calls stubs.  */
18138   if (htab->stub_bfd && htab->stub_bfd->sections)
18139     {
18140       asection* stub_sec;
18141 
18142       for (stub_sec = htab->stub_bfd->sections;
18143 	   stub_sec != NULL;
18144 	   stub_sec = stub_sec->next)
18145 	{
18146 	  /* Ignore non-stub sections.  */
18147 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
18148 	    continue;
18149 
18150 	  osi.sec = stub_sec;
18151 
18152 	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
18153 	    (output_bfd, osi.sec->output_section);
18154 
18155 	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18156 	}
18157     }
18158 
18159   /* Finally, output mapping symbols for the PLT.  */
18160   if (htab->root.splt && htab->root.splt->size > 0)
18161     {
18162       osi.sec = htab->root.splt;
18163       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18164 		       (output_bfd, osi.sec->output_section));
18165 
18166       /* Output mapping symbols for the plt header.  */
18167       if (htab->root.target_os == is_vxworks)
18168 	{
18169 	  /* VxWorks shared libraries have no PLT header.  */
18170 	  if (!bfd_link_pic (info))
18171 	    {
18172 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18173 		return false;
18174 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18175 		return false;
18176 	    }
18177 	}
18178       else if (htab->root.target_os == is_nacl)
18179 	{
18180 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18181 	    return false;
18182 	}
18183       else if (using_thumb_only (htab) && !htab->fdpic_p)
18184 	{
18185 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18186 	    return false;
18187 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18188 	    return false;
18189 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18190 	    return false;
18191 	}
18192       else if (!htab->fdpic_p)
18193 	{
18194 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18195 	    return false;
18196 #ifndef FOUR_WORD_PLT
18197 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18198 	    return false;
18199 #endif
18200 	}
18201     }
18202   if (htab->root.target_os == is_nacl
18203       && htab->root.iplt
18204       && htab->root.iplt->size > 0)
18205     {
18206       /* NaCl uses a special first entry in .iplt too.  */
18207       osi.sec = htab->root.iplt;
18208       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18209 		       (output_bfd, osi.sec->output_section));
18210       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18211 	return false;
18212     }
18213   if ((htab->root.splt && htab->root.splt->size > 0)
18214       || (htab->root.iplt && htab->root.iplt->size > 0))
18215     {
18216       elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18217       for (input_bfd = info->input_bfds;
18218 	   input_bfd != NULL;
18219 	   input_bfd = input_bfd->link.next)
18220 	{
18221 	  struct arm_local_iplt_info **local_iplt;
18222 	  unsigned int i, num_syms;
18223 
18224 	  local_iplt = elf32_arm_local_iplt (input_bfd);
18225 	  if (local_iplt != NULL)
18226 	    {
18227 	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
18228 	      if (num_syms > elf32_arm_num_entries (input_bfd))
18229 		{
18230 		  _bfd_error_handler (_("\
18231 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18232 				      input_bfd,
18233 				      (unsigned long) elf32_arm_num_entries (input_bfd),
18234 				      num_syms);
18235 		  return false;
18236 		}
18237 	      for (i = 0; i < num_syms; i++)
18238 		if (local_iplt[i] != NULL
18239 		    && !elf32_arm_output_plt_map_1 (&osi, true,
18240 						    &local_iplt[i]->root,
18241 						    &local_iplt[i]->arm))
18242 		  return false;
18243 	    }
18244 	}
18245     }
18246   if (htab->root.tlsdesc_plt != 0)
18247     {
18248       /* Mapping symbols for the lazy tls trampoline.  */
18249       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
18250 				     htab->root.tlsdesc_plt))
18251 	return false;
18252 
18253       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18254 				     htab->root.tlsdesc_plt + 24))
18255 	return false;
18256     }
18257   if (htab->tls_trampoline != 0)
18258     {
18259       /* Mapping symbols for the tls trampoline.  */
18260       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18261 	return false;
18262 #ifdef FOUR_WORD_PLT
18263       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18264 				     htab->tls_trampoline + 12))
18265 	return false;
18266 #endif
18267     }
18268 
18269   return true;
18270 }
18271 
18272 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18273    the import library.  All SYMCOUNT symbols of ABFD can be examined
18274    from their pointers in SYMS.  Pointers of symbols to keep should be
18275    stored contiguously at the beginning of that array.
18276 
18277    Returns the number of symbols to keep.  */
18278 
18279 static unsigned int
18280 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18281 			       struct bfd_link_info *info,
18282 			       asymbol **syms, long symcount)
18283 {
18284   size_t maxnamelen;
18285   char *cmse_name;
18286   long src_count, dst_count = 0;
18287   struct elf32_arm_link_hash_table *htab;
18288 
18289   htab = elf32_arm_hash_table (info);
18290   if (!htab->stub_bfd || !htab->stub_bfd->sections)
18291     symcount = 0;
18292 
18293   maxnamelen = 128;
18294   cmse_name = (char *) bfd_malloc (maxnamelen);
18295   BFD_ASSERT (cmse_name);
18296 
18297   for (src_count = 0; src_count < symcount; src_count++)
18298     {
18299       struct elf32_arm_link_hash_entry *cmse_hash;
18300       asymbol *sym;
18301       flagword flags;
18302       char *name;
18303       size_t namelen;
18304 
18305       sym = syms[src_count];
18306       flags = sym->flags;
18307       name = (char *) bfd_asymbol_name (sym);
18308 
18309       if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18310 	continue;
18311       if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18312 	continue;
18313 
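      /* Keep SYM only if a corresponding CMSE_PREFIX-prefixed special symbol
	 exists and is a defined function, i.e. SYM really names a CMSE entry
	 function with a secure gateway.  */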
18314       namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18315       if (namelen > maxnamelen)
18316 	{
18317 	  cmse_name = (char *)
18318 	    bfd_realloc (cmse_name, namelen);
18319 	  maxnamelen = namelen;
18320 	}
18321       snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18322       cmse_hash = (struct elf32_arm_link_hash_entry *)
18323 	elf_link_hash_lookup (&(htab)->root, cmse_name, false, false, true);
18324 
18325       if (!cmse_hash
18326 	  || (cmse_hash->root.root.type != bfd_link_hash_defined
18327 	      && cmse_hash->root.root.type != bfd_link_hash_defweak)
18328 	  || cmse_hash->root.type != STT_FUNC)
18329 	continue;
18330 
18331       syms[dst_count++] = sym;
18332     }
18333   free (cmse_name);
18334 
18335   syms[dst_count] = NULL;
18336 
18337   return dst_count;
18338 }
18339 
18340 /* Filter symbols of ABFD to include in the import library.  All
18341    SYMCOUNT symbols of ABFD can be examined from their pointers in
18342    SYMS.  Pointers of symbols to keep should be stored contiguously at
18343    the beginning of that array.
18344 
18345    Returns the number of symbols to keep.  */
18346 
18347 static unsigned int
18348 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18349 				 struct bfd_link_info *info,
18350 				 asymbol **syms, long symcount)
18351 {
18352   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18353 
18354   /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18355      Development Tools" (ARM-ECM-0359818) mandates that the Secure Gateway
18356      import library be a relocatable object file.  */
18357   BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18358   if (globals->cmse_implib)
18359     return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18360   else
18361     return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18362 }
18363 
18364 /* Allocate target specific section data.  */
18365 
18366 static bool
18367 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18368 {
18369   if (!sec->used_by_bfd)
18370     {
18371       _arm_elf_section_data *sdata;
18372       size_t amt = sizeof (*sdata);
18373 
18374       sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18375       if (sdata == NULL)
18376 	return false;
18377       sec->used_by_bfd = sdata;
18378     }
18379 
18380   return _bfd_elf_new_section_hook (abfd, sec);
18381 }
18382 
18383 
18384 /* Used to order a list of mapping symbols by address.  */
18385 
18386 static int
18387 elf32_arm_compare_mapping (const void * a, const void * b)
18388 {
18389   const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18390   const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18391 
18392   if (amap->vma > bmap->vma)
18393     return 1;
18394   else if (amap->vma < bmap->vma)
18395     return -1;
18396   else if (amap->type > bmap->type)
18397     /* Ensure results do not depend on the host qsort for objects with
18398        multiple mapping symbols at the same address by sorting on type
18399        after vma.  */
18400     return 1;
18401   else if (amap->type < bmap->type)
18402     return -1;
18403   else
18404     return 0;
18405 }
18406 
18407 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */
18408 
18409 static unsigned long
18410 offset_prel31 (unsigned long addr, bfd_vma offset)
18411 {
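  /* Bit 31 of ADDR is preserved while the addition wraps within the low
     31 bits, matching the width of a PREL31 field.  */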
18412   return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18413 }
18414 
18415 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18416    relocations.  */
18417 
18418 static void
18419 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18420 {
18421   unsigned long first_word = bfd_get_32 (output_bfd, from);
18422   unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18423 
18424   /* High bit of first word is supposed to be zero.  */
18425   if ((first_word & 0x80000000ul) == 0)
18426     first_word = offset_prel31 (first_word, offset);
18427 
18428   /* If the high bit of the second word is clear, and the bit pattern is not 0x1
18429      (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
18430   if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18431     second_word = offset_prel31 (second_word, offset);
18432 
18433   bfd_put_32 (output_bfd, first_word, to);
18434   bfd_put_32 (output_bfd, second_word, to + 4);
18435 }
18436 
18437 /* Data for make_branch_to_a8_stub().  */
18438 
18439 struct a8_branch_to_stub_data
18440 {
18441   asection *writing_section;
18442   bfd_byte *contents;
18443 };
18444 
18445 
18446 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18447    places for a particular section.  */
18448 
18449 static bool
18450 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18451 		       void *in_arg)
18452 {
18453   struct elf32_arm_stub_hash_entry *stub_entry;
18454   struct a8_branch_to_stub_data *data;
18455   bfd_byte *contents;
18456   unsigned long branch_insn;
18457   bfd_vma veneered_insn_loc, veneer_entry_loc;
18458   bfd_signed_vma branch_offset;
18459   bfd *abfd;
18460   unsigned int loc;
18461 
18462   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18463   data = (struct a8_branch_to_stub_data *) in_arg;
18464 
18465   if (stub_entry->target_section != data->writing_section
18466       || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18467     return true;
18468 
18469   contents = data->contents;
18470 
18471   /* We use target_section as Cortex-A8 erratum workaround stubs are only
18472      generated when both source and target are in the same section.  */
18473   veneered_insn_loc = stub_entry->target_section->output_section->vma
18474 		      + stub_entry->target_section->output_offset
18475 		      + stub_entry->source_value;
18476 
18477   veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18478 		     + stub_entry->stub_sec->output_offset
18479 		     + stub_entry->stub_offset;
18480 
18481   if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18482     veneered_insn_loc &= ~3u;
18483 
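  /* The branch reads its PC as the instruction address plus 4 in Thumb
     state, hence the -4 below.  */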
18484   branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
18485 
18486   abfd = stub_entry->target_section->owner;
18487   loc = stub_entry->source_value;
18488 
18489   /* We attempt to avoid this condition by setting stubs_always_after_branch
18490      in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18491      This check is just to be on the safe side...  */
18492   if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18493     {
18494       _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18495 			    "allocated in unsafe location"), abfd);
18496       return false;
18497     }
18498 
18499   switch (stub_entry->stub_type)
18500     {
18501     case arm_stub_a8_veneer_b:
18502     case arm_stub_a8_veneer_b_cond:
18503       branch_insn = 0xf0009000;
18504       goto jump24;
18505 
18506     case arm_stub_a8_veneer_blx:
18507       branch_insn = 0xf000e800;
18508       goto jump24;
18509 
18510     case arm_stub_a8_veneer_bl:
18511       {
18512 	unsigned int i1, j1, i2, j2, s;
18513 
18514 	branch_insn = 0xf000d000;
18515 
18516       jump24:
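	/* Thumb-2 B.W/BL encodings reach +/-16MB; the veneer must be
	   reachable from the original branch.  */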
18517 	if (branch_offset < -16777216 || branch_offset > 16777214)
18518 	  {
18519 	    /* There's not much we can do apart from complain if this
18520 	       happens.  */
18521 	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18522 				  "of range (input file too large)"), abfd);
18523 	    return false;
18524 	  }
18525 
18526 	/* i1 = not(j1 eor s), so:
18527 	   not i1 = j1 eor s
18528 	   j1 = (not i1) eor s.  */
18529 
18530 	branch_insn |= (branch_offset >> 1) & 0x7ff;
18531 	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18532 	i2 = (branch_offset >> 22) & 1;
18533 	i1 = (branch_offset >> 23) & 1;
18534 	s = (branch_offset >> 24) & 1;
18535 	j1 = (!i1) ^ s;
18536 	j2 = (!i2) ^ s;
18537 	branch_insn |= j2 << 11;
18538 	branch_insn |= j1 << 13;
18539 	branch_insn |= s << 26;
18540       }
18541       break;
18542 
18543     default:
18544       BFD_FAIL ();
18545       return false;
18546     }
18547 
18548   bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18549   bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18550 
18551   return true;
18552 }
18553 
18554 /* Beginning of stm32l4xx work-around.  */
18555 
18556 /* Functions encoding instructions necessary for the emission of the
18557    fix-stm32l4xx-629360.
18558    Encoding is extracted from the
18559    ARM (C) Architecture Reference Manual
18560    ARMv7-A and ARMv7-R edition
18561    ARM DDI 0406C.b (ID072512).  */
18562 
18563 static inline bfd_vma
18564 create_instruction_branch_absolute (int branch_offset)
18565 {
18566   /* A8.8.18 B (A8-334)
18567      B target_address (Encoding T4).  */
18568   /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
18569   /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
18570   /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */
18571 
18572   int s = ((branch_offset & 0x1000000) >> 24);
18573   int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18574   int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18575 
18576   if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18577     BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");
18578 
18579   bfd_vma patched_inst = 0xf0009000
18580     | s << 26 /* S.  */
18581     | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
18582     | j1 << 13 /* J1.  */
18583     | j2 << 11 /* J2.  */
18584     | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */
18585 
18586   return patched_inst;
18587 }
18588 
18589 static inline bfd_vma
18590 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18591 {
18592   /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18593      LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
18594   bfd_vma patched_inst = 0xe8900000
18595     | (/*W=*/wback << 21)
18596     | (base_reg << 16)
18597     | (reg_mask & 0x0000ffff);
18598 
18599   return patched_inst;
18600 }
18601 
18602 static inline bfd_vma
18603 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18604 {
18605   /* A8.8.60 LDMDB/LDMEA (A8-402)
18606      LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
18607   bfd_vma patched_inst = 0xe9100000
18608     | (/*W=*/wback << 21)
18609     | (base_reg << 16)
18610     | (reg_mask & 0x0000ffff);
18611 
18612   return patched_inst;
18613 }
18614 
18615 static inline bfd_vma
18616 create_instruction_mov (int target_reg, int source_reg)
18617 {
18618   /* A8.8.103 MOV (register) (A8-486)
18619      MOV Rd, Rm (Encoding T1).  */
18620   bfd_vma patched_inst = 0x4600
18621     | (target_reg & 0x7)
18622     | ((target_reg & 0x8) >> 3) << 7
18623     | (source_reg << 3);
18624 
18625   return patched_inst;
18626 }
18627 
18628 static inline bfd_vma
18629 create_instruction_sub (int target_reg, int source_reg, int value)
18630 {
18631   /* A8.8.221 SUB (immediate) (A8-708)
18632      SUB Rd, Rn, #value (Encoding T3).  */
18633   bfd_vma patched_inst = 0xf1a00000
18634     | (target_reg << 8)
18635     | (source_reg << 16)
18636     | (/*S=*/0 << 20)
18637     | ((value & 0x800) >> 11) << 26
18638     | ((value & 0x700) >>  8) << 12
18639     | (value & 0x0ff);
18640 
18641   return patched_inst;
18642 }
18643 
18644 static inline bfd_vma
18645 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18646 			   int first_reg)
18647 {
18648   /* A8.8.332 VLDM (A8-922)
18649      VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
18650   bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18651     | (/*W=*/wback << 21)
18652     | (base_reg << 16)
18653     | (num_words & 0x000000ff)
18654     | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18655     | (first_reg & 0x00000001) << 22;
18656 
18657   return patched_inst;
18658 }
18659 
18660 static inline bfd_vma
18661 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18662 			   int first_reg)
18663 {
18664   /* A8.8.332 VLDM (A8-922)
18665      VLDM{MODE} Rn!, {list} (Encoding T1 or T2).  */
18666   bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18667     | (base_reg << 16)
18668     | (num_words & 0x000000ff)
18669     | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18670     | (first_reg & 0x00000001) << 22;
18671 
18672   return patched_inst;
18673 }
18674 
18675 static inline bfd_vma
18676 create_instruction_udf_w (int value)
18677 {
18678   /* A8.8.247 UDF (A8-758)
18679      Undefined (Encoding T2).  */
18680   bfd_vma patched_inst = 0xf7f0a000
18681     | (value & 0x00000fff)
18682     | (value & 0x000f0000) << 16;
18683 
18684   return patched_inst;
18685 }
18686 
18687 static inline bfd_vma
18688 create_instruction_udf (int value)
18689 {
18690   /* A8.8.247 UDF (A8-758)
18691      Undefined (Encoding T1).  */
18692   bfd_vma patched_inst = 0xde00
18693     | (value & 0xff);
18694 
18695   return patched_inst;
18696 }
18697 
18698 /* Functions writing an instruction in memory, returning the next
18699    memory position to write to.  */
18700 
18701 static inline bfd_byte *
18702 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18703 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18704 {
18705   put_thumb2_insn (htab, output_bfd, insn, pt);
18706   return pt + 4;
18707 }
18708 
18709 static inline bfd_byte *
18710 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18711 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18712 {
18713   put_thumb_insn (htab, output_bfd, insn, pt);
18714   return pt + 2;
18715 }
18716 
18717 /* Function filling up a region in memory with T1 and T2 UDFs taking
18718    care of alignment.  */
18719 
18720 static bfd_byte *
18721 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18722 			 bfd *			 output_bfd,
18723 			 const bfd_byte * const	 base_stub_contents,
18724 			 bfd_byte * const	 from_stub_contents,
18725 			 const bfd_byte * const	 end_stub_contents)
18726 {
18727   bfd_byte *current_stub_contents = from_stub_contents;
18728 
18729   /* Fill the remainder of the stub with deterministic contents: UDF
18730      instructions.
18731      If the write position is 2-byte aligned but not 4-byte aligned, emit
18732      one T1 UDF first so that 4-byte T2 UDFs can be used for the rest.  */
18733   if ((current_stub_contents < end_stub_contents)
18734       && !((current_stub_contents - base_stub_contents) % 2)
18735       && ((current_stub_contents - base_stub_contents) % 4))
18736     current_stub_contents =
18737       push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18738 			  create_instruction_udf (0));
18739 
18740   for (; current_stub_contents < end_stub_contents;)
18741     current_stub_contents =
18742       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18743 			  create_instruction_udf_w (0));
18744 
18745   return current_stub_contents;
18746 }
18747 
18748 /* Functions writing the stream of instructions equivalent to the
18749    derived sequence for ldmia, ldmdb, vldm respectively.  */
18750 
18751 static void
18752 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18753 				       bfd * output_bfd,
18754 				       const insn32 initial_insn,
18755 				       const bfd_byte *const initial_insn_addr,
18756 				       bfd_byte *const base_stub_contents)
18757 {
18758   int wback = (initial_insn & 0x00200000) >> 21;
18759   int ri, rn = (initial_insn & 0x000F0000) >> 16;
18760   int insn_all_registers = initial_insn & 0x0000ffff;
18761   int insn_low_registers, insn_high_registers;
18762   int usable_register_mask;
18763   int nb_registers = elf32_arm_popcount (insn_all_registers);
18764   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18765   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18766   bfd_byte *current_stub_contents = base_stub_contents;
18767 
18768   BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18769 
18770   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18771      load sequences of eight registers or fewer, which do not trigger the
18772      hardware issue.  */
18773   if (nb_registers <= 8)
18774     {
18775       /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
18776       current_stub_contents =
18777 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18778 			    initial_insn);
18779 
18780       /* B initial_insn_addr+4.  */
18781       if (!restore_pc)
18782 	current_stub_contents =
18783 	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18784 			      create_instruction_branch_absolute
18785 			      (initial_insn_addr - current_stub_contents));
18786 
18787       /* Fill the remainder of the stub with deterministic contents.  */
18788       current_stub_contents =
18789 	stm32l4xx_fill_stub_udf (htab, output_bfd,
18790 				 base_stub_contents, current_stub_contents,
18791 				 base_stub_contents +
18792 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18793 
18794       return;
18795     }
18796 
18797   /* - reg_list[13] == 0.  */
18798   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
18799 
18800   /* - reg_list[14] & reg_list[15] != 1.  */
18801   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18802 
18803   /* - if (wback==1) reg_list[rn] == 0.  */
18804   BFD_ASSERT (!wback || !restore_rn);
18805 
18806   /* - nb_registers > 8.  */
18807   BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18808 
18809   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
18810 
18811   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18812     - One with the 7 lowest registers (register mask 0x007F)
18813       This LDM will finally contain between 2 and 7 registers
18814     - One with the 7 highest registers (register mask 0xDF80)
18815       This ldm will finally contain between 2 and 7 registers.  */
18816   insn_low_registers = insn_all_registers & 0x007F;
18817   insn_high_registers = insn_all_registers & 0xDF80;
18818 
18819   /* A spare register may be needed during this veneer to temporarily
18820      handle the base register.  This register will be restored with the
18821      last LDM operation.
18822      The usable register may be any general purpose register (that
18823      excludes PC, SP, LR : register mask is 0x1FFF).  */
18824   usable_register_mask = 0x1FFF;
18825 
18826   /* Generate the stub function.  */
18827   if (wback)
18828     {
18829       /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
18830       current_stub_contents =
18831 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18832 			    create_instruction_ldmia
18833 			    (rn, /*wback=*/1, insn_low_registers));
18834 
18835       /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
18836       current_stub_contents =
18837 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18838 			    create_instruction_ldmia
18839 			    (rn, /*wback=*/1, insn_high_registers));
18840       if (!restore_pc)
18841 	{
18842 	  /* B initial_insn_addr+4.  */
18843 	  current_stub_contents =
18844 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18845 				create_instruction_branch_absolute
18846 				(initial_insn_addr - current_stub_contents));
18847        }
18848     }
18849   else /* if (!wback).  */
18850     {
18851       ri = rn;
18852 
18853       /* If Rn is not part of the high-register-list, move it there.  */
18854       if (!(insn_high_registers & (1 << rn)))
18855 	{
18856 	  /* Choose a Ri in the high-register-list that will be restored.  */
18857 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
18858 
18859 	  /* MOV Ri, Rn.  */
18860 	  current_stub_contents =
18861 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18862 				create_instruction_mov (ri, rn));
18863 	}
18864 
18865       /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
18866       current_stub_contents =
18867 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18868 			    create_instruction_ldmia
18869 			    (ri, /*wback=*/1, insn_low_registers));
18870 
18871       /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
18872       current_stub_contents =
18873 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18874 			    create_instruction_ldmia
18875 			    (ri, /*wback=*/0, insn_high_registers));
18876 
18877       if (!restore_pc)
18878 	{
18879 	  /* B initial_insn_addr+4.  */
18880 	  current_stub_contents =
18881 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18882 				create_instruction_branch_absolute
18883 				(initial_insn_addr - current_stub_contents));
18884 	}
18885     }
18886 
18887   /* Fill the remainder of the stub with deterministic contents.  */
18888   current_stub_contents =
18889     stm32l4xx_fill_stub_udf (htab, output_bfd,
18890 			     base_stub_contents, current_stub_contents,
18891 			     base_stub_contents +
18892 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18893 }
18894 
18895 static void
18896 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
18897 				       bfd * output_bfd,
18898 				       const insn32 initial_insn,
18899 				       const bfd_byte *const initial_insn_addr,
18900 				       bfd_byte *const base_stub_contents)
18901 {
18902   int wback = (initial_insn & 0x00200000) >> 21;
18903   int ri, rn = (initial_insn & 0x000f0000) >> 16;
18904   int insn_all_registers = initial_insn & 0x0000ffff;
18905   int insn_low_registers, insn_high_registers;
18906   int usable_register_mask;
18907   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18908   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18909   int nb_registers = elf32_arm_popcount (insn_all_registers);
18910   bfd_byte *current_stub_contents = base_stub_contents;
18911 
18912   BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
18913 
18914   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18915      load sequences of fewer than 8 registers that do not cause the
18916      hardware issue.  */
18917   if (nb_registers <= 8)
18918     {
18919       /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
18920       current_stub_contents =
18921 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18922 			    initial_insn);
18923 
18924       /* B initial_insn_addr+4.  */
18925       current_stub_contents =
18926 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18927 			    create_instruction_branch_absolute
18928 			    (initial_insn_addr - current_stub_contents));
18929 
18930       /* Fill the remainder of the stub with deterministic contents.  */
18931       current_stub_contents =
18932 	stm32l4xx_fill_stub_udf (htab, output_bfd,
18933 				 base_stub_contents, current_stub_contents,
18934 				 base_stub_contents +
18935 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18936 
18937       return;
18938     }
18939 
18940   /* - reg_list[13] == 0.  */
18941   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
18942 
18943   /* - reg_list[14] and reg_list[15] are not both set.  */
18944   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18945 
18946   /* - if (wback==1) reg_list[rn] == 0.  */
18947   BFD_ASSERT (!wback || !restore_rn);
18948 
18949   /* - nb_registers > 8.  */
18950   BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18951 
18952   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
18953 
18954   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18955     - One with the 7 lowest registers (register mask 0x007F)
18956       This LDM will finally contain between 2 and 7 registers
18957     - One with the 7 highest registers (register mask 0xDF80)
18958       This LDM will finally contain between 2 and 7 registers.  */
18959   insn_low_registers = insn_all_registers & 0x007F;
18960   insn_high_registers = insn_all_registers & 0xDF80;
18961 
18962   /* A spare register may be needed during this veneer to temporarily
18963      handle the base register.  This register will be restored with
18964      the last LDM operation.
18965      The usable register may be any general purpose register excluding
18966      PC, SP and LR (register mask 0x1FFF).  */
18967   usable_register_mask = 0x1FFF;
18968 
18969   /* Generate the stub function.  */
18970   if (!wback && !restore_pc && !restore_rn)
18971     {
18972       /* Choose a Ri in the low-register-list that will be restored.  */
18973       ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
18974 
18975       /* MOV Ri, Rn.  */
18976       current_stub_contents =
18977 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18978 			    create_instruction_mov (ri, rn));
18979 
18980       /* LDMDB Ri!, {R-high-register-list}.  */
18981       current_stub_contents =
18982 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18983 			    create_instruction_ldmdb
18984 			    (ri, /*wback=*/1, insn_high_registers));
18985 
18986       /* LDMDB Ri, {R-low-register-list}.  */
18987       current_stub_contents =
18988 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18989 			    create_instruction_ldmdb
18990 			    (ri, /*wback=*/0, insn_low_registers));
18991 
18992       /* B initial_insn_addr+4.  */
18993       current_stub_contents =
18994 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18995 			    create_instruction_branch_absolute
18996 			    (initial_insn_addr - current_stub_contents));
18997     }
18998   else if (wback && !restore_pc && !restore_rn)
18999     {
19000       /* LDMDB Rn!, {R-high-register-list}.  */
19001       current_stub_contents =
19002 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19003 			    create_instruction_ldmdb
19004 			    (rn, /*wback=*/1, insn_high_registers));
19005 
19006       /* LDMDB Rn!, {R-low-register-list}.  */
19007       current_stub_contents =
19008 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19009 			    create_instruction_ldmdb
19010 			    (rn, /*wback=*/1, insn_low_registers));
19011 
19012       /* B initial_insn_addr+4.  */
19013       current_stub_contents =
19014 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19015 			    create_instruction_branch_absolute
19016 			    (initial_insn_addr - current_stub_contents));
19017     }
19018   else if (!wback && restore_pc && !restore_rn)
19019     {
19020       /* Choose a Ri in the high-register-list that will be restored.  */
19021       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19022 
19023       /* SUB Ri, Rn, #(4*nb_registers).  */
19024       current_stub_contents =
19025 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19026 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19027 
19028       /* LDMIA Ri!, {R-low-register-list}.  */
19029       current_stub_contents =
19030 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19031 			    create_instruction_ldmia
19032 			    (ri, /*wback=*/1, insn_low_registers));
19033 
19034       /* LDMIA Ri, {R-high-register-list}.  */
19035       current_stub_contents =
19036 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19037 			    create_instruction_ldmia
19038 			    (ri, /*wback=*/0, insn_high_registers));
19039     }
19040   else if (wback && restore_pc && !restore_rn)
19041     {
19042       /* Choose a Ri in the high-register-list that will be restored.  */
19043       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19044 
19045       /* SUB Rn, Rn, #(4*nb_registers)  */
19046       current_stub_contents =
19047 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19048 			    create_instruction_sub (rn, rn, (4 * nb_registers)));
19049 
19050       /* MOV Ri, Rn.  */
19051       current_stub_contents =
19052 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19053 			    create_instruction_mov (ri, rn));
19054 
19055       /* LDMIA Ri!, {R-low-register-list}.  */
19056       current_stub_contents =
19057 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19058 			    create_instruction_ldmia
19059 			    (ri, /*wback=*/1, insn_low_registers));
19060 
19061       /* LDMIA Ri, {R-high-register-list}.  */
19062       current_stub_contents =
19063 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19064 			    create_instruction_ldmia
19065 			    (ri, /*wback=*/0, insn_high_registers));
19066     }
19067   else if (!wback && !restore_pc && restore_rn)
19068     {
19069       ri = rn;
19070       if (!(insn_low_registers & (1 << rn)))
19071 	{
19072 	  /* Choose a Ri in the low-register-list that will be restored.  */
19073 	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19074 
19075 	  /* MOV Ri, Rn.  */
19076 	  current_stub_contents =
19077 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19078 				create_instruction_mov (ri, rn));
19079 	}
19080 
19081       /* LDMDB Ri!, {R-high-register-list}.  */
19082       current_stub_contents =
19083 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19084 			    create_instruction_ldmdb
19085 			    (ri, /*wback=*/1, insn_high_registers));
19086 
19087       /* LDMDB Ri, {R-low-register-list}.  */
19088       current_stub_contents =
19089 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19090 			    create_instruction_ldmdb
19091 			    (ri, /*wback=*/0, insn_low_registers));
19092 
19093       /* B initial_insn_addr+4.  */
19094       current_stub_contents =
19095 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19096 			    create_instruction_branch_absolute
19097 			    (initial_insn_addr - current_stub_contents));
19098     }
19099   else if (!wback && restore_pc && restore_rn)
19100     {
19101       ri = rn;
19102       if (!(insn_high_registers & (1 << rn)))
19103 	{
19104 	  /* Choose a Ri in the high-register-list that will be restored.  */
19105 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19106 	}
19107 
19108       /* SUB Ri, Rn, #(4*nb_registers).  */
19109       current_stub_contents =
19110 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19111 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19112 
19113       /* LDMIA Ri!, {R-low-register-list}.  */
19114       current_stub_contents =
19115 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19116 			    create_instruction_ldmia
19117 			    (ri, /*wback=*/1, insn_low_registers));
19118 
19119       /* LDMIA Ri, {R-high-register-list}.  */
19120       current_stub_contents =
19121 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19122 			    create_instruction_ldmia
19123 			    (ri, /*wback=*/0, insn_high_registers));
19124     }
19125   else if (wback && restore_rn)
19126     {
19127       /* The assembler should never have accepted this encoding.  */
19128       BFD_ASSERT (0 && "Cannot patch an instruction that has "
19129 	"undefined behavior.\n");
19130     }
19131 
19132   /* Fill the remainder of the stub with deterministic contents.  */
19133   current_stub_contents =
19134     stm32l4xx_fill_stub_udf (htab, output_bfd,
19135 			     base_stub_contents, current_stub_contents,
19136 			     base_stub_contents +
19137 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19138 
19139 }
19140 
19141 static void
19142 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19143 				      bfd * output_bfd,
19144 				      const insn32 initial_insn,
19145 				      const bfd_byte *const initial_insn_addr,
19146 				      bfd_byte *const base_stub_contents)
19147 {
19148   int num_words = initial_insn & 0xff;
19149   bfd_byte *current_stub_contents = base_stub_contents;
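  /* NUM_WORDS is the imm8 field of the VLDM encoding: the transfer
     length counted in 32-bit words (twice the register count for a
     double-precision register list).  */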
19150 
19151   BFD_ASSERT (is_thumb2_vldm (initial_insn));
19152 
19153   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19154      load sequences of fewer than 8 words that do not cause the
19155      hardware issue.  */
19156   if (num_words <= 8)
19157     {
19158       /* Untouched instruction.  */
19159       current_stub_contents =
19160 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19161 			    initial_insn);
19162 
19163       /* B initial_insn_addr+4.  */
19164       current_stub_contents =
19165 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19166 			    create_instruction_branch_absolute
19167 			    (initial_insn_addr - current_stub_contents));
19168     }
19169   else
19170     {
19171       bool is_dp = /* DP encoding.  */
19172 	(initial_insn & 0xfe100f00) == 0xec100b00;
19173       bool is_ia_nobang = /* (IA without !).  */
19174 	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
19175       bool is_ia_bang = /* (IA with !) - includes VPOP.  */
19176 	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
19177       bool is_db_bang = /* (DB with !).  */
19178 	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
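      /* The expressions above isolate bits 24:21 of the encoding (the P,
	 U, D and W bits) with D masked out, classifying the addressing
	 mode as IA without write-back, IA with write-back (which also
	 covers VPOP), or DB with write-back.  */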
19179       int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19180       /* d = UInt (Vd:D);.  */
19181       int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19182 	| (((unsigned int)initial_insn << 9) >> 31);
19183 
19184       /* Compute the number of 8-words chunks needed to split.  */
19185       int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19186       int chunk;
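      /* For example, a 13-word transfer gives chunks == 2: the first
	 chunk moves 8 words and the second the remaining 5.  */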
19187 
19188       /* The test coverage has been done under the hypothesis that
19189 	 exactly one of the previous is_ predicates is true.  */
19191       BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19192 		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
19193 
19194       /* We split the words in one pass for all cases, then emit
19195 	 the adjustments:
19196 
19197 	 vldm rx, {...}
19198 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19199 	 -> sub rx, rx, #size (list)
19200 
19201 	 vldm rx!, {...}
19202 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19203 	 This also handles the VPOP instruction (when rx is sp)
19204 
19205 	 vldmdb rx!, {...}
19206 	 -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
19207       for (chunk = 0; chunk < chunks; ++chunk)
19208 	{
19209 	  bfd_vma new_insn = 0;
19210 
19211 	  if (is_ia_nobang || is_ia_bang)
19212 	    {
19213 	      new_insn = create_instruction_vldmia
19214 		(base_reg,
19215 		 is_dp,
19216 		 /*wback=*/1,
19217 		 chunks - (chunk + 1) ?
19218 		 8 : num_words - chunk * 8,
19219 		 first_reg + chunk * 8);
19220 	    }
19221 	  else if (is_db_bang)
19222 	    {
19223 	      new_insn = create_instruction_vldmdb
19224 		(base_reg,
19225 		 is_dp,
19226 		 chunks - (chunk + 1) ?
19227 		 8 : num_words - chunk * 8,
19228 		 first_reg + chunk * 8);
19229 	    }
19230 
19231 	  if (new_insn)
19232 	    current_stub_contents =
19233 	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19234 				  new_insn);
19235 	}
19236 
19237       /* Only this case requires the compensating subtraction on the
19238 	 base register.  */
19239       if (is_ia_nobang)
19240 	{
19241 	  current_stub_contents =
19242 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19243 				create_instruction_sub
19244 				(base_reg, base_reg, 4*num_words));
19245 	}
19246 
19247       /* B initial_insn_addr+4.  */
19248       current_stub_contents =
19249 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19250 			    create_instruction_branch_absolute
19251 			    (initial_insn_addr - current_stub_contents));
19252     }
19253 
19254   /* Fill the remainder of the stub with deterministic contents.  */
19255   current_stub_contents =
19256     stm32l4xx_fill_stub_udf (htab, output_bfd,
19257 			     base_stub_contents, current_stub_contents,
19258 			     base_stub_contents +
19259 			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19260 }
19261 
19262 static void
19263 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19264 				 bfd * output_bfd,
19265 				 const insn32 wrong_insn,
19266 				 const bfd_byte *const wrong_insn_addr,
19267 				 bfd_byte *const stub_contents)
19268 {
19269   if (is_thumb2_ldmia (wrong_insn))
19270     stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19271 					   wrong_insn, wrong_insn_addr,
19272 					   stub_contents);
19273   else if (is_thumb2_ldmdb (wrong_insn))
19274     stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19275 					   wrong_insn, wrong_insn_addr,
19276 					   stub_contents);
19277   else if (is_thumb2_vldm (wrong_insn))
19278     stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19279 					  wrong_insn, wrong_insn_addr,
19280 					  stub_contents);
19281 }
19282 
19283 /* End of stm32l4xx work-around.  */
19284 
19285 
19286 /* Do code byteswapping.  Return FALSE afterwards so that the section is
19287    written out as normal.  */
19288 
19289 static bool
19290 elf32_arm_write_section (bfd *output_bfd,
19291 			 struct bfd_link_info *link_info,
19292 			 asection *sec,
19293 			 bfd_byte *contents)
19294 {
19295   unsigned int mapcount, errcount;
19296   _arm_elf_section_data *arm_data;
19297   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19298   elf32_arm_section_map *map;
19299   elf32_vfp11_erratum_list *errnode;
19300   elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19301   bfd_vma ptr;
19302   bfd_vma end;
19303   bfd_vma offset = sec->output_section->vma + sec->output_offset;
19304   bfd_byte tmp;
19305   unsigned int i;
19306 
19307   if (globals == NULL)
19308     return false;
19309 
19310   /* If this section has not been allocated an _arm_elf_section_data
19311      structure then we cannot record anything.  */
19312   arm_data = get_arm_elf_section_data (sec);
19313   if (arm_data == NULL)
19314     return false;
19315 
19316   mapcount = arm_data->mapcount;
19317   map = arm_data->map;
19318   errcount = arm_data->erratumcount;
19319 
19320   if (errcount != 0)
19321     {
19322       unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
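      /* ENDIANFLIP is XOR-ed into the byte index below so that each
	 patched 32-bit word is stored in the output byte order: 0 keeps
	 little-endian ordering, 3 reverses the bytes within an aligned
	 word for big-endian output.  */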
19323 
19324       for (errnode = arm_data->erratumlist; errnode != 0;
19325 	   errnode = errnode->next)
19326 	{
19327 	  bfd_vma target = errnode->vma - offset;
19328 
19329 	  switch (errnode->type)
19330 	    {
19331 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19332 	      {
19333 		bfd_vma branch_to_veneer;
19334 		/* Original condition code of instruction, plus bit mask for
19335 		   ARM B instruction.  */
19336 		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19337 				  | 0x0a000000;
19338 
19339 		/* The instruction is before the label.  */
19340 		target -= 4;
19341 
19342 		/* Above offset included in -4 below.  */
19343 		branch_to_veneer = errnode->u.b.veneer->vma
19344 				   - errnode->vma - 4;
19345 
19346 		if ((signed) branch_to_veneer < -(1 << 25)
19347 		    || (signed) branch_to_veneer >= (1 << 25))
19348 		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19349 					"range"), output_bfd);
19350 
19351 		insn |= (branch_to_veneer >> 2) & 0xffffff;
19352 		contents[endianflip ^ target] = insn & 0xff;
19353 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19354 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19355 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19356 	      }
19357 	      break;
19358 
19359 	    case VFP11_ERRATUM_ARM_VENEER:
19360 	      {
19361 		bfd_vma branch_from_veneer;
19362 		unsigned int insn;
19363 
19364 		/* Take size of veneer into account.  */
19365 		branch_from_veneer = errnode->u.v.branch->vma
19366 				     - errnode->vma - 12;
19367 
19368 		if ((signed) branch_from_veneer < -(1 << 25)
19369 		    || (signed) branch_from_veneer >= (1 << 25))
19370 		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19371 					"range"), output_bfd);
19372 
19373 		/* Original instruction.  */
19374 		insn = errnode->u.v.branch->u.b.vfp_insn;
19375 		contents[endianflip ^ target] = insn & 0xff;
19376 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19377 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19378 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19379 
19380 		/* Branch back to insn after original insn.  */
19381 		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19382 		contents[endianflip ^ (target + 4)] = insn & 0xff;
19383 		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19384 		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19385 		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19386 	      }
19387 	      break;
19388 
19389 	    default:
19390 	      abort ();
19391 	    }
19392 	}
19393     }
19394 
19395   if (arm_data->stm32l4xx_erratumcount != 0)
19396     {
19397       for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19398 	   stm32l4xx_errnode != 0;
19399 	   stm32l4xx_errnode = stm32l4xx_errnode->next)
19400 	{
19401 	  bfd_vma target = stm32l4xx_errnode->vma - offset;
19402 
19403 	  switch (stm32l4xx_errnode->type)
19404 	    {
19405 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19406 	      {
19407 		unsigned int insn;
19408 		bfd_vma branch_to_veneer =
19409 		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19410 
19411 		if ((signed) branch_to_veneer < -(1 << 24)
19412 		    || (signed) branch_to_veneer >= (1 << 24))
19413 		  {
19414 		    bfd_vma out_of_range =
19415 		      ((signed) branch_to_veneer < -(1 << 24)) ?
19416 		      - branch_to_veneer - (1 << 24) :
19417 		      ((signed) branch_to_veneer >= (1 << 24)) ?
19418 		      branch_to_veneer - (1 << 24) : 0;
19419 
19420 		    _bfd_error_handler
19421 		      (_("%pB(%#" PRIx64 "): error: "
19422 			 "cannot create STM32L4XX veneer; "
19423 			 "jump out of range by %" PRId64 " bytes; "
19424 			 "cannot encode branch instruction"),
19425 		       output_bfd,
19426 		       (uint64_t) (stm32l4xx_errnode->vma - 4),
19427 		       (int64_t) out_of_range);
19428 		    continue;
19429 		  }
19430 
19431 		insn = create_instruction_branch_absolute
19432 		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19433 
19434 		/* The instruction is before the label.  */
19435 		target -= 4;
19436 
19437 		put_thumb2_insn (globals, output_bfd,
19438 				 (bfd_vma) insn, contents + target);
19439 	      }
19440 	      break;
19441 
19442 	    case STM32L4XX_ERRATUM_VENEER:
19443 	      {
19444 		bfd_byte * veneer;
19445 		bfd_byte * veneer_r;
19446 		unsigned int insn;
19447 
19448 		veneer = contents + target;
19449 		veneer_r = veneer
19450 		  + stm32l4xx_errnode->u.b.veneer->vma
19451 		  - stm32l4xx_errnode->vma - 4;
19452 
19453 		if ((signed) (veneer_r - veneer -
19454 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19455 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19456 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19457 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19458 		    || (signed) (veneer_r - veneer) >= (1 << 24))
19459 		  {
19460 		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19461 					  "veneer"), output_bfd);
19462 		     continue;
19463 		  }
19464 
19465 		/* Original instruction.  */
19466 		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19467 
19468 		stm32l4xx_create_replacing_stub
19469 		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19470 	      }
19471 	      break;
19472 
19473 	    default:
19474 	      abort ();
19475 	    }
19476 	}
19477     }
19478 
19479   if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19480     {
19481       arm_unwind_table_edit *edit_node
19482 	= arm_data->u.exidx.unwind_edit_list;
19483       /* Now, sec->size is the size of the section we will write.  The original
19484 	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19485 	 markers) was sec->rawsize.  (If we performed no edits then rawsize
19486 	 will be zero and we should use size instead.)  */
19487       bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19488       unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19489       unsigned int in_index, out_index;
19490       bfd_vma add_to_offsets = 0;
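      /* Walk the original EXIDX entries and the edit list in parallel:
	 IN_INDEX and OUT_INDEX count 8-byte entries in the input and
	 output respectively, while ADD_TO_OFFSETS accumulates the bias
	 applied to the prel31 offsets of entries copied across edits.  */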
19491 
19492       if (edited_contents == NULL)
19493 	return false;
19494       for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19495 	{
19496 	  if (edit_node)
19497 	    {
19498 	      unsigned int edit_index = edit_node->index;
19499 
19500 	      if (in_index < edit_index && in_index * 8 < input_size)
19501 		{
19502 		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19503 				    contents + in_index * 8, add_to_offsets);
19504 		  out_index++;
19505 		  in_index++;
19506 		}
19507 	      else if (in_index == edit_index
19508 		       || (in_index * 8 >= input_size
19509 			   && edit_index == UINT_MAX))
19510 		{
19511 		  switch (edit_node->type)
19512 		    {
19513 		    case DELETE_EXIDX_ENTRY:
19514 		      in_index++;
19515 		      add_to_offsets += 8;
19516 		      break;
19517 
19518 		    case INSERT_EXIDX_CANTUNWIND_AT_END:
19519 		      {
19520 			asection *text_sec = edit_node->linked_section;
19521 			bfd_vma text_offset = text_sec->output_section->vma
19522 					      + text_sec->output_offset
19523 					      + text_sec->size;
19524 			bfd_vma exidx_offset = offset + out_index * 8;
19525 			unsigned long prel31_offset;
19526 
19527 			/* Note: this is meant to be equivalent to an
19528 			   R_ARM_PREL31 relocation.  These synthetic
19529 			   EXIDX_CANTUNWIND markers are not relocated by the
19530 			   usual BFD method.  */
19531 			prel31_offset = (text_offset - exidx_offset)
19532 					& 0x7ffffffful;
19533 			if (bfd_link_relocatable (link_info))
19534 			  {
19535 			    /* A relocation for the new EXIDX_CANTUNWIND
19536 			       entry is created here, so there is no need
19537 			       to adjust the offset by hand.  */
19538 			    prel31_offset = text_sec->output_offset
19539 					    + text_sec->size;
19540 			  }
19541 
19542 			/* First address we can't unwind.  */
19543 			bfd_put_32 (output_bfd, prel31_offset,
19544 				    &edited_contents[out_index * 8]);
19545 
19546 			/* Code for EXIDX_CANTUNWIND.  */
19547 			bfd_put_32 (output_bfd, 0x1,
19548 				    &edited_contents[out_index * 8 + 4]);
19549 
19550 			out_index++;
19551 			add_to_offsets -= 8;
19552 		      }
19553 		      break;
19554 		    }
19555 
19556 		  edit_node = edit_node->next;
19557 		}
19558 	    }
19559 	  else
19560 	    {
19561 	      /* No more edits, copy remaining entries verbatim.  */
19562 	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19563 				contents + in_index * 8, add_to_offsets);
19564 	      out_index++;
19565 	      in_index++;
19566 	    }
19567 	}
19568 
19569       if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19570 	bfd_set_section_contents (output_bfd, sec->output_section,
19571 				  edited_contents,
19572 				  (file_ptr) sec->output_offset, sec->size);
19573 
19574       return true;
19575     }
19576 
19577   /* Fix code to point to Cortex-A8 erratum stubs.  */
19578   if (globals->fix_cortex_a8)
19579     {
19580       struct a8_branch_to_stub_data data;
19581 
19582       data.writing_section = sec;
19583       data.contents = contents;
19584 
19585       bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19586 			 & data);
19587     }
19588 
19589   if (mapcount == 0)
19590     return false;
19591 
19592   if (globals->byteswap_code)
19593     {
19594       qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
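      /* The mapping symbols recorded in MAP partition the section into
	 ARM code ('a'), Thumb code ('t') and data ('d') regions; sort
	 them by address so that each region [map[i].vma, map[i + 1].vma)
	 can be byte swapped according to its type.  */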
19595 
19596       ptr = map[0].vma;
19597       for (i = 0; i < mapcount; i++)
19598 	{
19599 	  if (i == mapcount - 1)
19600 	    end = sec->size;
19601 	  else
19602 	    end = map[i + 1].vma;
19603 
19604 	  switch (map[i].type)
19605 	    {
19606 	    case 'a':
19607 	      /* Byte swap code words.  */
19608 	      while (ptr + 3 < end)
19609 		{
19610 		  tmp = contents[ptr];
19611 		  contents[ptr] = contents[ptr + 3];
19612 		  contents[ptr + 3] = tmp;
19613 		  tmp = contents[ptr + 1];
19614 		  contents[ptr + 1] = contents[ptr + 2];
19615 		  contents[ptr + 2] = tmp;
19616 		  ptr += 4;
19617 		}
19618 	      break;
19619 
19620 	    case 't':
19621 	      /* Byte swap code halfwords.  */
19622 	      while (ptr + 1 < end)
19623 		{
19624 		  tmp = contents[ptr];
19625 		  contents[ptr] = contents[ptr + 1];
19626 		  contents[ptr + 1] = tmp;
19627 		  ptr += 2;
19628 		}
19629 	      break;
19630 
19631 	    case 'd':
19632 	      /* Leave data alone.  */
19633 	      break;
19634 	    }
19635 	  ptr = end;
19636 	}
19637     }
19638 
19639   free (map);
19640   arm_data->mapcount = -1;
19641   arm_data->mapsize = 0;
19642   arm_data->map = NULL;
19643 
19644   return false;
19645 }
19646 
19647 /* Mangle thumb function symbols as we read them in.  */
19648 
19649 static bool
19650 elf32_arm_swap_symbol_in (bfd * abfd,
19651 			  const void *psrc,
19652 			  const void *pshn,
19653 			  Elf_Internal_Sym *dst)
19654 {
19655   if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19656     return false;
19657   dst->st_target_internal = 0;
19658 
19659   /* New EABI objects mark thumb function symbols by setting the low bit of
19660      the address.  */
19661   if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19662       || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19663     {
19664       if (dst->st_value & 1)
19665 	{
19666 	  dst->st_value &= ~(bfd_vma) 1;
19667 	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19668 				   ST_BRANCH_TO_THUMB);
19669 	}
19670       else
19671 	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19672     }
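  /* Old-style objects mark Thumb functions with the STT_ARM_TFUNC symbol
     type instead of the low address bit; convert those to STT_FUNC with
     a Thumb branch type.  */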
19673   else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19674     {
19675       dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19676       ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19677     }
19678   else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19679     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19680   else
19681     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19682 
19683   return true;
19684 }
19685 
19686 
19687 /* Mangle thumb function symbols as we write them out.  */
19688 
19689 static void
19690 elf32_arm_swap_symbol_out (bfd *abfd,
19691 			   const Elf_Internal_Sym *src,
19692 			   void *cdst,
19693 			   void *shndx)
19694 {
19695   Elf_Internal_Sym newsym;
19696 
19697   /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19698      of the address set, as per the new EABI.  We do this unconditionally
19699      because objcopy does not set the elf header flags until after
19700      it writes out the symbol table.  */
19701   if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19702     {
19703       newsym = *src;
19704       if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19705 	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19706       if (newsym.st_shndx != SHN_UNDEF)
19707 	{
19708 	  /* Do this only for defined symbols.  At link time, the static
19709 	     linker simulates the work of the dynamic linker by resolving
19710 	     symbols and carries over the thumbness of the symbols it finds
19711 	     to the output symbol table.  It is not clear how this happens,
19712 	     but the thumbness of undefined symbols can well be different
19713 	     at runtime, and writing '1' for them would be confusing for
19714 	     users and possibly for the dynamic linker itself.  */
19716 	  newsym.st_value |= 1;
19717 	}
19718 
19719       src = &newsym;
19720     }
19721   bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19722 }
19723 
19724 /* Add the PT_ARM_EXIDX program header.  */
19725 
19726 static bool
19727 elf32_arm_modify_segment_map (bfd *abfd,
19728 			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19729 {
19730   struct elf_segment_map *m;
19731   asection *sec;
19732 
19733   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19734   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19735     {
19736       /* If there is already a PT_ARM_EXIDX header, then we do not
19737 	 want to add another one.  This situation arises when running
19738 	 "strip"; the input binary already has the header.  */
19739       m = elf_seg_map (abfd);
19740       while (m && m->p_type != PT_ARM_EXIDX)
19741 	m = m->next;
19742       if (!m)
19743 	{
19744 	  m = (struct elf_segment_map *)
19745 	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19746 	  if (m == NULL)
19747 	    return false;
19748 	  m->p_type = PT_ARM_EXIDX;
19749 	  m->count = 1;
19750 	  m->sections[0] = sec;
19751 
19752 	  m->next = elf_seg_map (abfd);
19753 	  elf_seg_map (abfd) = m;
19754 	}
19755     }
19756 
19757   return true;
19758 }
19759 
19760 /* We may add a PT_ARM_EXIDX program header.  */
19761 
19762 static int
19763 elf32_arm_additional_program_headers (bfd *abfd,
19764 				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19765 {
19766   asection *sec;
19767 
19768   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19769   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19770     return 1;
19771   else
19772     return 0;
19773 }
19774 
19775 /* Hook called by the linker routine which adds symbols from an object
19776    file.  */
19777 
19778 static bool
19779 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19780 			   Elf_Internal_Sym *sym, const char **namep,
19781 			   flagword *flagsp, asection **secp, bfd_vma *valp)
19782 {
19783   if (elf32_arm_hash_table (info) == NULL)
19784     return false;
19785 
19786   if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19787       && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19788 				       flagsp, secp, valp))
19789     return false;
19790 
19791   return true;
19792 }
19793 
19794 /* We use this to override swap_symbol_in and swap_symbol_out.  */
19795 const struct elf_size_info elf32_arm_size_info =
19796 {
19797   sizeof (Elf32_External_Ehdr),
19798   sizeof (Elf32_External_Phdr),
19799   sizeof (Elf32_External_Shdr),
19800   sizeof (Elf32_External_Rel),
19801   sizeof (Elf32_External_Rela),
19802   sizeof (Elf32_External_Sym),
19803   sizeof (Elf32_External_Dyn),
19804   sizeof (Elf_External_Note),
19805   4,
19806   1,
19807   32, 2,
19808   ELFCLASS32, EV_CURRENT,
19809   bfd_elf32_write_out_phdrs,
19810   bfd_elf32_write_shdrs_and_ehdr,
19811   bfd_elf32_checksum_contents,
19812   bfd_elf32_write_relocs,
19813   elf32_arm_swap_symbol_in,
19814   elf32_arm_swap_symbol_out,
19815   bfd_elf32_slurp_reloc_table,
19816   bfd_elf32_slurp_symbol_table,
19817   bfd_elf32_swap_dyn_in,
19818   bfd_elf32_swap_dyn_out,
19819   bfd_elf32_swap_reloc_in,
19820   bfd_elf32_swap_reloc_out,
19821   bfd_elf32_swap_reloca_in,
19822   bfd_elf32_swap_reloca_out
19823 };
19824 
19825 static bfd_vma
19826 read_code32 (const bfd *abfd, const bfd_byte *addr)
19827 {
19828   /* V7 BE8 code is always little endian.  */
19829   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19830     return bfd_getl32 (addr);
19831 
19832   return bfd_get_32 (abfd, addr);
19833 }
19834 
19835 static bfd_vma
19836 read_code16 (const bfd *abfd, const bfd_byte *addr)
19837 {
19838   /* V7 BE8 code is always little endian.  */
19839   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19840     return bfd_getl16 (addr);
19841 
19842   return bfd_get_16 (abfd, addr);
19843 }
19844 
19845 /* Return size of plt0 entry starting at ADDR
19846    or (bfd_vma) -1 if the size cannot be determined.  */
19847 
19848 static bfd_vma
19849 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19850 {
19851   bfd_vma first_word;
19852   bfd_vma plt0_size;
19853 
19854   first_word = read_code32 (abfd, addr);
19855 
19856   if (first_word == elf32_arm_plt0_entry[0])
19857     plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19858   else if (first_word == elf32_thumb2_plt0_entry[0])
19859     plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19860   else
19861     /* We don't yet handle this PLT format.  */
19862     return (bfd_vma) -1;
19863 
19864   return plt0_size;
19865 }
19866 
19867 /* Return size of plt entry starting at offset OFFSET
19868    of plt section located at address START
19869    or (bfd_vma) -1 if the size cannot be determined.  */
19870 
19871 static bfd_vma
19872 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
19873 {
19874   bfd_vma first_insn;
19875   bfd_vma plt_size = 0;
19876   const bfd_byte *addr = start + offset;
19877 
19878   /* PLT entry size is fixed on Thumb-only platforms.  */
19879   if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
19880       return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19881 
19882   /* Respect Thumb stub if necessary.  */
19883   if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
19884     {
19885       plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
19886     }
19887 
19888   /* Strip immediate from first add.  */
19889   first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
19890 
19891 #ifdef FOUR_WORD_PLT
19892   if (first_insn == elf32_arm_plt_entry[0])
19893     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19894 #else
19895   if (first_insn == elf32_arm_plt_entry_long[0])
19896     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
19897   else if (first_insn == elf32_arm_plt_entry_short[0])
19898     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
19899 #endif
19900   else
19901     /* We don't yet handle this PLT format.  */
19902     return (bfd_vma) -1;
19903 
19904   return plt_size;
19905 }
19906 
19907 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
19908 
19909 static long
19910 elf32_arm_get_synthetic_symtab (bfd *abfd,
19911 			       long symcount ATTRIBUTE_UNUSED,
19912 			       asymbol **syms ATTRIBUTE_UNUSED,
19913 			       long dynsymcount,
19914 			       asymbol **dynsyms,
19915 			       asymbol **ret)
19916 {
19917   asection *relplt;
19918   asymbol *s;
19919   arelent *p;
19920   long count, i, n;
19921   size_t size;
19922   Elf_Internal_Shdr *hdr;
19923   char *names;
19924   asection *plt;
19925   bfd_vma offset;
19926   bfd_byte *data;
19927 
19928   *ret = NULL;
19929 
19930   if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
19931     return 0;
19932 
19933   if (dynsymcount <= 0)
19934     return 0;
19935 
19936   relplt = bfd_get_section_by_name (abfd, ".rel.plt");
19937   if (relplt == NULL)
19938     return 0;
19939 
19940   hdr = &elf_section_data (relplt)->this_hdr;
19941   if (hdr->sh_link != elf_dynsymtab (abfd)
19942       || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
19943     return 0;
19944 
19945   plt = bfd_get_section_by_name (abfd, ".plt");
19946   if (plt == NULL)
19947     return 0;
19948 
19949   if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
19950     return -1;
19951 
19952   data = plt->contents;
19953   if (data == NULL)
19954     {
19955       if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
19956 	return -1;
19957       bfd_cache_section_contents ((asection *) plt, data);
19958     }
19959 
19960   count = relplt->size / hdr->sh_entsize;
19961   size = count * sizeof (asymbol);
19962   p = relplt->relocation;
19963   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19964     {
19965       size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
19966       if (p->addend != 0)
19967 	size += sizeof ("+0x") - 1 + 8;
19968     }
19969 
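  /* SIZE now covers the synthetic symbols plus all of their names, each
     suffixed with "@plt" and, where the reloc has an addend, with
     "+0x<addend>"; allocate it as a single block with the names stored
     after the symbol array.  */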
19970   s = *ret = (asymbol *) bfd_malloc (size);
19971   if (s == NULL)
19972     return -1;
19973 
19974   offset = elf32_arm_plt0_size (abfd, data);
19975   if (offset == (bfd_vma) -1)
19976     return -1;
19977 
19978   names = (char *) (s + count);
19979   p = relplt->relocation;
19980   n = 0;
19981   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19982     {
19983       size_t len;
19984 
19985       bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
19986       if (plt_size == (bfd_vma) -1)
19987 	break;
19988 
19989       *s = **p->sym_ptr_ptr;
19990       /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
19991 	 we are defining a symbol, ensure one of them is set.  */
19992       if ((s->flags & BSF_LOCAL) == 0)
19993 	s->flags |= BSF_GLOBAL;
19994       s->flags |= BSF_SYNTHETIC;
19995       s->section = plt;
19996       s->value = offset;
19997       s->name = names;
19998       s->udata.p = NULL;
19999       len = strlen ((*p->sym_ptr_ptr)->name);
20000       memcpy (names, (*p->sym_ptr_ptr)->name, len);
20001       names += len;
20002       if (p->addend != 0)
20003 	{
20004 	  char buf[30], *a;
20005 
20006 	  memcpy (names, "+0x", sizeof ("+0x") - 1);
20007 	  names += sizeof ("+0x") - 1;
20008 	  bfd_sprintf_vma (abfd, buf, p->addend);
20009 	  for (a = buf; *a == '0'; ++a)
20010 	    ;
20011 	  len = strlen (a);
20012 	  memcpy (names, a, len);
20013 	  names += len;
20014 	}
20015       memcpy (names, "@plt", sizeof ("@plt"));
20016       names += sizeof ("@plt");
20017       ++s, ++n;
20018       offset += plt_size;
20019     }
20020 
20021   return n;
20022 }
20023 
20024 static bool
20025 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
20026 {
20027   if (hdr->sh_flags & SHF_ARM_PURECODE)
20028     hdr->bfd_section->flags |= SEC_ELF_PURECODE;
20029   return true;
20030 }
20031 
20032 static flagword
20033 elf32_arm_lookup_section_flags (char *flag_name)
20034 {
20035   if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20036     return SHF_ARM_PURECODE;
20037 
20038   return SEC_NO_FLAGS;
20039 }
20040 
20041 static unsigned int
20042 elf32_arm_count_additional_relocs (asection *sec)
20043 {
20044   struct _arm_elf_section_data *arm_data;
20045   arm_data = get_arm_elf_section_data (sec);
20046 
20047   return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20048 }
20049 
20050 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20051    has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
20052    FALSE otherwise.  ISECTION is the best guess matching section from the
20053    input bfd IBFD, but it might be NULL.  */
20054 
20055 static bool
20056 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20057 				       bfd *obfd ATTRIBUTE_UNUSED,
20058 				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20059 				       Elf_Internal_Shdr *osection)
20060 {
20061   switch (osection->sh_type)
20062     {
20063     case SHT_ARM_EXIDX:
20064       {
20065 	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20066 	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20067 	unsigned i = 0;
20068 
20069 	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20070 	osection->sh_info = 0;
20071 
20072 	/* The sh_link field must be set to the text section associated with
20073 	   this index section.  Unfortunately the ARM EHABI does not specify
20074 	   exactly how to determine this association.  However, our caller
20075 	   does try to match up OSECTION with its corresponding input
20076 	   section, so that is a good first guess.  */
20077 	if (isection != NULL
20078 	    && osection->bfd_section != NULL
20079 	    && isection->bfd_section != NULL
20080 	    && isection->bfd_section->output_section != NULL
20081 	    && isection->bfd_section->output_section == osection->bfd_section
20082 	    && iheaders != NULL
20083 	    && isection->sh_link > 0
20084 	    && isection->sh_link < elf_numsections (ibfd)
20085 	    && iheaders[isection->sh_link]->bfd_section != NULL
20086 	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20087 	    )
20088 	  {
20089 	    for (i = elf_numsections (obfd); i-- > 0;)
20090 	      if (oheaders[i]->bfd_section
20091 		  == iheaders[isection->sh_link]->bfd_section->output_section)
20092 		break;
20093 	  }
20094 
20095 	if (i == 0)
20096 	  {
20097 	    /* Failing that we have to find a matching section ourselves.  If
20098 	       we had the output section name available we could compare that
20099 	       with input section names.  Unfortunately we don't.  So instead
20100 	       we use a simple heuristic and look for the nearest executable
20101 	       section before this one.  */
20102 	    for (i = elf_numsections (obfd); i-- > 0;)
20103 	      if (oheaders[i] == osection)
20104 		break;
20105 	    if (i == 0)
20106 	      break;
20107 
20108 	    while (i-- > 0)
20109 	      if (oheaders[i]->sh_type == SHT_PROGBITS
20110 		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20111 		  == (SHF_ALLOC | SHF_EXECINSTR))
20112 		break;
20113 	  }
20114 
20115 	if (i)
20116 	  {
20117 	    osection->sh_link = i;
20118 	    /* If the text section was part of a group
20119 	       then the index section should be too.  */
20120 	    if (oheaders[i]->sh_flags & SHF_GROUP)
20121 	      osection->sh_flags |= SHF_GROUP;
20122 	    return true;
20123 	  }
20124       }
20125       break;
20126 
20127     case SHT_ARM_PREEMPTMAP:
20128       osection->sh_flags = SHF_ALLOC;
20129       break;
20130 
20131     case SHT_ARM_ATTRIBUTES:
20132     case SHT_ARM_DEBUGOVERLAY:
20133     case SHT_ARM_OVERLAYSECTION:
20134     default:
20135       break;
20136     }
20137 
20138   return false;
20139 }
20140 
20141 /* Returns TRUE if NAME is an ARM mapping symbol.
20142    Traditionally the symbols $a, $d and $t have been used.
20143    The ARM ELF standard also defines $x (for A64 code).  It also allows a
20144    period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20145    Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20146    not support them here.  $t.x indicates the start of ThumbEE instructions.  */
20147 
20148 static bool
20149 is_arm_mapping_symbol (const char * name)
20150 {
20151   return name != NULL /* Paranoia.  */
20152     && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20153 			 the mapping symbols could have acquired a prefix.
20154 			 We do not support this here, since such symbols no
20155 			 longer conform to the ARM ELF ABI.  */
20156     && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20157     && (name[2] == 0 || name[2] == '.');
20158   /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20159      any characters that follow the period are legal characters for the body
20160      of a symbol's name.  For now we just assume that this is the case.  */
20161 }
20162 
20163 /* Make sure that mapping symbols in object files are not removed via the
20164    "strip --strip-unneeded" tool.  These symbols are needed in order to
20165    correctly generate interworking veneers, and for byte swapping code
20166    regions.  Once an object file has been linked, it is safe to remove the
20167    symbols as they will no longer be needed.  */
20168 
20169 static void
20170 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20171 {
20172   if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20173       && sym->section != bfd_abs_section_ptr
20174       && is_arm_mapping_symbol (sym->name))
20175     sym->flags |= BSF_KEEP;
20176 }
20177 
20178 #undef  elf_backend_copy_special_section_fields
20179 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20180 
20181 #define ELF_ARCH			bfd_arch_arm
20182 #define ELF_TARGET_ID			ARM_ELF_DATA
20183 #define ELF_MACHINE_CODE		EM_ARM
20184 #ifdef __QNXTARGET__
20185 #define ELF_MAXPAGESIZE			0x1000
20186 #else
20187 #define ELF_MAXPAGESIZE			0x10000
20188 #endif
20189 #define ELF_MINPAGESIZE			0x1000
20190 #define ELF_COMMONPAGESIZE		0x1000
20191 
20192 #define bfd_elf32_mkobject			elf32_arm_mkobject
20193 
20194 #define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
20195 #define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
20196 #define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
20197 #define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
20198 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
20199 #define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
20200 #define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
20201 #define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
20202 #define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
20203 #define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
20204 #define bfd_elf32_bfd_final_link		elf32_arm_final_link
20205 #define bfd_elf32_get_synthetic_symtab	elf32_arm_get_synthetic_symtab
20206 
20207 #define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
20208 #define elf_backend_maybe_function_sym		elf32_arm_maybe_function_sym
20209 #define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
20210 #define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
20211 #define elf_backend_check_relocs		elf32_arm_check_relocs
20212 #define elf_backend_update_relocs		elf32_arm_update_relocs
20213 #define elf_backend_relocate_section		elf32_arm_relocate_section
20214 #define elf_backend_write_section		elf32_arm_write_section
20215 #define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
20216 #define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
20217 #define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
20218 #define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
20219 #define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
20220 #define elf_backend_always_size_sections	elf32_arm_always_size_sections
20221 #define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
20222 #define elf_backend_init_file_header		elf32_arm_init_file_header
20223 #define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
20224 #define elf_backend_object_p			elf32_arm_object_p
20225 #define elf_backend_fake_sections		elf32_arm_fake_sections
20226 #define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
20227 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
20228 #define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
20229 #define elf_backend_size_info			elf32_arm_size_info
20230 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20231 #define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
20232 #define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
20233 #define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
20234 #define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
20235 #define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
20236 #define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
20237 #define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing
20238 
20239 #define elf_backend_can_refcount       1
20240 #define elf_backend_can_gc_sections    1
20241 #define elf_backend_plt_readonly       1
20242 #define elf_backend_want_got_plt       1
20243 #define elf_backend_want_plt_sym       0
20244 #define elf_backend_want_dynrelro      1
20245 #define elf_backend_may_use_rel_p      1
20246 #define elf_backend_may_use_rela_p     0
20247 #define elf_backend_default_use_rela_p 0
20248 #define elf_backend_dtrel_excludes_plt 1
20249 
20250 #define elf_backend_got_header_size	12
20251 #define elf_backend_extern_protected_data 1
20252 
20253 #undef	elf_backend_obj_attrs_vendor
20254 #define elf_backend_obj_attrs_vendor		"aeabi"
20255 #undef	elf_backend_obj_attrs_section
20256 #define elf_backend_obj_attrs_section		".ARM.attributes"
20257 #undef	elf_backend_obj_attrs_arg_type
20258 #define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
20259 #undef	elf_backend_obj_attrs_section_type
20260 #define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
20261 #define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
20262 #define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown
20263 
20264 #undef	elf_backend_section_flags
20265 #define elf_backend_section_flags		elf32_arm_section_flags
20266 #undef	elf_backend_lookup_section_flags_hook
20267 #define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags
20268 
20269 #define elf_backend_linux_prpsinfo32_ugid16	true
20270 
20271 #include "elf32-target.h"
20272 
20273 /* Native Client targets.  */
20274 
20275 #undef	TARGET_LITTLE_SYM
20276 #define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
20277 #undef	TARGET_LITTLE_NAME
20278 #define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
20279 #undef	TARGET_BIG_SYM
20280 #define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
20281 #undef	TARGET_BIG_NAME
20282 #define TARGET_BIG_NAME			"elf32-bigarm-nacl"
20283 
20284 /* Like elf32_arm_link_hash_table_create -- but overrides
20285    appropriately for NaCl.  */
20286 
20287 static struct bfd_link_hash_table *
20288 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20289 {
20290   struct bfd_link_hash_table *ret;
20291 
20292   ret = elf32_arm_link_hash_table_create (abfd);
20293   if (ret)
20294     {
20295       struct elf32_arm_link_hash_table *htab
20296 	= (struct elf32_arm_link_hash_table *) ret;
20297 
20298       htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20299       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20300     }
20301   return ret;
20302 }
20303 
20304 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20305    really need to use elf32_arm_modify_segment_map.  But we do it
20306    anyway just to reduce gratuitous differences with the stock ARM backend.  */
20307 
20308 static bool
20309 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20310 {
20311   return (elf32_arm_modify_segment_map (abfd, info)
20312 	  && nacl_modify_segment_map (abfd, info));
20313 }
20314 
20315 static bool
20316 elf32_arm_nacl_final_write_processing (bfd *abfd)
20317 {
20318   arm_final_write_processing (abfd);
20319   return nacl_final_write_processing (abfd);
20320 }
20321 
20322 static bfd_vma
20323 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20324 			    const arelent *rel ATTRIBUTE_UNUSED)
20325 {
20326   return plt->vma
20327     + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20328 	   i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20329 }
20330 
20331 #undef	elf32_bed
20332 #define elf32_bed				elf32_arm_nacl_bed
20333 #undef  bfd_elf32_bfd_link_hash_table_create
20334 #define bfd_elf32_bfd_link_hash_table_create	\
20335   elf32_arm_nacl_link_hash_table_create
20336 #undef	elf_backend_plt_alignment
20337 #define elf_backend_plt_alignment		4
20338 #undef	elf_backend_modify_segment_map
20339 #define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
20340 #undef	elf_backend_modify_headers
20341 #define	elf_backend_modify_headers		nacl_modify_headers
20342 #undef  elf_backend_final_write_processing
20343 #define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
20344 #undef bfd_elf32_get_synthetic_symtab
20345 #undef  elf_backend_plt_sym_val
20346 #define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
20347 #undef  elf_backend_copy_special_section_fields
20348 
20349 #undef	ELF_MINPAGESIZE
20350 #undef	ELF_COMMONPAGESIZE
20351 
20352 #undef ELF_TARGET_OS
20353 #define ELF_TARGET_OS				is_nacl
20354 
20355 #include "elf32-target.h"
20356 
20357 /* Reset to defaults.  */
20358 #undef	elf_backend_plt_alignment
20359 #undef	elf_backend_modify_segment_map
20360 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20361 #undef	elf_backend_modify_headers
20362 #undef  elf_backend_final_write_processing
20363 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
20364 #undef	ELF_MINPAGESIZE
20365 #define ELF_MINPAGESIZE			0x1000
20366 #undef	ELF_COMMONPAGESIZE
20367 #define ELF_COMMONPAGESIZE		0x1000
20368 
20369 
20370 /* FDPIC Targets.  */
20371 
20372 #undef  TARGET_LITTLE_SYM
20373 #define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
20374 #undef  TARGET_LITTLE_NAME
20375 #define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
20376 #undef  TARGET_BIG_SYM
20377 #define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
20378 #undef  TARGET_BIG_NAME
20379 #define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
20380 #undef elf_match_priority
20381 #define elf_match_priority		128
20382 #undef ELF_OSABI
20383 #define ELF_OSABI		ELFOSABI_ARM_FDPIC
20384 
20385 /* Like elf32_arm_link_hash_table_create -- but overrides
20386    appropriately for FDPIC.  */
20387 
20388 static struct bfd_link_hash_table *
elf32_arm_fdpic_link_hash_table_create(bfd * abfd)20389 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20390 {
20391   struct bfd_link_hash_table *ret;
20392 
20393   ret = elf32_arm_link_hash_table_create (abfd);
20394   if (ret)
20395     {
20396       struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20397 
20398       htab->fdpic_p = 1;
20399     }
20400   return ret;
20401 }
20402 
20403 /* We need dynamic symbols for every section, since segments can
20404    relocate independently.  */
20405 static bool
elf32_arm_fdpic_omit_section_dynsym(bfd * output_bfd ATTRIBUTE_UNUSED,struct bfd_link_info * info ATTRIBUTE_UNUSED,asection * p ATTRIBUTE_UNUSED)20406 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20407 				    struct bfd_link_info *info
20408 				    ATTRIBUTE_UNUSED,
20409 				    asection *p ATTRIBUTE_UNUSED)
20410 {
20411   switch (elf_section_data (p)->this_hdr.sh_type)
20412     {
20413     case SHT_PROGBITS:
20414     case SHT_NOBITS:
20415       /* If sh_type is yet undecided, assume it could be
20416 	 SHT_PROGBITS/SHT_NOBITS.  */
20417     case SHT_NULL:
20418       return false;
20419 
20420       /* There shouldn't be section relative relocations
20421 	 against any other section.  */
20422     default:
20423       return true;
20424     }
20425 }
20426 
20427 #undef  elf32_bed
20428 #define elf32_bed				elf32_arm_fdpic_bed
20429 
20430 #undef  bfd_elf32_bfd_link_hash_table_create
20431 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_fdpic_link_hash_table_create
20432 
20433 #undef elf_backend_omit_section_dynsym
20434 #define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym
20435 
20436 #undef ELF_TARGET_OS
20437 
20438 #include "elf32-target.h"
20439 
20440 #undef elf_match_priority
20441 #undef ELF_OSABI
20442 #undef elf_backend_omit_section_dynsym
20443 
20444 /* VxWorks Targets.  */
20445 
20446 #undef	TARGET_LITTLE_SYM
20447 #define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
20448 #undef	TARGET_LITTLE_NAME
20449 #define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
20450 #undef	TARGET_BIG_SYM
20451 #define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
20452 #undef	TARGET_BIG_NAME
20453 #define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
20454 
20455 /* Like elf32_arm_link_hash_table_create -- but overrides
20456    appropriately for VxWorks.  */
20457 
20458 static struct bfd_link_hash_table *
elf32_arm_vxworks_link_hash_table_create(bfd * abfd)20459 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20460 {
20461   struct bfd_link_hash_table *ret;
20462 
20463   ret = elf32_arm_link_hash_table_create (abfd);
20464   if (ret)
20465     {
20466       struct elf32_arm_link_hash_table *htab
20467 	= (struct elf32_arm_link_hash_table *) ret;
20468       htab->use_rel = 0;
20469     }
20470   return ret;
20471 }
20472 
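/* Perform the ordinary ARM final-write processing, then the
   VxWorks-specific pass.  */
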
static bool
elf32_arm_vxworks_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return elf_vxworks_final_write_processing (abfd);
}

#undef  elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef  elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000
#undef ELF_TARGET_OS
#define ELF_TARGET_OS			is_vxworks

#include "elf32-target.h"


/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bool
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bool flags_compatible = true;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return false;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return true;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return false;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return false;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which, surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return true;

      elf_flags_init (obfd) = true;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return true;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return false;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return true;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bool null_input_bfd = true;
      bool only_data_sections = true;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = false;

	      null_input_bfd = false;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return true;
    }

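  /* The EABI version occupies the top byte of e_flags
     (EF_ARM_EABIMASK), hence the shift by 24 in the diagnostic
     below.  */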
  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return false;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = false;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = false;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
