1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2021 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25
26 #include "opcode/i386.h"
27 #include "elf/x86-64.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
#define MINUS_ONE (~ (bfd_vma) 0)

/* Since both 32-bit and 64-bit x86-64 encode relocation type in the
   identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
   relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
   since they are the same.  */

/* The relocation "howto" table, indexed by relocation number (with
   the exceptions handled by elf_x86_64_rtype_to_howto below).
   Order of HOWTO fields:
   type, rightshift, size, bitsize, pc_relative, bitpos,
   complain_on_overflow, special_function, name, partial_inplace,
   src_mask, dst_mask, pcrel_offset.  */
static reloc_howto_type x86_64_elf_howto_table[] =
{
  HOWTO(R_X86_64_NONE, 0, 3, 0, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_NONE",	false, 0, 0x00000000,
	false),
  HOWTO(R_X86_64_64, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_PC32, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_GOT32, 0, 2, 32, false, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
	false),
  HOWTO(R_X86_64_PLT32, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_COPY, 0, 2, 32, false, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
	false),
  HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_RELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
	true),
  /* R_X86_64_32 zero-extends; hence complain_overflow_unsigned here.
     An x32 variant with complain_overflow_bitfield sits at the very
     end of this table (see elf_x86_64_rtype_to_howto).  */
  HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
	false),
  HOWTO(R_X86_64_32S, 0, 2, 32, false, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
	false),
  HOWTO(R_X86_64_16, 0, 1, 16, false, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
  HOWTO(R_X86_64_PC16, 0, 1, 16, true, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
  HOWTO(R_X86_64_8, 0, 0, 8, false, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
  HOWTO(R_X86_64_PC8, 0, 0, 8, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
  /* TLS relocations.  */
  HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_TPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_TLSGD, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_TLSLD, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
	false),
  HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_TPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
	false),
  HOWTO(R_X86_64_PC64, 0, 4, 64, true, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
	true),
  HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_GOTPC32, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_GOT64, 0, 4, 64, false, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
	true),
  HOWTO(R_X86_64_GOTPC64, 0, 4, 64, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
	true),
  HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, false, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, false, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_SIZE32, 0, 2, 32, false, 0, complain_overflow_unsigned,
	bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
	false),
  HOWTO(R_X86_64_SIZE64, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, true, 0,
	complain_overflow_bitfield, bfd_elf_generic_reloc,
	"R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
  HOWTO(R_X86_64_TLSDESC_CALL, 0, 3, 0, false, 0,
	complain_overflow_dont, bfd_elf_generic_reloc,
	"R_X86_64_TLSDESC_CALL",
	false, 0, 0, false),
  HOWTO(R_X86_64_TLSDESC, 0, 4, 64, false, 0,
	complain_overflow_dont, bfd_elf_generic_reloc,
	"R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
  HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, false, 0, complain_overflow_dont,
	bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
	false),
  HOWTO(R_X86_64_PC32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
	true),
  HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
	bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
	true),

  /* We have a gap in the reloc numbers here.
     R_X86_64_standard counts the number up to this point, and
     R_X86_64_vt_offset is the value to subtract from a reloc type of
     R_X86_64_GNU_VT* to form an index into this table.  */
#define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
#define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)

/* GNU extension to record C++ vtable hierarchy.  */
  HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, false, 0, complain_overflow_dont,
	 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),

/* GNU extension to record C++ vtable member usage.  */
  HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, false, 0, complain_overflow_dont,
	 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
	 false),

/* Use complain_overflow_bitfield on R_X86_64_32 for x32.  Must stay
   the last entry: elf_x86_64_rtype_to_howto and
   elf_x86_64_reloc_name_lookup index it as ARRAY_SIZE - 1.  */
  HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_bitfield,
	bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
	false)
};
195
/* True iff TYPE is one of the PC-relative x86-64 relocations
   (excluding the GOT/PLT-relative flavours, which are handled
   separately).  TYPE may be evaluated more than once.  */
#define X86_PCREL_TYPE_P(TYPE)		\
  (   ((TYPE) == R_X86_64_PC8)		\
   || ((TYPE) == R_X86_64_PC16)		\
   || ((TYPE) == R_X86_64_PC32)		\
   || ((TYPE) == R_X86_64_PC32_BND)	\
   || ((TYPE) == R_X86_64_PC64))

/* True iff TYPE is one of the symbol-size relocations.  */
#define X86_SIZE_TYPE_P(TYPE) \
  ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
205
/* Map BFD relocs to the x86_64 elf relocs.  One entry per pair;
   searched linearly by elf_x86_64_reloc_type_lookup.  */
struct elf_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD reloc code.  */
  unsigned char elf_reloc_val;			/* Matching R_X86_64_* value.  */
};
212
/* The BFD-reloc -> ELF-reloc mapping table.  Order is irrelevant to
   correctness (lookup is a linear scan), but roughly follows the
   relocation numbering.  */
static const struct elf_reloc_map x86_64_reloc_map[] =
{
  { BFD_RELOC_NONE,		R_X86_64_NONE, },
  { BFD_RELOC_64,		R_X86_64_64,   },
  { BFD_RELOC_32_PCREL,		R_X86_64_PC32, },
  { BFD_RELOC_X86_64_GOT32,	R_X86_64_GOT32,},
  { BFD_RELOC_X86_64_PLT32,	R_X86_64_PLT32,},
  { BFD_RELOC_X86_64_COPY,	R_X86_64_COPY, },
  { BFD_RELOC_X86_64_GLOB_DAT,	R_X86_64_GLOB_DAT, },
  { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
  { BFD_RELOC_X86_64_RELATIVE,	R_X86_64_RELATIVE, },
  { BFD_RELOC_X86_64_GOTPCREL,	R_X86_64_GOTPCREL, },
  { BFD_RELOC_32,		R_X86_64_32, },
  { BFD_RELOC_X86_64_32S,	R_X86_64_32S, },
  { BFD_RELOC_16,		R_X86_64_16, },
  { BFD_RELOC_16_PCREL,		R_X86_64_PC16, },
  { BFD_RELOC_8,		R_X86_64_8, },
  { BFD_RELOC_8_PCREL,		R_X86_64_PC8, },
  { BFD_RELOC_X86_64_DTPMOD64,	R_X86_64_DTPMOD64, },
  { BFD_RELOC_X86_64_DTPOFF64,	R_X86_64_DTPOFF64, },
  { BFD_RELOC_X86_64_TPOFF64,	R_X86_64_TPOFF64, },
  { BFD_RELOC_X86_64_TLSGD,	R_X86_64_TLSGD, },
  { BFD_RELOC_X86_64_TLSLD,	R_X86_64_TLSLD, },
  { BFD_RELOC_X86_64_DTPOFF32,	R_X86_64_DTPOFF32, },
  { BFD_RELOC_X86_64_GOTTPOFF,	R_X86_64_GOTTPOFF, },
  { BFD_RELOC_X86_64_TPOFF32,	R_X86_64_TPOFF32, },
  { BFD_RELOC_64_PCREL,		R_X86_64_PC64, },
  { BFD_RELOC_X86_64_GOTOFF64,	R_X86_64_GOTOFF64, },
  { BFD_RELOC_X86_64_GOTPC32,	R_X86_64_GOTPC32, },
  { BFD_RELOC_X86_64_GOT64,	R_X86_64_GOT64, },
  { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
  { BFD_RELOC_X86_64_GOTPC64,	R_X86_64_GOTPC64, },
  { BFD_RELOC_X86_64_GOTPLT64,	R_X86_64_GOTPLT64, },
  { BFD_RELOC_X86_64_PLTOFF64,	R_X86_64_PLTOFF64, },
  { BFD_RELOC_SIZE32,		R_X86_64_SIZE32, },
  { BFD_RELOC_SIZE64,		R_X86_64_SIZE64, },
  { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
  { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
  { BFD_RELOC_X86_64_TLSDESC,	R_X86_64_TLSDESC, },
  { BFD_RELOC_X86_64_IRELATIVE,	R_X86_64_IRELATIVE, },
  { BFD_RELOC_X86_64_PC32_BND,	R_X86_64_PC32_BND, },
  { BFD_RELOC_X86_64_PLT32_BND,	R_X86_64_PLT32_BND, },
  { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
  { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
  { BFD_RELOC_VTABLE_INHERIT,	R_X86_64_GNU_VTINHERIT, },
  { BFD_RELOC_VTABLE_ENTRY,	R_X86_64_GNU_VTENTRY, },
};
260
261 static reloc_howto_type *
elf_x86_64_rtype_to_howto(bfd * abfd,unsigned r_type)262 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
263 {
264 unsigned i;
265
266 if (r_type == (unsigned int) R_X86_64_32)
267 {
268 if (ABI_64_P (abfd))
269 i = r_type;
270 else
271 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
272 }
273 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
274 || r_type >= (unsigned int) R_X86_64_max)
275 {
276 if (r_type >= (unsigned int) R_X86_64_standard)
277 {
278 /* xgettext:c-format */
279 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
280 abfd, r_type);
281 bfd_set_error (bfd_error_bad_value);
282 return NULL;
283 }
284 i = r_type;
285 }
286 else
287 i = r_type - (unsigned int) R_X86_64_vt_offset;
288 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
289 return &x86_64_elf_howto_table[i];
290 }
291
292 /* Given a BFD reloc type, return a HOWTO structure. */
293 static reloc_howto_type *
elf_x86_64_reloc_type_lookup(bfd * abfd,bfd_reloc_code_real_type code)294 elf_x86_64_reloc_type_lookup (bfd *abfd,
295 bfd_reloc_code_real_type code)
296 {
297 unsigned int i;
298
299 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
300 i++)
301 {
302 if (x86_64_reloc_map[i].bfd_reloc_val == code)
303 return elf_x86_64_rtype_to_howto (abfd,
304 x86_64_reloc_map[i].elf_reloc_val);
305 }
306 return NULL;
307 }
308
309 static reloc_howto_type *
elf_x86_64_reloc_name_lookup(bfd * abfd,const char * r_name)310 elf_x86_64_reloc_name_lookup (bfd *abfd,
311 const char *r_name)
312 {
313 unsigned int i;
314
315 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
316 {
317 /* Get x32 R_X86_64_32. */
318 reloc_howto_type *reloc
319 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
320 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
321 return reloc;
322 }
323
324 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
325 if (x86_64_elf_howto_table[i].name != NULL
326 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
327 return &x86_64_elf_howto_table[i];
328
329 return NULL;
330 }
331
332 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
333
334 static bool
elf_x86_64_info_to_howto(bfd * abfd,arelent * cache_ptr,Elf_Internal_Rela * dst)335 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
336 Elf_Internal_Rela *dst)
337 {
338 unsigned r_type;
339
340 r_type = ELF32_R_TYPE (dst->r_info);
341 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
342 if (cache_ptr->howto == NULL)
343 return false;
344 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
345 return true;
346 }
347
/* Support for core dump NOTE sections.  */

/* Grok an NT_PRSTATUS note: record the signal number and LWP id in
   the BFD's core data and create a ".reg/<pid>" pseudosection over
   the general-purpose register block.  Returns false if the note
   size matches neither the x32 nor the x86-64 layout.  */
static bool
elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;		/* Byte offset of pr_reg within the note.  */
  size_t size;		/* Size of pr_reg in bytes.  */

  switch (note->descsz)
    {
    default:
      return false;

    case 296:		/* sizeof (struct elf_prstatus) on Linux/x32 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg */
      offset = 72;
      size = 216;

      break;

    case 336:		/* sizeof (struct elf_prstatus) on Linux/x86_64 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal
	= bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid
	= bfd_get_32 (abfd, note->descdata + 32);

      /* pr_reg */
      offset = 112;
      size = 216;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
393
394 static bool
elf_x86_64_grok_psinfo(bfd * abfd,Elf_Internal_Note * note)395 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
396 {
397 switch (note->descsz)
398 {
399 default:
400 return false;
401
402 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
403 elf_tdata (abfd)->core->pid
404 = bfd_get_32 (abfd, note->descdata + 12);
405 elf_tdata (abfd)->core->program
406 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
407 elf_tdata (abfd)->core->command
408 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
409 break;
410
411 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
412 elf_tdata (abfd)->core->pid
413 = bfd_get_32 (abfd, note->descdata + 24);
414 elf_tdata (abfd)->core->program
415 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
416 elf_tdata (abfd)->core->command
417 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
418 }
419
420 /* Note that for some reason, a spurious space is tacked
421 onto the end of the args in some (at least one anyway)
422 implementations, so strip it off if it exists. */
423
424 {
425 char *command = elf_tdata (abfd)->core->command;
426 int n = strlen (command);
427
428 if (0 < n && command[n - 1] == ' ')
429 command[n - 1] = '\0';
430 }
431
432 return true;
433 }
434
#ifdef CORE_HEADER
# if GCC_VERSION >= 8000
#  pragma GCC diagnostic push
   /* The strncpy calls below intentionally fill pr_fname/pr_psargs
      without guaranteeing NUL termination; the kernel note format is
      fixed-width.  Silence GCC 8+'s truncation warning.  */
#  pragma GCC diagnostic ignored "-Wstringop-truncation"
# endif
/* Write an NT_PRPSINFO or NT_PRSTATUS core note into BUF (of size
   *BUFSIZ), choosing the 32-bit or 64-bit note layout from the BFD's
   ELF class.  Variadic arguments depend on NOTE_TYPE:
     NT_PRPSINFO: const char *fname, const char *psargs;
     NT_PRSTATUS: long pid, int cursig, const void *gregs.
   Returns the value of elfcore_write_note, or NULL for an
   unsupported NOTE_TYPE.  The prpsinfo*_t/prstatus*_t layouts come
   from CORE_HEADER.  */
static char *
elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
			    int note_type, ...)
{
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
  va_list ap;
  const char *fname, *psargs;
  long pid;
  int cursig;
  const void *gregs;

  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      va_start (ap, note_type);
      fname = va_arg (ap, const char *);
      psargs = va_arg (ap, const char *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  prpsinfo32_t data;
	  memset (&data, 0, sizeof (data));
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      else
	{
	  prpsinfo64_t data;
	  memset (&data, 0, sizeof (data));
	  strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
	  strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &data, sizeof (data));
	}
      /* NOTREACHED */

    case NT_PRSTATUS:
      va_start (ap, note_type);
      pid = va_arg (ap, long);
      cursig = va_arg (ap, int);
      gregs = va_arg (ap, const void *);
      va_end (ap);

      if (bed->s->elfclass == ELFCLASS32)
	{
	  /* ELFCLASS32 with an x86-64 machine code is x32; it has its
	     own prstatus layout, distinct from plain 32-bit.  */
	  if (bed->elf_machine_code == EM_X86_64)
	    {
	      prstatusx32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	  else
	    {
	      prstatus32_t prstat;
	      memset (&prstat, 0, sizeof (prstat));
	      prstat.pr_pid = pid;
	      prstat.pr_cursig = cursig;
	      memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	      return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
					 &prstat, sizeof (prstat));
	    }
	}
      else
	{
	  prstatus64_t prstat;
	  memset (&prstat, 0, sizeof (prstat));
	  prstat.pr_pid = pid;
	  prstat.pr_cursig = cursig;
	  memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
	  return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
				     &prstat, sizeof (prstat));
	}
    }
  /* NOTREACHED */
}
# if GCC_VERSION >= 8000
#  pragma GCC diagnostic pop
# endif
#endif
529
/* Functions for the x86-64 ELF linker.	 */

/* The size in bytes of an entry in the global offset table.  */

#define GOT_ENTRY_SIZE 8

/* The size in bytes of an entry in the lazy procedure linkage table
   (the templates below are padded to this size).  */

#define LAZY_PLT_ENTRY_SIZE 16

/* The size in bytes of an entry in the non-lazy procedure linkage
   table (a single indirect jump plus padding).  */

#define NON_LAZY_PLT_ENTRY_SIZE 8
544
/* The first entry in a lazy procedure linkage table looks like this.
   See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
   works.  GOT+8 and GOT+16 are filled in at run time by the dynamic
   linker (link map and resolver entry point, per the x86-64 psABI).  */

static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	/* pushq GOT+8(%rip)  */
  0xff, 0x25, 16, 0, 0, 0,	/* jmpq *GOT+16(%rip) */
  0x0f, 0x1f, 0x40, 0x00	/* nopl 0(%rax)       */
};
555
/* Subsequent entries in a lazy procedure linkage table look like this.
   The placeholder immediates are patched when the entry is laid out.  */

static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.  */
  0x68,		/* pushq immediate */
  0, 0, 0, 0,	/* replaced with index into relocation table.  */
  0xe9,		/* jmp relative */
  0, 0, 0, 0	/* replaced with offset to start of .plt0.  */
};
567
/* The first entry in a lazy procedure linkage table with BND (MPX)
   prefix looks like this.  */

static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	  /* pushq GOT+8(%rip)	      */
  0xf2, 0xff, 0x25, 16, 0, 0, 0,  /* bnd jmpq *GOT+16(%rip)   */
  0x0f, 0x1f, 0			  /* nopl (%rax)	      */
};
577
/* Subsequent entries for branches with BND prefix in a lazy procedure
   linkage table look like this.  Note the push comes first here, so
   the bnd jmpq can target the PLT0 resolver directly.  */

static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0x68, 0, 0, 0, 0,		/* pushq immediate	      */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative	      */
  0x0f, 0x1f, 0x44, 0, 0	/* nopl 0(%rax,%rax,1)	      */
};
587
/* The first entry in the IBT-enabled lazy procedure linkage table is
   the same as the lazy PLT with BND prefix so that bound registers are
   preserved when control is passed to dynamic linker.  Subsequent
   entries for an IBT-enabled lazy procedure linkage table look like
   this (an endbr64 landing pad followed by the BND-style push/jump).  */

static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		      */
  0x68, 0, 0, 0, 0,		/* pushq immediate	      */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative	      */
  0x90				/* nop			      */
};
601
/* The first entry in the x32 IBT-enabled lazy procedure linkage table
   is the same as the normal lazy PLT.  Subsequent entries for an
   x32 IBT-enabled lazy procedure linkage table look like this (no BND
   prefix on x32).  */

static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		      */
  0x68, 0, 0, 0, 0,		/* pushq immediate	      */
  0xe9, 0, 0, 0, 0,		/* jmpq relative	      */
  0x66, 0x90			/* xchg %ax,%ax		      */
};
613
/* Entries in the non-lazy procedure linkage table look like this:
   a single indirect jump through the GOT, padded to 8 bytes.  */

static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x25,	/* jmpq *name@GOTPC(%rip)			      */
  0, 0, 0, 0,	/* replaced with offset to this symbol in .got.	      */
  0x66, 0x90	/* xchg %ax,%ax					      */
};
622
/* Entries for branches with BND prefix in the non-lazy procedure
   linkage table look like this.  */

static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xf2, 0xff, 0x25,	/* bnd jmpq *name@GOTPC(%rip)		      */
  0, 0, 0, 0,		/* replaced with offset to this symbol in .got.  */
  0x90			/* nop					      */
};
632
/* Entries for branches with IBT-enabled in the non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entry (16 bytes) to make room for the endbr64 landing pad.  */

static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64		      */
  0xf2, 0xff, 0x25,		/* bnd jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,  /* replaced with offset to this symbol in .got.  */
  0x0f, 0x1f, 0x44, 0x00, 0x00	/* nopl 0x0(%rax,%rax,1)      */
};
644
/* Entries for branches with IBT-enabled in the x32 non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entry (16 bytes); no BND prefix on x32.  */

static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	     /* endbr64		       */
  0xff, 0x25,			     /* jmpq *name@GOTPC(%rip) */
  0, 0, 0, 0,  /* replaced with offset to this symbol in .got.  */
  0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1)  */
};
656
/* The TLSDESC entry in a lazy procedure linkage table.  The GOT
   displacements (placeholders 8 and 16 here) are patched via the
   plt_tlsdesc_got{1,2}_offset fields of the layout structs below.  */
static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	     /* endbr64		       */
  0xff, 0x35, 8, 0, 0, 0,	     /* pushq GOT+8(%rip)      */
  0xff, 0x25, 16, 0, 0, 0	     /* jmpq *GOT+TDG(%rip)    */
};
664
/* .eh_frame covering the lazy .plt section.  The FDE's CFA expression
   computes rsp + 8 + 8 * ((rip & 15) >= 11): within a 16-byte PLT
   entry, once rip passes the push (offset 11 here) the CFA is 8 bytes
   further from rsp.  The DW_OP_lit* threshold is the only difference
   between the lazy .eh_frame variants below.  */

static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
699
/* .eh_frame covering the lazy BND .plt section.  Identical to the
   plain lazy variant except for the DW_OP_lit5 threshold: in a BND
   PLT entry the push is at offset 0, so the CFA grows once rip is
   past offset 5 within the entry.  */

static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
734
/* .eh_frame covering the lazy .plt section with IBT-enabled.  The
   endbr64 landing pad shifts the push to offset 4, hence the
   DW_OP_lit10 threshold in the CFA expression.  */

static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
769
/* .eh_frame covering the x32 lazy .plt section with IBT-enabled.
   Same as the 64-bit IBT variant but with a DW_OP_lit9 threshold
   (the x32 entry has no BND prefix, so the jump is one byte
   earlier).  */

static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
{
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
  0, 0, 0, 0,			/* .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
  DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
  DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
  DW_CFA_advance_loc + 10,	/* DW_CFA_advance_loc: 10 to __PLT__+16 */
  DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
  11,				/* Block length */
  DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
  DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
  DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
  DW_OP_lit3, DW_OP_shl, DW_OP_plus,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
804
/* .eh_frame covering the non-lazy .plt section.  Non-lazy entries
   never push, so no CFA expression is needed — the CIE's
   rsp+8 rule holds throughout.  */

static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
{
#define PLT_GOT_FDE_LENGTH		20
  PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
  0, 0, 0, 0,			/* CIE ID */
  1,				/* CIE version */
  'z', 'R', 0,			/* Augmentation string */
  1,				/* Code alignment factor */
  0x78,				/* Data alignment factor */
  16,				/* Return address column */
  1,				/* Augmentation size */
  DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
  DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
  DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
  DW_CFA_nop, DW_CFA_nop,

  PLT_GOT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
  PLT_CIE_LENGTH + 8, 0, 0, 0,	/* CIE pointer */
  0, 0, 0, 0,			/* the start of non-lazy .plt goes here */
  0, 0, 0, 0,			/* non-lazy .plt size goes here */
  0,				/* Augmentation size */
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
  DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
831
/* These are the standard parameters: the layout descriptor for the
   plain (no BND, no IBT) lazy PLT, tying the byte templates above to
   the offsets of their patchable fields.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
  {
    elf_x86_64_lazy_plt0_entry,		/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x86_64_lazy_plt_entry,		/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    8,					/* plt0_got2_offset */
    12,					/* plt0_got2_insn_end */
    2,					/* plt_got_offset */
    7,					/* plt_reloc_offset */
    12,					/* plt_plt_offset */
    6,					/* plt_got_insn_size */
    LAZY_PLT_ENTRY_SIZE,		/* plt_plt_insn_end */
    6,					/* plt_lazy_offset */
    elf_x86_64_lazy_plt0_entry,		/* pic_plt0_entry */
    elf_x86_64_lazy_plt_entry,		/* pic_plt_entry */
    elf_x86_64_eh_frame_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
  };
859
/* Layout of the non-lazy (BIND_NOW) PLT: a single indirect-jump
   template shared between non-PIC and PIC, with the GOT slot
   displacement patched at byte 2.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
  {
    elf_x86_64_non_lazy_plt_entry,	/* plt_entry */
    elf_x86_64_non_lazy_plt_entry,	/* pic_plt_entry */
    NON_LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    2,					/* plt_got_offset */
    6,					/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
870
/* Layout of the lazy PLT with MPX BND prefixes.  The "1+" terms in
   the offsets account for the one-byte BND (0xf2) prefix preceding
   the branch instructions in the BND templates.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
  {
    elf_x86_64_lazy_bnd_plt0_entry,	/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x86_64_lazy_bnd_plt_entry,	/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    1+8,				/* plt0_got2_offset */
    1+12,				/* plt0_got2_insn_end */
    1+2,				/* plt_got_offset */
    1,					/* plt_reloc_offset */
    7,					/* plt_plt_offset */
    1+6,				/* plt_got_insn_size */
    11,					/* plt_plt_insn_end */
    0,					/* plt_lazy_offset */
    elf_x86_64_lazy_bnd_plt0_entry,	/* pic_plt0_entry */
    elf_x86_64_lazy_bnd_plt_entry,	/* pic_plt_entry */
    elf_x86_64_eh_frame_lazy_bnd_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
  };
897
/* Non-lazy PLT with MPX BND prefix; "1+" skips the one-byte BND
   (0xf2) prefix before the indirect jump.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
  {
    elf_x86_64_non_lazy_bnd_plt_entry,	/* plt_entry */
    elf_x86_64_non_lazy_bnd_plt_entry,	/* pic_plt_entry */
    NON_LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    1+2,				/* plt_got_offset */
    1+6,				/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
908
/* Lazy PLT for CET/IBT.  The "4+1+" terms skip the four-byte ENDBR64
   landing pad plus the one-byte BND prefix at the start of each
   entry; PLT0 reuses the BND template.  */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
  {
    elf_x86_64_lazy_bnd_plt0_entry,	/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x86_64_lazy_ibt_plt_entry,	/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    1+8,				/* plt0_got2_offset */
    1+12,				/* plt0_got2_insn_end */
    4+1+2,				/* plt_got_offset */
    4+1,				/* plt_reloc_offset */
    4+1+6,				/* plt_plt_offset */
    4+1+6,				/* plt_got_insn_size */
    4+1+5+5,				/* plt_plt_insn_end */
    0,					/* plt_lazy_offset */
    elf_x86_64_lazy_bnd_plt0_entry,	/* pic_plt0_entry */
    elf_x86_64_lazy_ibt_plt_entry,	/* pic_plt_entry */
    elf_x86_64_eh_frame_lazy_ibt_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
  };
935
/* Lazy IBT PLT for x32.  Same as the LP64 IBT layout but with no BND
   prefix, so offsets only skip the four-byte ENDBR64 ("4+").  */
static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
  {
    elf_x86_64_lazy_plt0_entry,		/* plt0_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt0_entry_size */
    elf_x32_lazy_ibt_plt_entry,		/* plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    elf_x86_64_tlsdesc_plt_entry,	/* plt_tlsdesc_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_tlsdesc_entry_size */
    6,					/* plt_tlsdesc_got1_offset */
    12,					/* plt_tlsdesc_got2_offset */
    10,					/* plt_tlsdesc_got1_insn_end */
    16,					/* plt_tlsdesc_got2_insn_end */
    2,					/* plt0_got1_offset */
    8,					/* plt0_got2_offset */
    12,					/* plt0_got2_insn_end */
    4+2,				/* plt_got_offset */
    4+1,				/* plt_reloc_offset */
    4+6,				/* plt_plt_offset */
    4+6,				/* plt_got_insn_size */
    4+5+5,				/* plt_plt_insn_end */
    0,					/* plt_lazy_offset */
    elf_x86_64_lazy_plt0_entry,		/* pic_plt0_entry */
    elf_x32_lazy_ibt_plt_entry,		/* pic_plt_entry */
    elf_x32_eh_frame_lazy_ibt_plt,	/* eh_frame_plt */
    sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
  };
962
/* Non-lazy IBT PLT; "4+1+" skips ENDBR64 plus the BND prefix.  Note
   the entry size is LAZY_PLT_ENTRY_SIZE to leave room for ENDBR64.  */
static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
  {
    elf_x86_64_non_lazy_ibt_plt_entry,	/* plt_entry */
    elf_x86_64_non_lazy_ibt_plt_entry,	/* pic_plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    4+1+2,				/* plt_got_offset */
    4+1+6,				/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
973
/* Non-lazy IBT PLT for x32; no BND prefix, so "4+" skips only the
   four-byte ENDBR64 landing pad.  */
static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
  {
    elf_x32_non_lazy_ibt_plt_entry,	/* plt_entry */
    elf_x32_non_lazy_ibt_plt_entry,	/* pic_plt_entry */
    LAZY_PLT_ENTRY_SIZE,		/* plt_entry_size */
    4+2,				/* plt_got_offset */
    4+6,				/* plt_got_insn_size */
    elf_x86_64_eh_frame_non_lazy_plt,	/* eh_frame_plt */
    sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
  };
984
985
986 static bool
elf64_x86_64_elf_object_p(bfd * abfd)987 elf64_x86_64_elf_object_p (bfd *abfd)
988 {
989 /* Set the right machine number for an x86-64 elf64 file. */
990 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
991 return true;
992 }
993
994 static bool
elf32_x86_64_elf_object_p(bfd * abfd)995 elf32_x86_64_elf_object_p (bfd *abfd)
996 {
997 /* Set the right machine number for an x86-64 elf32 file. */
998 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
999 return true;
1000 }
1001
/* Return TRUE if the TLS access code sequence support transition
   from R_TYPE.  This verifies, byte by byte, that the instructions
   around the relocation at REL->r_offset in SEC really form one of
   the recognized TLS code sequences, so that relaxing the relocation
   will rewrite a complete, known instruction pattern.  */

static bool
elf_x86_64_check_tls_transition (bfd *abfd,
				 struct bfd_link_info *info,
				 asection *sec,
				 bfd_byte *contents,
				 Elf_Internal_Shdr *symtab_hdr,
				 struct elf_link_hash_entry **sym_hashes,
				 unsigned int r_type,
				 const Elf_Internal_Rela *rel,
				 const Elf_Internal_Rela *relend)
{
  unsigned int val;
  unsigned long r_symndx;
  bool largepic = false;
  struct elf_link_hash_entry *h;
  bfd_vma offset;
  struct elf_x86_link_hash_table *htab;
  bfd_byte *call;
  bool indirect_call;

  htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
  offset = rel->r_offset;
  switch (r_type)
    {
    case R_X86_64_TLSGD:
    case R_X86_64_TLSLD:
      /* Both sequences end in a call to __tls_get_addr; the reloc
	 against that symbol must immediately follow this one.  */
      if ((rel + 1) >= relend)
	return false;

      if (r_type == R_X86_64_TLSGD)
	{
	  /* Check transition from GD access model.  For 64bit, only
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		.byte 0x66; leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For 32bit, only
		leaq foo@tlsgd(%rip), %rdi
		.word 0x6666; rex64; call __tls_get_addr@PLT
	     or
		leaq foo@tlsgd(%rip), %rdi
		.byte 0x66; rex64
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic,
	     we also support:
		leaq foo@tlsgd(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq %r15, %rax
		call *%rax
	     or
		leaq foo@tlsgd(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq %rbx, %rax
		call *%rax  */

	  static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };

	  /* The reloc covers 4 bytes at OFFSET and the call sequence
	     needs at least 8 more bytes after it.  */
	  if ((offset + 12) > sec->size)
	    return false;

	  /* CALL points just past the 4-byte tlsgd displacement.  */
	  call = contents + offset + 4;
	  if (call[0] != 0x66
	      || !((call[1] == 0x48
		    && call[2] == 0xff
		    && call[3] == 0x15)
		   || (call[1] == 0x48
		       && call[2] == 0x67
		       && call[3] == 0xe8)
		   || (call[1] == 0x66
		       && call[2] == 0x48
		       && call[3] == 0xe8)))
	    {
	      /* Not one of the small-model forms; try the largepic
		 movabsq/addq/call* sequence (LP64 only, 19 bytes).  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || offset < 3
		  || memcmp (call - 7, leaq + 1, 3) != 0
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return false;
	      largepic = true;
	    }
	  else if (ABI_64_P (abfd))
	    {
	      /* LP64 leaq carries a 0x66 prefix byte.  */
	      if (offset < 4
		  || memcmp (contents + offset - 4, leaq, 4) != 0)
		return false;
	    }
	  else
	    {
	      /* X32 leaq has no 0x66 prefix.  */
	      if (offset < 3
		  || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
		return false;
	    }
	  /* 0xff opcode means "call *mem", i.e. via GOTPCREL.  */
	  indirect_call = call[2] == 0xff;
	}
      else
	{
	  /* Check transition from LD access model.  Only
		leaq foo@tlsld(%rip), %rdi;
		call __tls_get_addr@PLT
	     or
		leaq foo@tlsld(%rip), %rdi;
		call *__tls_get_addr@GOTPCREL(%rip)
		which may be converted to
		addr32 call __tls_get_addr
	     can transit to different access model.  For largepic
	     we also support:
		leaq foo@tlsld(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq %r15, %rax
		call *%rax
	     or
		leaq foo@tlsld(%rip), %rdi
		movabsq $__tls_get_addr@pltoff, %rax
		addq %rbx, %rax
		call *%rax  */

	  static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };

	  if (offset < 3 || (offset + 9) > sec->size)
	    return false;

	  if (memcmp (contents + offset - 3, lea, 3) != 0)
	    return false;

	  call = contents + offset + 4;
	  if (!(call[0] == 0xe8
		|| (call[0] == 0xff && call[1] == 0x15)
		|| (call[0] == 0x67 && call[1] == 0xe8)))
	    {
	      /* Not a direct/indirect call; try the largepic form.  */
	      if (!ABI_64_P (abfd)
		  || (offset + 19) > sec->size
		  || memcmp (call, "\x48\xb8", 2) != 0
		  || call[11] != 0x01
		  || call[13] != 0xff
		  || call[14] != 0xd0
		  || !((call[10] == 0x48 && call[12] == 0xd8)
		       || (call[10] == 0x4c && call[12] == 0xf8)))
		return false;
	      largepic = true;
	    }
	  indirect_call = call[0] == 0xff;
	}

      /* The following reloc must be against __tls_get_addr; a local
	 symbol cannot be it.  */
      r_symndx = htab->r_sym (rel[1].r_info);
      if (r_symndx < symtab_hdr->sh_info)
	return false;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      if (h == NULL
	  || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
	return false;
      else
	{
	  /* The reloc type on the call must match the call form we
	     matched above.  */
	  r_type = (ELF32_R_TYPE (rel[1].r_info)
		    & ~R_X86_64_converted_reloc_bit);
	  if (largepic)
	    return r_type == R_X86_64_PLTOFF64;
	  else if (indirect_call)
	    return r_type == R_X86_64_GOTPCRELX;
	  else
	    return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
	}

    case R_X86_64_GOTTPOFF:
      /* Check transition from IE access model:
		mov foo@gottpoff(%rip), %reg
		add foo@gottpoff(%rip), %reg
       */

      /* Check REX prefix first.  */
      if (offset >= 3 && (offset + 4) <= sec->size)
	{
	  val = bfd_get_8 (abfd, contents + offset - 3);
	  if (val != 0x48 && val != 0x4c)
	    {
	      /* X32 may have 0x44 REX prefix or no REX prefix.  */
	      if (ABI_64_P (abfd))
		return false;
	    }
	}
      else
	{
	  /* X32 may not have any REX prefix.  */
	  if (ABI_64_P (abfd))
	    return false;
	  if (offset < 2 || (offset + 3) > sec->size)
	    return false;
	}

      /* Opcode must be mov (0x8b) or add (0x03).  */
      val = bfd_get_8 (abfd, contents + offset - 2);
      if (val != 0x8b && val != 0x03)
	return false;

      /* ModRM must encode a RIP-relative operand: mod=00, r/m=101.  */
      val = bfd_get_8 (abfd, contents + offset - 1);
      return (val & 0xc7) == 5;

    case R_X86_64_GOTPC32_TLSDESC:
      /* Check transition from GDesc access model:
		leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
		rex leal x@tlsdesc(%rip), %eax <--- X32 mode.

	 Make sure it's a leaq adding rip to a 32-bit offset
	 into any register, although it's probably almost always
	 going to be rax.  */

      if (offset < 3 || (offset + 4) > sec->size)
	return false;

      /* Mask off REX.R/REX.B: accept 0x48/0x4c (LP64) or any rex in
	 X32 where 0x40 remains after masking.  */
      val = bfd_get_8 (abfd, contents + offset - 3);
      val &= 0xfb;
      if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
	return false;

      /* 0x8d is lea.  */
      if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
	return false;

      /* ModRM must be RIP-relative: mod=00, r/m=101.  */
      val = bfd_get_8 (abfd, contents + offset - 1);
      return (val & 0xc7) == 0x05;

    case R_X86_64_TLSDESC_CALL:
      /* Check transition from GDesc access model:
		call *x@tlsdesc(%rax) <--- LP64 mode.
		call *x@tlsdesc(%eax) <--- X32 mode.
       */
      if (offset + 2 <= sec->size)
	{
	  unsigned int prefix;
	  call = contents + offset;
	  prefix = 0;
	  if (!ABI_64_P (abfd))
	    {
	      /* Check for call *x@tlsdesc(%eax).  */
	      if (call[0] == 0x67)
		{
		  /* addr32 prefix shifts the opcode by one byte.  */
		  prefix = 1;
		  if (offset + 3 > sec->size)
		    return false;
		}
	    }
	  /* Make sure that it's a call *x@tlsdesc(%rax).  */
	  return call[prefix] == 0xff && call[1 + prefix] == 0x10;
	}

      return false;

    default:
      abort ();
    }
}
1265
/* Return TRUE if the TLS access transition is OK or no transition
   will be performed.  Update R_TYPE if there is a transition.

   Decides the target reloc type (*R_TYPE) that FROM_TYPE may relax
   to, given link mode and, when FROM_RELOCATE_SECTION, the GOT TLS
   type computed during check_relocs.  Verifies the surrounding code
   sequence via elf_x86_64_check_tls_transition unless that check was
   already done in an earlier pass.  */

static bool
elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
			   asection *sec, bfd_byte *contents,
			   Elf_Internal_Shdr *symtab_hdr,
			   struct elf_link_hash_entry **sym_hashes,
			   unsigned int *r_type, int tls_type,
			   const Elf_Internal_Rela *rel,
			   const Elf_Internal_Rela *relend,
			   struct elf_link_hash_entry *h,
			   unsigned long r_symndx,
			   bool from_relocate_section)
{
  unsigned int from_type = *r_type;
  unsigned int to_type = from_type;
  bool check = true;

  /* Skip TLS transition for functions.  */
  if (h != NULL
      && (h->type == STT_FUNC
	  || h->type == STT_GNU_IFUNC))
    return true;

  switch (from_type)
    {
    case R_X86_64_TLSGD:
    case R_X86_64_GOTPC32_TLSDESC:
    case R_X86_64_TLSDESC_CALL:
    case R_X86_64_GOTTPOFF:
      if (bfd_link_executable (info))
	{
	  /* In an executable: local symbols go straight to LE
	     (TPOFF32), global symbols to IE (GOTTPOFF).  */
	  if (h == NULL)
	    to_type = R_X86_64_TPOFF32;
	  else
	    to_type = R_X86_64_GOTTPOFF;
	}

      /* When we are called from elf_x86_64_relocate_section, there may
	 be additional transitions based on TLS_TYPE.  */
      if (from_relocate_section)
	{
	  unsigned int new_to_type = to_type;

	  if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
	    new_to_type = R_X86_64_TPOFF32;

	  if (to_type == R_X86_64_TLSGD
	      || to_type == R_X86_64_GOTPC32_TLSDESC
	      || to_type == R_X86_64_TLSDESC_CALL)
	    {
	      /* GD/GDesc may still relax to IE if the GOT entry was
		 allocated as an IE entry.  */
	      if (tls_type == GOT_TLS_IE)
		new_to_type = R_X86_64_GOTTPOFF;
	    }

	  /* We checked the transition before when we were called from
	     elf_x86_64_check_relocs.  We only want to check the new
	     transition which hasn't been checked before.  */
	  check = new_to_type != to_type && from_type == to_type;
	  to_type = new_to_type;
	}

      break;

    case R_X86_64_TLSLD:
      if (bfd_link_executable (info))
	to_type = R_X86_64_TPOFF32;
      break;

    default:
      /* Not a TLS reloc we relax; nothing to do.  */
      return true;
    }

  /* Return TRUE if there is no transition.  */
  if (from_type == to_type)
    return true;

  /* Check if the transition can be performed.  */
  if (check
      && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
					    symtab_hdr, sym_hashes,
					    from_type, rel, relend))
    {
      reloc_howto_type *from, *to;
      const char *name;

      from = elf_x86_64_rtype_to_howto (abfd, from_type);
      to = elf_x86_64_rtype_to_howto (abfd, to_type);

      if (from == NULL || to == NULL)
	return false;

      if (h)
	name = h->root.root.string;
      else
	{
	  /* Look up the local symbol's name for the diagnostic.  */
	  struct elf_x86_link_hash_table *htab;

	  htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
	  if (htab == NULL)
	    name = "*unknown*";
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
					    abfd, r_symndx);
	      name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
	    }
	}

      _bfd_error_handler
	/* xgettext:c-format */
	(_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
	   " in section `%pA' failed"),
	 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
      bfd_set_error (bfd_error_bad_value);
      return false;
    }

  *r_type = to_type;
  return true;
}
1390
/* Rename some of the generic section flags to better document how they
   are used here.  */
#define check_relocs_failed	sec_flg0

/* Report a relocation that cannot be used when building the current
   kind of output (shared object, PIE or PDE), suggest the right
   recompile flag, mark SEC as failed, and return FALSE.

   PIC is used as a tri-state: a non-empty prefix when a visibility
   note makes the -fPIC/-fPIE hint redundant, or NULL to request that
   the appropriate hint be appended below.  */

static bool
elf_x86_64_need_pic (struct bfd_link_info *info,
		     bfd *input_bfd, asection *sec,
		     struct elf_link_hash_entry *h,
		     Elf_Internal_Shdr *symtab_hdr,
		     Elf_Internal_Sym *isym,
		     reloc_howto_type *howto)
{
  const char *v = "";
  const char *und = "";
  const char *pic = "";
  const char *object;

  const char *name;
  if (h)
    {
      name = h->root.root.string;
      switch (ELF_ST_VISIBILITY (h->other))
	{
	case STV_HIDDEN:
	  v = _("hidden symbol ");
	  break;
	case STV_INTERNAL:
	  v = _("internal symbol ");
	  break;
	case STV_PROTECTED:
	  v = _("protected symbol ");
	  break;
	default:
	  if (((struct elf_x86_link_hash_entry *) h)->def_protected)
	    v = _("protected symbol ");
	  else
	    v = _("symbol ");
	  /* Default visibility: the recompile hint is useful.  */
	  pic = NULL;
	  break;
	}

      if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
	und = _("undefined ");
    }
  else
    {
      /* Local symbol: always emit the recompile hint.  */
      name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
      pic = NULL;
    }

  if (bfd_link_dll (info))
    {
      object = _("a shared object");
      if (!pic)
	pic = _("; recompile with -fPIC");
    }
  else
    {
      if (bfd_link_pie (info))
	object = _("a PIE object");
      else
	object = _("a PDE object");
      if (!pic)
	pic = _("; recompile with -fPIE");
    }

  /* xgettext:c-format */
  _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
			"not be used when making %s%s"),
		      input_bfd, howto->name, und, v, name,
		      object, pic);
  bfd_set_error (bfd_error_bad_value);
  sec->check_relocs_failed = 1;
  return false;
}
1466
/* With the local symbol, foo, we convert
   mov foo@GOTPCREL(%rip), %reg
   to
   lea foo(%rip), %reg
   and convert
   call/jmp *foo@GOTPCREL(%rip)
   to
   nop call foo/jmp foo nop
   When PIC is false, convert
   test %reg, foo@GOTPCREL(%rip)
   to
   test $foo, %reg
   and convert
   binop foo@GOTPCREL(%rip), %reg
   to
   binop $foo, %reg
   where binop is one of adc, add, and, cmp, or, sbb, sub, xor
   instructions.

   On success the instruction bytes in CONTENTS are rewritten in
   place, IREL is updated to the new reloc type (tagged with
   R_X86_64_converted_reloc_bit), *R_TYPE_P is updated and *CONVERTED
   is set.  Returning TRUE with *CONVERTED untouched means "no
   conversion, but not an error".  */

static bool
elf_x86_64_convert_load_reloc (bfd *abfd,
			       bfd_byte *contents,
			       unsigned int *r_type_p,
			       Elf_Internal_Rela *irel,
			       struct elf_link_hash_entry *h,
			       bool *converted,
			       struct bfd_link_info *link_info)
{
  struct elf_x86_link_hash_table *htab;
  bool is_pic;
  bool no_overflow;
  bool relocx;
  bool to_reloc_pc32;
  bool abs_symbol;
  bool local_ref;
  asection *tsec;
  bfd_signed_vma raddend;
  unsigned int opcode;
  unsigned int modrm;
  unsigned int r_type = *r_type_p;
  unsigned int r_symndx;
  bfd_vma roff = irel->r_offset;
  bfd_vma abs_relocation;

  /* Need room before the reloc for opcode (+ REX for REX_GOTPCRELX).  */
  if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
    return true;

  raddend = irel->r_addend;
  /* Addend for 32-bit PC-relative relocation must be -4.  */
  if (raddend != -4)
    return true;

  htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
  is_pic = bfd_link_pic (link_info);

  relocx = (r_type == R_X86_64_GOTPCRELX
	    || r_type == R_X86_64_REX_GOTPCRELX);

  /* TRUE if --no-relax is used.  */
  no_overflow = link_info->disable_target_specific_optimizations > 1;

  r_symndx = htab->r_sym (irel->r_info);

  opcode = bfd_get_8 (abfd, contents + roff - 2);

  /* Convert mov to lea since it has been done for a while.  */
  if (opcode != 0x8b)
    {
      /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
	 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
	 test, xor instructions.  */
      if (!relocx)
	return true;
    }

  /* We convert only to R_X86_64_PC32:
     1. Branch.
     2. R_X86_64_GOTPCREL since we can't modify REX byte.
     3. no_overflow is true.
     4. PIC.
     */
  to_reloc_pc32 = (opcode == 0xff
		   || !relocx
		   || no_overflow
		   || is_pic);

  abs_symbol = false;
  abs_relocation = 0;

  /* Get the symbol referred to by the reloc.  */
  if (h == NULL)
    {
      Elf_Internal_Sym *isym
	= bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);

      /* Skip relocation against undefined symbols.  */
      if (isym->st_shndx == SHN_UNDEF)
	return true;

      local_ref = true;
      if (isym->st_shndx == SHN_ABS)
	{
	  tsec = bfd_abs_section_ptr;
	  abs_symbol = true;
	  abs_relocation = isym->st_value;
	}
      else if (isym->st_shndx == SHN_COMMON)
	tsec = bfd_com_section_ptr;
      else if (isym->st_shndx == SHN_X86_64_LCOMMON)
	tsec = &_bfd_elf_large_com_section;
      else
	tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
    }
  else
    {
      /* Undefined weak symbol is only bound locally in executable
	 and its reference is resolved as 0 without relocation
	 overflow.  We can only perform this optimization for
	 GOTPCRELX relocations since we need to modify REX byte.
	 It is OK convert mov with R_X86_64_GOTPCREL to
	 R_X86_64_PC32.  */
      struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);

      abs_symbol = ABS_SYMBOL_P (h);
      abs_relocation = h->root.u.def.value;

      /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P.  */
      local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
      if ((relocx || opcode == 0x8b)
	  && (h->root.type == bfd_link_hash_undefweak
	      && !eh->linker_def
	      && local_ref))
	{
	  if (opcode == 0xff)
	    {
	      /* Skip for branch instructions since R_X86_64_PC32
		 may overflow.  */
	      if (no_overflow)
		return true;
	    }
	  else if (relocx)
	    {
	      /* For non-branch instructions, we can convert to
		 R_X86_64_32/R_X86_64_32S since we know if there
		 is a REX byte.  */
	      to_reloc_pc32 = false;
	    }

	  /* Since we don't know the current PC when PIC is true,
	     we can't convert to R_X86_64_PC32.  */
	  if (to_reloc_pc32 && is_pic)
	    return true;

	  goto convert;
	}
      /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
	 ld.so may use its link-time address.  */
      else if (h->start_stop
	       || eh->linker_def
	       || ((h->def_regular
		    || h->root.type == bfd_link_hash_defined
		    || h->root.type == bfd_link_hash_defweak)
		   && h != htab->elf.hdynamic
		   && local_ref))
	{
	  /* bfd_link_hash_new or bfd_link_hash_undefined is
	     set by an assignment in a linker script in
	     bfd_elf_record_link_assignment.  start_stop is set
	     on __start_SECNAME/__stop_SECNAME which mark section
	     SECNAME.  */
	  if (h->start_stop
	      || eh->linker_def
	      || (h->def_regular
		  && (h->root.type == bfd_link_hash_new
		      || h->root.type == bfd_link_hash_undefined
		      || ((h->root.type == bfd_link_hash_defined
			   || h->root.type == bfd_link_hash_defweak)
			  && h->root.u.def.section == bfd_und_section_ptr))))
	    {
	      /* Skip since R_X86_64_32/R_X86_64_32S may overflow.  */
	      if (no_overflow)
		return true;
	      goto convert;
	    }
	  tsec = h->root.u.def.section;
	}
      else
	return true;
    }

  /* Don't convert GOTPCREL relocation against large section.  */
  if (elf_section_data (tsec) != NULL
      && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
    return true;

  /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow.  */
  if (no_overflow)
    return true;

 convert:
  if (opcode == 0xff)
    {
      /* We have "call/jmp *foo@GOTPCREL(%rip)".  */
      unsigned int nop;
      unsigned int disp;
      bfd_vma nop_offset;

      /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
	 R_X86_64_PC32.  */
      modrm = bfd_get_8 (abfd, contents + roff - 1);
      if (modrm == 0x25)
	{
	  /* Convert to "jmp foo nop".  The 5-byte jmp starts one byte
	     earlier than the original 6-byte jmp*, so shift the
	     displacement and reloc back one byte and put a trailing
	     nop.  */
	  modrm = 0xe9;
	  nop = NOP_OPCODE;
	  nop_offset = irel->r_offset + 3;
	  disp = bfd_get_32 (abfd, contents + irel->r_offset);
	  irel->r_offset -= 1;
	  bfd_put_32 (abfd, disp, contents + irel->r_offset);
	}
      else
	{
	  struct elf_x86_link_hash_entry *eh
	    = (struct elf_x86_link_hash_entry *) h;

	  /* Convert to "nop call foo".  ADDR_PREFIX_OPCODE
	     is a nop prefix.  */
	  modrm = 0xe8;
	  /* To support TLS optimization, always use addr32 prefix for
	     "call *__tls_get_addr@GOTPCREL(%rip)".  */
	  if (eh && eh->tls_get_addr)
	    {
	      nop = 0x67;
	      nop_offset = irel->r_offset - 2;
	    }
	  else
	    {
	      /* Nop placement (prefix or suffix) is a linker tunable.  */
	      nop = htab->params->call_nop_byte;
	      if (htab->params->call_nop_as_suffix)
		{
		  nop_offset = irel->r_offset + 3;
		  disp = bfd_get_32 (abfd, contents + irel->r_offset);
		  irel->r_offset -= 1;
		  bfd_put_32 (abfd, disp, contents + irel->r_offset);
		}
	      else
		nop_offset = irel->r_offset - 2;
	    }
	}
      bfd_put_8 (abfd, nop, contents + nop_offset);
      bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
      r_type = R_X86_64_PC32;
    }
  else
    {
      unsigned int rex;
      unsigned int rex_mask = REX_R;

      if (r_type == R_X86_64_REX_GOTPCRELX)
	rex = bfd_get_8 (abfd, contents + roff - 3);
      else
	rex = 0;

      if (opcode == 0x8b)
	{
	  /* An absolute, locally-resolved symbol with GOTPCRELX can
	     use an immediate form instead of RIP-relative.  */
	  if (abs_symbol && local_ref && relocx)
	    to_reloc_pc32 = false;

	  if (to_reloc_pc32)
	    {
	      /* Convert "mov foo@GOTPCREL(%rip), %reg" to
		 "lea foo(%rip), %reg".  */
	      opcode = 0x8d;
	      r_type = R_X86_64_PC32;
	    }
	  else
	    {
	      /* Convert "mov foo@GOTPCREL(%rip), %reg" to
		 "mov $foo, %reg".  Move the ModRM reg field into the
		 r/m field of the register-direct immediate form.  */
	      opcode = 0xc7;
	      modrm = bfd_get_8 (abfd, contents + roff - 1);
	      modrm = 0xc0 | (modrm & 0x38) >> 3;
	      if ((rex & REX_W) != 0
		  && ABI_64_P (link_info->output_bfd))
		{
		  /* Keep the REX_W bit in REX byte for LP64.  */
		  r_type = R_X86_64_32S;
		  goto rewrite_modrm_rex;
		}
	      else
		{
		  /* If the REX_W bit in REX byte isn't needed,
		     use R_X86_64_32 and clear the W bit to avoid
		     sign-extend imm32 to imm64.  */
		  r_type = R_X86_64_32;
		  /* Clear the W bit in REX byte.  */
		  rex_mask |= REX_W;
		  goto rewrite_modrm_rex;
		}
	    }
	}
      else
	{
	  /* R_X86_64_PC32 isn't supported.  */
	  if (to_reloc_pc32)
	    return true;

	  modrm = bfd_get_8 (abfd, contents + roff - 1);
	  if (opcode == 0x85)
	    {
	      /* Convert "test %reg, foo@GOTPCREL(%rip)" to
		 "test $foo, %reg".  */
	      modrm = 0xc0 | (modrm & 0x38) >> 3;
	      opcode = 0xf7;
	    }
	  else
	    {
	      /* Convert "binop foo@GOTPCREL(%rip), %reg" to
		 "binop $foo, %reg".  The binop opcode bits select the
		 /digit extension of the 0x81 immediate group.  */
	      modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
	      opcode = 0x81;
	    }

	  /* Use R_X86_64_32 with 32-bit operand to avoid relocation
	     overflow when sign-extending imm32 to imm64.  */
	  r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;

	rewrite_modrm_rex:
	  if (abs_relocation)
	    {
	      /* Check if R_X86_64_32S/R_X86_64_32 fits.  */
	      if (r_type == R_X86_64_32S)
		{
		  if ((abs_relocation + 0x80000000) > 0xffffffff)
		    return true;
		}
	      else
		{
		  if (abs_relocation > 0xffffffff)
		    return true;
		}
	    }

	  bfd_put_8 (abfd, modrm, contents + roff - 1);

	  if (rex)
	    {
	      /* Move the R bit to the B bit in REX byte.  */
	      rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
	      bfd_put_8 (abfd, rex, contents + roff - 3);
	    }

	  /* No addend for R_X86_64_32/R_X86_64_32S relocations.  */
	  irel->r_addend = 0;
	}

      bfd_put_8 (abfd, opcode, contents + roff - 2);
    }

  *r_type_p = r_type;
  irel->r_info = htab->r_info (r_symndx,
			       r_type | R_X86_64_converted_reloc_bit);

  *converted = true;

  return true;
}
1834
1835 /* Look through the relocs for a section during the first phase, and
1836 calculate needed space in the global offset table, procedure
1837 linkage table, and dynamic reloc sections. */
1838
1839 static bool
elf_x86_64_check_relocs(bfd * abfd,struct bfd_link_info * info,asection * sec,const Elf_Internal_Rela * relocs)1840 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1841 asection *sec,
1842 const Elf_Internal_Rela *relocs)
1843 {
1844 struct elf_x86_link_hash_table *htab;
1845 Elf_Internal_Shdr *symtab_hdr;
1846 struct elf_link_hash_entry **sym_hashes;
1847 const Elf_Internal_Rela *rel;
1848 const Elf_Internal_Rela *rel_end;
1849 asection *sreloc;
1850 bfd_byte *contents;
1851 bool converted;
1852
1853 if (bfd_link_relocatable (info))
1854 return true;
1855
1856 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1857 if (htab == NULL)
1858 {
1859 sec->check_relocs_failed = 1;
1860 return false;
1861 }
1862
1863 BFD_ASSERT (is_x86_elf (abfd, htab));
1864
1865 /* Get the section contents. */
1866 if (elf_section_data (sec)->this_hdr.contents != NULL)
1867 contents = elf_section_data (sec)->this_hdr.contents;
1868 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1869 {
1870 sec->check_relocs_failed = 1;
1871 return false;
1872 }
1873
1874 symtab_hdr = &elf_symtab_hdr (abfd);
1875 sym_hashes = elf_sym_hashes (abfd);
1876
1877 converted = false;
1878
1879 sreloc = NULL;
1880
1881 rel_end = relocs + sec->reloc_count;
1882 for (rel = relocs; rel < rel_end; rel++)
1883 {
1884 unsigned int r_type;
1885 unsigned int r_symndx;
1886 struct elf_link_hash_entry *h;
1887 struct elf_x86_link_hash_entry *eh;
1888 Elf_Internal_Sym *isym;
1889 const char *name;
1890 bool size_reloc;
1891 bool converted_reloc;
1892 bool no_dynreloc;
1893
1894 r_symndx = htab->r_sym (rel->r_info);
1895 r_type = ELF32_R_TYPE (rel->r_info);
1896
1897 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1898 {
1899 /* xgettext:c-format */
1900 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1901 abfd, r_symndx);
1902 goto error_return;
1903 }
1904
1905 if (r_symndx < symtab_hdr->sh_info)
1906 {
1907 /* A local symbol. */
1908 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1909 abfd, r_symndx);
1910 if (isym == NULL)
1911 goto error_return;
1912
1913 /* Check relocation against local STT_GNU_IFUNC symbol. */
1914 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1915 {
1916 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1917 true);
1918 if (h == NULL)
1919 goto error_return;
1920
1921 /* Fake a STT_GNU_IFUNC symbol. */
1922 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1923 isym, NULL);
1924 h->type = STT_GNU_IFUNC;
1925 h->def_regular = 1;
1926 h->ref_regular = 1;
1927 h->forced_local = 1;
1928 h->root.type = bfd_link_hash_defined;
1929 }
1930 else
1931 h = NULL;
1932 }
1933 else
1934 {
1935 isym = NULL;
1936 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1937 while (h->root.type == bfd_link_hash_indirect
1938 || h->root.type == bfd_link_hash_warning)
1939 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1940 }
1941
1942 /* Check invalid x32 relocations. */
1943 if (!ABI_64_P (abfd))
1944 switch (r_type)
1945 {
1946 default:
1947 break;
1948
1949 case R_X86_64_DTPOFF64:
1950 case R_X86_64_TPOFF64:
1951 case R_X86_64_PC64:
1952 case R_X86_64_GOTOFF64:
1953 case R_X86_64_GOT64:
1954 case R_X86_64_GOTPCREL64:
1955 case R_X86_64_GOTPC64:
1956 case R_X86_64_GOTPLT64:
1957 case R_X86_64_PLTOFF64:
1958 {
1959 if (h)
1960 name = h->root.root.string;
1961 else
1962 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1963 NULL);
1964 _bfd_error_handler
1965 /* xgettext:c-format */
1966 (_("%pB: relocation %s against symbol `%s' isn't "
1967 "supported in x32 mode"), abfd,
1968 x86_64_elf_howto_table[r_type].name, name);
1969 bfd_set_error (bfd_error_bad_value);
1970 goto error_return;
1971 }
1972 break;
1973 }
1974
1975 if (h != NULL)
1976 {
1977 /* It is referenced by a non-shared object. */
1978 h->ref_regular = 1;
1979 }
1980
1981 converted_reloc = false;
1982 if ((r_type == R_X86_64_GOTPCREL
1983 || r_type == R_X86_64_GOTPCRELX
1984 || r_type == R_X86_64_REX_GOTPCRELX)
1985 && (h == NULL || h->type != STT_GNU_IFUNC))
1986 {
1987 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1988 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1989 irel, h, &converted_reloc,
1990 info))
1991 goto error_return;
1992
1993 if (converted_reloc)
1994 converted = true;
1995 }
1996
1997 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
1998 symtab_hdr, &no_dynreloc))
1999 return false;
2000
2001 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2002 symtab_hdr, sym_hashes,
2003 &r_type, GOT_UNKNOWN,
2004 rel, rel_end, h, r_symndx, false))
2005 goto error_return;
2006
2007 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2008 if (h == htab->elf.hgot)
2009 htab->got_referenced = true;
2010
2011 eh = (struct elf_x86_link_hash_entry *) h;
2012 switch (r_type)
2013 {
2014 case R_X86_64_TLSLD:
2015 htab->tls_ld_or_ldm_got.refcount = 1;
2016 goto create_got;
2017
2018 case R_X86_64_TPOFF32:
2019 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2020 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2021 &x86_64_elf_howto_table[r_type]);
2022 if (eh != NULL)
2023 eh->zero_undefweak &= 0x2;
2024 break;
2025
2026 case R_X86_64_GOTTPOFF:
2027 if (!bfd_link_executable (info))
2028 info->flags |= DF_STATIC_TLS;
2029 /* Fall through */
2030
2031 case R_X86_64_GOT32:
2032 case R_X86_64_GOTPCREL:
2033 case R_X86_64_GOTPCRELX:
2034 case R_X86_64_REX_GOTPCRELX:
2035 case R_X86_64_TLSGD:
2036 case R_X86_64_GOT64:
2037 case R_X86_64_GOTPCREL64:
2038 case R_X86_64_GOTPLT64:
2039 case R_X86_64_GOTPC32_TLSDESC:
2040 case R_X86_64_TLSDESC_CALL:
2041 /* This symbol requires a global offset table entry. */
2042 {
2043 int tls_type, old_tls_type;
2044
2045 switch (r_type)
2046 {
2047 default:
2048 tls_type = GOT_NORMAL;
2049 if (h)
2050 {
2051 if (ABS_SYMBOL_P (h))
2052 tls_type = GOT_ABS;
2053 }
2054 else if (isym->st_shndx == SHN_ABS)
2055 tls_type = GOT_ABS;
2056 break;
2057 case R_X86_64_TLSGD:
2058 tls_type = GOT_TLS_GD;
2059 break;
2060 case R_X86_64_GOTTPOFF:
2061 tls_type = GOT_TLS_IE;
2062 break;
2063 case R_X86_64_GOTPC32_TLSDESC:
2064 case R_X86_64_TLSDESC_CALL:
2065 tls_type = GOT_TLS_GDESC;
2066 break;
2067 }
2068
2069 if (h != NULL)
2070 {
2071 h->got.refcount = 1;
2072 old_tls_type = eh->tls_type;
2073 }
2074 else
2075 {
2076 bfd_signed_vma *local_got_refcounts;
2077
2078 /* This is a global offset table entry for a local symbol. */
2079 local_got_refcounts = elf_local_got_refcounts (abfd);
2080 if (local_got_refcounts == NULL)
2081 {
2082 bfd_size_type size;
2083
2084 size = symtab_hdr->sh_info;
2085 size *= sizeof (bfd_signed_vma)
2086 + sizeof (bfd_vma) + sizeof (char);
2087 local_got_refcounts = ((bfd_signed_vma *)
2088 bfd_zalloc (abfd, size));
2089 if (local_got_refcounts == NULL)
2090 goto error_return;
2091 elf_local_got_refcounts (abfd) = local_got_refcounts;
2092 elf_x86_local_tlsdesc_gotent (abfd)
2093 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2094 elf_x86_local_got_tls_type (abfd)
2095 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2096 }
2097 local_got_refcounts[r_symndx] = 1;
2098 old_tls_type
2099 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2100 }
2101
2102 /* If a TLS symbol is accessed using IE at least once,
2103 there is no point to use dynamic model for it. */
2104 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2105 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2106 || tls_type != GOT_TLS_IE))
2107 {
2108 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2109 tls_type = old_tls_type;
2110 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2111 && GOT_TLS_GD_ANY_P (tls_type))
2112 tls_type |= old_tls_type;
2113 else
2114 {
2115 if (h)
2116 name = h->root.root.string;
2117 else
2118 name = bfd_elf_sym_name (abfd, symtab_hdr,
2119 isym, NULL);
2120 _bfd_error_handler
2121 /* xgettext:c-format */
2122 (_("%pB: '%s' accessed both as normal and"
2123 " thread local symbol"),
2124 abfd, name);
2125 bfd_set_error (bfd_error_bad_value);
2126 goto error_return;
2127 }
2128 }
2129
2130 if (old_tls_type != tls_type)
2131 {
2132 if (eh != NULL)
2133 eh->tls_type = tls_type;
2134 else
2135 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2136 }
2137 }
2138 /* Fall through */
2139
2140 case R_X86_64_GOTOFF64:
2141 case R_X86_64_GOTPC32:
2142 case R_X86_64_GOTPC64:
2143 create_got:
2144 if (eh != NULL)
2145 eh->zero_undefweak &= 0x2;
2146 break;
2147
2148 case R_X86_64_PLT32:
2149 case R_X86_64_PLT32_BND:
2150 /* This symbol requires a procedure linkage table entry. We
2151 actually build the entry in adjust_dynamic_symbol,
2152 because this might be a case of linking PIC code which is
2153 never referenced by a dynamic object, in which case we
2154 don't need to generate a procedure linkage table entry
2155 after all. */
2156
2157 /* If this is a local symbol, we resolve it directly without
2158 creating a procedure linkage table entry. */
2159 if (h == NULL)
2160 continue;
2161
2162 eh->zero_undefweak &= 0x2;
2163 h->needs_plt = 1;
2164 h->plt.refcount = 1;
2165 break;
2166
2167 case R_X86_64_PLTOFF64:
2168 /* This tries to form the 'address' of a function relative
2169 to GOT. For global symbols we need a PLT entry. */
2170 if (h != NULL)
2171 {
2172 h->needs_plt = 1;
2173 h->plt.refcount = 1;
2174 }
2175 goto create_got;
2176
2177 case R_X86_64_SIZE32:
2178 case R_X86_64_SIZE64:
2179 size_reloc = true;
2180 goto do_size;
2181
2182 case R_X86_64_32:
2183 if (!ABI_64_P (abfd))
2184 goto pointer;
2185 /* Fall through. */
2186 case R_X86_64_8:
2187 case R_X86_64_16:
2188 case R_X86_64_32S:
2189 /* Check relocation overflow as these relocs may lead to
2190 run-time relocation overflow. Don't error out for
2191 sections we don't care about, such as debug sections or
2192 when relocation overflow check is disabled. */
2193 if (!htab->params->no_reloc_overflow_check
2194 && !converted_reloc
2195 && (bfd_link_pic (info)
2196 || (bfd_link_executable (info)
2197 && h != NULL
2198 && !h->def_regular
2199 && h->def_dynamic
2200 && (sec->flags & SEC_READONLY) == 0)))
2201 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2202 &x86_64_elf_howto_table[r_type]);
2203 /* Fall through. */
2204
2205 case R_X86_64_PC8:
2206 case R_X86_64_PC16:
2207 case R_X86_64_PC32:
2208 case R_X86_64_PC32_BND:
2209 case R_X86_64_PC64:
2210 case R_X86_64_64:
2211 pointer:
2212 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2213 eh->zero_undefweak |= 0x2;
2214 /* We are called after all symbols have been resolved. Only
2215 relocation against STT_GNU_IFUNC symbol must go through
2216 PLT. */
2217 if (h != NULL
2218 && (bfd_link_executable (info)
2219 || h->type == STT_GNU_IFUNC))
2220 {
2221 bool func_pointer_ref = false;
2222
2223 if (r_type == R_X86_64_PC32)
2224 {
2225 /* Since something like ".long foo - ." may be used
2226 as pointer, make sure that PLT is used if foo is
2227 a function defined in a shared library. */
2228 if ((sec->flags & SEC_CODE) == 0)
2229 {
2230 h->pointer_equality_needed = 1;
2231 if (bfd_link_pie (info)
2232 && h->type == STT_FUNC
2233 && !h->def_regular
2234 && h->def_dynamic)
2235 {
2236 h->needs_plt = 1;
2237 h->plt.refcount = 1;
2238 }
2239 }
2240 }
2241 else if (r_type != R_X86_64_PC32_BND
2242 && r_type != R_X86_64_PC64)
2243 {
2244 h->pointer_equality_needed = 1;
2245 /* At run-time, R_X86_64_64 can be resolved for both
2246 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2247 can only be resolved for x32. */
2248 if ((sec->flags & SEC_READONLY) == 0
2249 && (r_type == R_X86_64_64
2250 || (!ABI_64_P (abfd)
2251 && (r_type == R_X86_64_32
2252 || r_type == R_X86_64_32S))))
2253 func_pointer_ref = true;
2254 }
2255
2256 if (!func_pointer_ref)
2257 {
2258 /* If this reloc is in a read-only section, we might
2259 need a copy reloc. We can't check reliably at this
2260 stage whether the section is read-only, as input
2261 sections have not yet been mapped to output sections.
2262 Tentatively set the flag for now, and correct in
2263 adjust_dynamic_symbol. */
2264 h->non_got_ref = 1;
2265
2266 /* We may need a .plt entry if the symbol is a function
2267 defined in a shared lib or is a function referenced
2268 from the code or read-only section. */
2269 if (!h->def_regular
2270 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2271 h->plt.refcount = 1;
2272 }
2273 }
2274
2275 size_reloc = false;
2276 do_size:
2277 if (!no_dynreloc
2278 && NEED_DYNAMIC_RELOCATION_P (info, true, h, sec, r_type,
2279 htab->pointer_r_type))
2280 {
2281 struct elf_dyn_relocs *p;
2282 struct elf_dyn_relocs **head;
2283
2284 /* We must copy these reloc types into the output file.
2285 Create a reloc section in dynobj and make room for
2286 this reloc. */
2287 if (sreloc == NULL)
2288 {
2289 sreloc = _bfd_elf_make_dynamic_reloc_section
2290 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2291 abfd, /*rela?*/ true);
2292
2293 if (sreloc == NULL)
2294 goto error_return;
2295 }
2296
2297 /* If this is a global symbol, we count the number of
2298 relocations we need for this symbol. */
2299 if (h != NULL)
2300 head = &h->dyn_relocs;
2301 else
2302 {
2303 /* Track dynamic relocs needed for local syms too.
2304 We really need local syms available to do this
2305 easily. Oh well. */
2306 asection *s;
2307 void **vpp;
2308
2309 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2310 abfd, r_symndx);
2311 if (isym == NULL)
2312 goto error_return;
2313
2314 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2315 if (s == NULL)
2316 s = sec;
2317
2318 /* Beware of type punned pointers vs strict aliasing
2319 rules. */
2320 vpp = &(elf_section_data (s)->local_dynrel);
2321 head = (struct elf_dyn_relocs **)vpp;
2322 }
2323
2324 p = *head;
2325 if (p == NULL || p->sec != sec)
2326 {
2327 size_t amt = sizeof *p;
2328
2329 p = ((struct elf_dyn_relocs *)
2330 bfd_alloc (htab->elf.dynobj, amt));
2331 if (p == NULL)
2332 goto error_return;
2333 p->next = *head;
2334 *head = p;
2335 p->sec = sec;
2336 p->count = 0;
2337 p->pc_count = 0;
2338 }
2339
2340 p->count += 1;
2341 /* Count size relocation as PC-relative relocation. */
2342 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2343 p->pc_count += 1;
2344 }
2345 break;
2346
2347 /* This relocation describes the C++ object vtable hierarchy.
2348 Reconstruct it for later use during GC. */
2349 case R_X86_64_GNU_VTINHERIT:
2350 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2351 goto error_return;
2352 break;
2353
2354 /* This relocation describes which C++ vtable entries are actually
2355 used. Record for later use during GC. */
2356 case R_X86_64_GNU_VTENTRY:
2357 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2358 goto error_return;
2359 break;
2360
2361 default:
2362 break;
2363 }
2364 }
2365
2366 if (elf_section_data (sec)->this_hdr.contents != contents)
2367 {
2368 if (!converted && !info->keep_memory)
2369 free (contents);
2370 else
2371 {
2372 /* Cache the section contents for elf_link_input_bfd if any
2373 load is converted or --no-keep-memory isn't used. */
2374 elf_section_data (sec)->this_hdr.contents = contents;
2375 }
2376 }
2377
2378 /* Cache relocations if any load is converted. */
2379 if (elf_section_data (sec)->relocs != relocs && converted)
2380 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2381
2382 return true;
2383
2384 error_return:
2385 if (elf_section_data (sec)->this_hdr.contents != contents)
2386 free (contents);
2387 sec->check_relocs_failed = 1;
2388 return false;
2389 }
2390
2391 /* Return the relocation value for @tpoff relocation
2392 if STT_TLS virtual address is ADDRESS. */
2393
2394 static bfd_vma
elf_x86_64_tpoff(struct bfd_link_info * info,bfd_vma address)2395 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2396 {
2397 struct elf_link_hash_table *htab = elf_hash_table (info);
2398 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2399 bfd_vma static_tls_size;
2400
2401 /* If tls_segment is NULL, we should have signalled an error already. */
2402 if (htab->tls_sec == NULL)
2403 return 0;
2404
2405 /* Consider special static TLS alignment requirements. */
2406 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2407 return address - static_tls_size - htab->tls_sec->vma;
2408 }
2409
2410 /* Relocate an x86_64 ELF section. */
2411
2412 static int
elf_x86_64_relocate_section(bfd * output_bfd,struct bfd_link_info * info,bfd * input_bfd,asection * input_section,bfd_byte * contents,Elf_Internal_Rela * relocs,Elf_Internal_Sym * local_syms,asection ** local_sections)2413 elf_x86_64_relocate_section (bfd *output_bfd,
2414 struct bfd_link_info *info,
2415 bfd *input_bfd,
2416 asection *input_section,
2417 bfd_byte *contents,
2418 Elf_Internal_Rela *relocs,
2419 Elf_Internal_Sym *local_syms,
2420 asection **local_sections)
2421 {
2422 struct elf_x86_link_hash_table *htab;
2423 Elf_Internal_Shdr *symtab_hdr;
2424 struct elf_link_hash_entry **sym_hashes;
2425 bfd_vma *local_got_offsets;
2426 bfd_vma *local_tlsdesc_gotents;
2427 Elf_Internal_Rela *rel;
2428 Elf_Internal_Rela *wrel;
2429 Elf_Internal_Rela *relend;
2430 unsigned int plt_entry_size;
2431 bool status;
2432
2433 /* Skip if check_relocs failed. */
2434 if (input_section->check_relocs_failed)
2435 return false;
2436
2437 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2438 if (htab == NULL)
2439 return false;
2440
2441 if (!is_x86_elf (input_bfd, htab))
2442 {
2443 bfd_set_error (bfd_error_wrong_format);
2444 return false;
2445 }
2446
2447 plt_entry_size = htab->plt.plt_entry_size;
2448 symtab_hdr = &elf_symtab_hdr (input_bfd);
2449 sym_hashes = elf_sym_hashes (input_bfd);
2450 local_got_offsets = elf_local_got_offsets (input_bfd);
2451 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2452
2453 _bfd_x86_elf_set_tls_module_base (info);
2454
2455 status = true;
2456 rel = wrel = relocs;
2457 relend = relocs + input_section->reloc_count;
2458 for (; rel < relend; wrel++, rel++)
2459 {
2460 unsigned int r_type, r_type_tls;
2461 reloc_howto_type *howto;
2462 unsigned long r_symndx;
2463 struct elf_link_hash_entry *h;
2464 struct elf_x86_link_hash_entry *eh;
2465 Elf_Internal_Sym *sym;
2466 asection *sec;
2467 bfd_vma off, offplt, plt_offset;
2468 bfd_vma relocation;
2469 bool unresolved_reloc;
2470 bfd_reloc_status_type r;
2471 int tls_type;
2472 asection *base_got, *resolved_plt;
2473 bfd_vma st_size;
2474 bool resolved_to_zero;
2475 bool relative_reloc;
2476 bool converted_reloc;
2477 bool need_copy_reloc_in_pie;
2478 bool no_copyreloc_p;
2479
2480 r_type = ELF32_R_TYPE (rel->r_info);
2481 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2482 || r_type == (int) R_X86_64_GNU_VTENTRY)
2483 {
2484 if (wrel != rel)
2485 *wrel = *rel;
2486 continue;
2487 }
2488
2489 r_symndx = htab->r_sym (rel->r_info);
2490 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2491 if (converted_reloc)
2492 {
2493 r_type &= ~R_X86_64_converted_reloc_bit;
2494 rel->r_info = htab->r_info (r_symndx, r_type);
2495 }
2496
2497 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2498 if (howto == NULL)
2499 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2500
2501 h = NULL;
2502 sym = NULL;
2503 sec = NULL;
2504 unresolved_reloc = false;
2505 if (r_symndx < symtab_hdr->sh_info)
2506 {
2507 sym = local_syms + r_symndx;
2508 sec = local_sections[r_symndx];
2509
2510 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2511 &sec, rel);
2512 st_size = sym->st_size;
2513
2514 /* Relocate against local STT_GNU_IFUNC symbol. */
2515 if (!bfd_link_relocatable (info)
2516 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2517 {
2518 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2519 rel, false);
2520 if (h == NULL)
2521 abort ();
2522
2523 /* Set STT_GNU_IFUNC symbol value. */
2524 h->root.u.def.value = sym->st_value;
2525 h->root.u.def.section = sec;
2526 }
2527 }
2528 else
2529 {
2530 bool warned ATTRIBUTE_UNUSED;
2531 bool ignored ATTRIBUTE_UNUSED;
2532
2533 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2534 r_symndx, symtab_hdr, sym_hashes,
2535 h, sec, relocation,
2536 unresolved_reloc, warned, ignored);
2537 st_size = h->size;
2538 }
2539
2540 if (sec != NULL && discarded_section (sec))
2541 {
2542 _bfd_clear_contents (howto, input_bfd, input_section,
2543 contents, rel->r_offset);
2544 wrel->r_offset = rel->r_offset;
2545 wrel->r_info = 0;
2546 wrel->r_addend = 0;
2547
2548 /* For ld -r, remove relocations in debug sections against
2549 sections defined in discarded sections. Not done for
2550 eh_frame editing code expects to be present. */
2551 if (bfd_link_relocatable (info)
2552 && (input_section->flags & SEC_DEBUGGING))
2553 wrel--;
2554
2555 continue;
2556 }
2557
2558 if (bfd_link_relocatable (info))
2559 {
2560 if (wrel != rel)
2561 *wrel = *rel;
2562 continue;
2563 }
2564
2565 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2566 {
2567 if (r_type == R_X86_64_64)
2568 {
2569 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2570 zero-extend it to 64bit if addend is zero. */
2571 r_type = R_X86_64_32;
2572 memset (contents + rel->r_offset + 4, 0, 4);
2573 }
2574 else if (r_type == R_X86_64_SIZE64)
2575 {
2576 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2577 zero-extend it to 64bit if addend is zero. */
2578 r_type = R_X86_64_SIZE32;
2579 memset (contents + rel->r_offset + 4, 0, 4);
2580 }
2581 }
2582
2583 eh = (struct elf_x86_link_hash_entry *) h;
2584
2585 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2586 it here if it is defined in a non-shared object. */
2587 if (h != NULL
2588 && h->type == STT_GNU_IFUNC
2589 && h->def_regular)
2590 {
2591 bfd_vma plt_index;
2592 const char *name;
2593
2594 if ((input_section->flags & SEC_ALLOC) == 0)
2595 {
2596 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2597 STT_GNU_IFUNC symbol as STT_FUNC. */
2598 if (elf_section_type (input_section) == SHT_NOTE)
2599 goto skip_ifunc;
2600 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2601 sections because such sections are not SEC_ALLOC and
2602 thus ld.so will not process them. */
2603 if ((input_section->flags & SEC_DEBUGGING) != 0)
2604 continue;
2605 abort ();
2606 }
2607
2608 switch (r_type)
2609 {
2610 default:
2611 break;
2612
2613 case R_X86_64_GOTPCREL:
2614 case R_X86_64_GOTPCRELX:
2615 case R_X86_64_REX_GOTPCRELX:
2616 case R_X86_64_GOTPCREL64:
2617 base_got = htab->elf.sgot;
2618 off = h->got.offset;
2619
2620 if (base_got == NULL)
2621 abort ();
2622
2623 if (off == (bfd_vma) -1)
2624 {
2625 /* We can't use h->got.offset here to save state, or
2626 even just remember the offset, as finish_dynamic_symbol
2627 would use that as offset into .got. */
2628
2629 if (h->plt.offset == (bfd_vma) -1)
2630 abort ();
2631
2632 if (htab->elf.splt != NULL)
2633 {
2634 plt_index = (h->plt.offset / plt_entry_size
2635 - htab->plt.has_plt0);
2636 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2637 base_got = htab->elf.sgotplt;
2638 }
2639 else
2640 {
2641 plt_index = h->plt.offset / plt_entry_size;
2642 off = plt_index * GOT_ENTRY_SIZE;
2643 base_got = htab->elf.igotplt;
2644 }
2645
2646 if (h->dynindx == -1
2647 || h->forced_local
2648 || info->symbolic)
2649 {
2650 /* This references the local defitionion. We must
2651 initialize this entry in the global offset table.
2652 Since the offset must always be a multiple of 8,
2653 we use the least significant bit to record
2654 whether we have initialized it already.
2655
2656 When doing a dynamic link, we create a .rela.got
2657 relocation entry to initialize the value. This
2658 is done in the finish_dynamic_symbol routine. */
2659 if ((off & 1) != 0)
2660 off &= ~1;
2661 else
2662 {
2663 bfd_put_64 (output_bfd, relocation,
2664 base_got->contents + off);
2665 /* Note that this is harmless for the GOTPLT64
2666 case, as -1 | 1 still is -1. */
2667 h->got.offset |= 1;
2668 }
2669 }
2670 }
2671
2672 relocation = (base_got->output_section->vma
2673 + base_got->output_offset + off);
2674
2675 goto do_relocation;
2676 }
2677
2678 if (h->plt.offset == (bfd_vma) -1)
2679 {
2680 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2681 if (r_type == htab->pointer_r_type
2682 && (input_section->flags & SEC_CODE) == 0)
2683 goto do_ifunc_pointer;
2684 goto bad_ifunc_reloc;
2685 }
2686
2687 /* STT_GNU_IFUNC symbol must go through PLT. */
2688 if (htab->elf.splt != NULL)
2689 {
2690 if (htab->plt_second != NULL)
2691 {
2692 resolved_plt = htab->plt_second;
2693 plt_offset = eh->plt_second.offset;
2694 }
2695 else
2696 {
2697 resolved_plt = htab->elf.splt;
2698 plt_offset = h->plt.offset;
2699 }
2700 }
2701 else
2702 {
2703 resolved_plt = htab->elf.iplt;
2704 plt_offset = h->plt.offset;
2705 }
2706
2707 relocation = (resolved_plt->output_section->vma
2708 + resolved_plt->output_offset + plt_offset);
2709
2710 switch (r_type)
2711 {
2712 default:
2713 bad_ifunc_reloc:
2714 if (h->root.root.string)
2715 name = h->root.root.string;
2716 else
2717 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2718 NULL);
2719 _bfd_error_handler
2720 /* xgettext:c-format */
2721 (_("%pB: relocation %s against STT_GNU_IFUNC "
2722 "symbol `%s' isn't supported"), input_bfd,
2723 howto->name, name);
2724 bfd_set_error (bfd_error_bad_value);
2725 return false;
2726
2727 case R_X86_64_32S:
2728 if (bfd_link_pic (info))
2729 abort ();
2730 goto do_relocation;
2731
2732 case R_X86_64_32:
2733 if (ABI_64_P (output_bfd))
2734 goto do_relocation;
2735 /* FALLTHROUGH */
2736 case R_X86_64_64:
2737 do_ifunc_pointer:
2738 if (rel->r_addend != 0)
2739 {
2740 if (h->root.root.string)
2741 name = h->root.root.string;
2742 else
2743 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2744 sym, NULL);
2745 _bfd_error_handler
2746 /* xgettext:c-format */
2747 (_("%pB: relocation %s against STT_GNU_IFUNC "
2748 "symbol `%s' has non-zero addend: %" PRId64),
2749 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2750 bfd_set_error (bfd_error_bad_value);
2751 return false;
2752 }
2753
2754 /* Generate dynamic relcoation only when there is a
2755 non-GOT reference in a shared object or there is no
2756 PLT. */
2757 if ((bfd_link_pic (info) && h->non_got_ref)
2758 || h->plt.offset == (bfd_vma) -1)
2759 {
2760 Elf_Internal_Rela outrel;
2761 asection *sreloc;
2762
2763 /* Need a dynamic relocation to get the real function
2764 address. */
2765 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2766 info,
2767 input_section,
2768 rel->r_offset);
2769 if (outrel.r_offset == (bfd_vma) -1
2770 || outrel.r_offset == (bfd_vma) -2)
2771 abort ();
2772
2773 outrel.r_offset += (input_section->output_section->vma
2774 + input_section->output_offset);
2775
2776 if (POINTER_LOCAL_IFUNC_P (info, h))
2777 {
2778 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2779 h->root.root.string,
2780 h->root.u.def.section->owner);
2781
2782 /* This symbol is resolved locally. */
2783 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2784 outrel.r_addend = (h->root.u.def.value
2785 + h->root.u.def.section->output_section->vma
2786 + h->root.u.def.section->output_offset);
2787
2788 if (htab->params->report_relative_reloc)
2789 _bfd_x86_elf_link_report_relative_reloc
2790 (info, input_section, h, sym,
2791 "R_X86_64_IRELATIVE", &outrel);
2792 }
2793 else
2794 {
2795 outrel.r_info = htab->r_info (h->dynindx, r_type);
2796 outrel.r_addend = 0;
2797 }
2798
2799 /* Dynamic relocations are stored in
2800 1. .rela.ifunc section in PIC object.
2801 2. .rela.got section in dynamic executable.
2802 3. .rela.iplt section in static executable. */
2803 if (bfd_link_pic (info))
2804 sreloc = htab->elf.irelifunc;
2805 else if (htab->elf.splt != NULL)
2806 sreloc = htab->elf.srelgot;
2807 else
2808 sreloc = htab->elf.irelplt;
2809 elf_append_rela (output_bfd, sreloc, &outrel);
2810
2811 /* If this reloc is against an external symbol, we
2812 do not want to fiddle with the addend. Otherwise,
2813 we need to include the symbol value so that it
2814 becomes an addend for the dynamic reloc. For an
2815 internal symbol, we have updated addend. */
2816 continue;
2817 }
2818 /* FALLTHROUGH */
2819 case R_X86_64_PC32:
2820 case R_X86_64_PC32_BND:
2821 case R_X86_64_PC64:
2822 case R_X86_64_PLT32:
2823 case R_X86_64_PLT32_BND:
2824 goto do_relocation;
2825 }
2826 }
2827
2828 skip_ifunc:
2829 resolved_to_zero = (eh != NULL
2830 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2831
2832 /* When generating a shared object, the relocations handled here are
2833 copied into the output file to be resolved at run time. */
2834 switch (r_type)
2835 {
2836 case R_X86_64_GOT32:
2837 case R_X86_64_GOT64:
2838 /* Relocation is to the entry for this symbol in the global
2839 offset table. */
2840 case R_X86_64_GOTPCREL:
2841 case R_X86_64_GOTPCRELX:
2842 case R_X86_64_REX_GOTPCRELX:
2843 case R_X86_64_GOTPCREL64:
2844 /* Use global offset table entry as symbol value. */
2845 case R_X86_64_GOTPLT64:
2846 /* This is obsolete and treated the same as GOT64. */
2847 base_got = htab->elf.sgot;
2848
2849 if (htab->elf.sgot == NULL)
2850 abort ();
2851
2852 relative_reloc = false;
2853 if (h != NULL)
2854 {
2855 off = h->got.offset;
2856 if (h->needs_plt
2857 && h->plt.offset != (bfd_vma)-1
2858 && off == (bfd_vma)-1)
2859 {
2860 /* We can't use h->got.offset here to save
2861 state, or even just remember the offset, as
2862 finish_dynamic_symbol would use that as offset into
2863 .got. */
2864 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2865 - htab->plt.has_plt0);
2866 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2867 base_got = htab->elf.sgotplt;
2868 }
2869
2870 if (RESOLVED_LOCALLY_P (info, h, htab))
2871 {
2872 /* We must initialize this entry in the global offset
2873 table. Since the offset must always be a multiple
2874 of 8, we use the least significant bit to record
2875 whether we have initialized it already.
2876
2877 When doing a dynamic link, we create a .rela.got
2878 relocation entry to initialize the value. This is
2879 done in the finish_dynamic_symbol routine. */
2880 if ((off & 1) != 0)
2881 off &= ~1;
2882 else
2883 {
2884 bfd_put_64 (output_bfd, relocation,
2885 base_got->contents + off);
2886 /* Note that this is harmless for the GOTPLT64 case,
2887 as -1 | 1 still is -1. */
2888 h->got.offset |= 1;
2889
2890 if (GENERATE_RELATIVE_RELOC_P (info, h))
2891 {
2892 /* If this symbol isn't dynamic in PIC,
2893 generate R_X86_64_RELATIVE here. */
2894 eh->no_finish_dynamic_symbol = 1;
2895 relative_reloc = true;
2896 }
2897 }
2898 }
2899 else
2900 unresolved_reloc = false;
2901 }
2902 else
2903 {
2904 if (local_got_offsets == NULL)
2905 abort ();
2906
2907 off = local_got_offsets[r_symndx];
2908
2909 /* The offset must always be a multiple of 8. We use
2910 the least significant bit to record whether we have
2911 already generated the necessary reloc. */
2912 if ((off & 1) != 0)
2913 off &= ~1;
2914 else
2915 {
2916 bfd_put_64 (output_bfd, relocation,
2917 base_got->contents + off);
2918 local_got_offsets[r_symndx] |= 1;
2919
2920 /* NB: GOTPCREL relocations against local absolute
2921 symbol store relocation value in the GOT slot
2922 without relative relocation. */
2923 if (bfd_link_pic (info)
2924 && !(sym->st_shndx == SHN_ABS
2925 && (r_type == R_X86_64_GOTPCREL
2926 || r_type == R_X86_64_GOTPCRELX
2927 || r_type == R_X86_64_REX_GOTPCRELX)))
2928 relative_reloc = true;
2929 }
2930 }
2931
2932 if (relative_reloc)
2933 {
2934 asection *s;
2935 Elf_Internal_Rela outrel;
2936
2937 /* We need to generate a R_X86_64_RELATIVE reloc
2938 for the dynamic linker. */
2939 s = htab->elf.srelgot;
2940 if (s == NULL)
2941 abort ();
2942
2943 outrel.r_offset = (base_got->output_section->vma
2944 + base_got->output_offset
2945 + off);
2946 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2947 outrel.r_addend = relocation;
2948
2949 if (htab->params->report_relative_reloc)
2950 _bfd_x86_elf_link_report_relative_reloc
2951 (info, input_section, h, sym, "R_X86_64_RELATIVE",
2952 &outrel);
2953
2954 elf_append_rela (output_bfd, s, &outrel);
2955 }
2956
2957 if (off >= (bfd_vma) -2)
2958 abort ();
2959
2960 relocation = base_got->output_section->vma
2961 + base_got->output_offset + off;
2962 if (r_type != R_X86_64_GOTPCREL
2963 && r_type != R_X86_64_GOTPCRELX
2964 && r_type != R_X86_64_REX_GOTPCRELX
2965 && r_type != R_X86_64_GOTPCREL64)
2966 relocation -= htab->elf.sgotplt->output_section->vma
2967 - htab->elf.sgotplt->output_offset;
2968
2969 break;
2970
2971 case R_X86_64_GOTOFF64:
2972 /* Relocation is relative to the start of the global offset
2973 table. */
2974
2975 /* Check to make sure it isn't a protected function or data
2976 symbol for shared library since it may not be local when
2977 used as function address or with copy relocation. We also
2978 need to make sure that a symbol is referenced locally. */
2979 if (bfd_link_pic (info) && h)
2980 {
2981 if (!h->def_regular)
2982 {
2983 const char *v;
2984
2985 switch (ELF_ST_VISIBILITY (h->other))
2986 {
2987 case STV_HIDDEN:
2988 v = _("hidden symbol");
2989 break;
2990 case STV_INTERNAL:
2991 v = _("internal symbol");
2992 break;
2993 case STV_PROTECTED:
2994 v = _("protected symbol");
2995 break;
2996 default:
2997 v = _("symbol");
2998 break;
2999 }
3000
3001 _bfd_error_handler
3002 /* xgettext:c-format */
3003 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3004 " `%s' can not be used when making a shared object"),
3005 input_bfd, v, h->root.root.string);
3006 bfd_set_error (bfd_error_bad_value);
3007 return false;
3008 }
3009 else if (!bfd_link_executable (info)
3010 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3011 && (h->type == STT_FUNC
3012 || h->type == STT_OBJECT)
3013 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3014 {
3015 _bfd_error_handler
3016 /* xgettext:c-format */
3017 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3018 " `%s' can not be used when making a shared object"),
3019 input_bfd,
3020 h->type == STT_FUNC ? "function" : "data",
3021 h->root.root.string);
3022 bfd_set_error (bfd_error_bad_value);
3023 return false;
3024 }
3025 }
3026
3027 /* Note that sgot is not involved in this
3028 calculation. We always want the start of .got.plt. If we
3029 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3030 permitted by the ABI, we might have to change this
3031 calculation. */
3032 relocation -= htab->elf.sgotplt->output_section->vma
3033 + htab->elf.sgotplt->output_offset;
3034 break;
3035
3036 case R_X86_64_GOTPC32:
3037 case R_X86_64_GOTPC64:
3038 /* Use global offset table as symbol value. */
3039 relocation = htab->elf.sgotplt->output_section->vma
3040 + htab->elf.sgotplt->output_offset;
3041 unresolved_reloc = false;
3042 break;
3043
3044 case R_X86_64_PLTOFF64:
3045 /* Relocation is PLT entry relative to GOT. For local
3046 symbols it's the symbol itself relative to GOT. */
3047 if (h != NULL
3048 /* See PLT32 handling. */
3049 && (h->plt.offset != (bfd_vma) -1
3050 || eh->plt_got.offset != (bfd_vma) -1)
3051 && htab->elf.splt != NULL)
3052 {
3053 if (eh->plt_got.offset != (bfd_vma) -1)
3054 {
3055 /* Use the GOT PLT. */
3056 resolved_plt = htab->plt_got;
3057 plt_offset = eh->plt_got.offset;
3058 }
3059 else if (htab->plt_second != NULL)
3060 {
3061 resolved_plt = htab->plt_second;
3062 plt_offset = eh->plt_second.offset;
3063 }
3064 else
3065 {
3066 resolved_plt = htab->elf.splt;
3067 plt_offset = h->plt.offset;
3068 }
3069
3070 relocation = (resolved_plt->output_section->vma
3071 + resolved_plt->output_offset
3072 + plt_offset);
3073 unresolved_reloc = false;
3074 }
3075
3076 relocation -= htab->elf.sgotplt->output_section->vma
3077 + htab->elf.sgotplt->output_offset;
3078 break;
3079
3080 case R_X86_64_PLT32:
3081 case R_X86_64_PLT32_BND:
3082 /* Relocation is to the entry for this symbol in the
3083 procedure linkage table. */
3084
3085 /* Resolve a PLT32 reloc against a local symbol directly,
3086 without using the procedure linkage table. */
3087 if (h == NULL)
3088 break;
3089
3090 if ((h->plt.offset == (bfd_vma) -1
3091 && eh->plt_got.offset == (bfd_vma) -1)
3092 || htab->elf.splt == NULL)
3093 {
3094 /* We didn't make a PLT entry for this symbol. This
3095 happens when statically linking PIC code, or when
3096 using -Bsymbolic. */
3097 break;
3098 }
3099
3100 use_plt:
3101 if (h->plt.offset != (bfd_vma) -1)
3102 {
3103 if (htab->plt_second != NULL)
3104 {
3105 resolved_plt = htab->plt_second;
3106 plt_offset = eh->plt_second.offset;
3107 }
3108 else
3109 {
3110 resolved_plt = htab->elf.splt;
3111 plt_offset = h->plt.offset;
3112 }
3113 }
3114 else
3115 {
3116 /* Use the GOT PLT. */
3117 resolved_plt = htab->plt_got;
3118 plt_offset = eh->plt_got.offset;
3119 }
3120
3121 relocation = (resolved_plt->output_section->vma
3122 + resolved_plt->output_offset
3123 + plt_offset);
3124 unresolved_reloc = false;
3125 break;
3126
3127 case R_X86_64_SIZE32:
3128 case R_X86_64_SIZE64:
3129 /* Set to symbol size. */
3130 relocation = st_size;
3131 goto direct;
3132
3133 case R_X86_64_PC8:
3134 case R_X86_64_PC16:
3135 case R_X86_64_PC32:
3136 case R_X86_64_PC32_BND:
3137 /* Don't complain about -fPIC if the symbol is undefined when
3138 building executable unless it is unresolved weak symbol,
3139 references a dynamic definition in PIE or -z nocopyreloc
3140 is used. */
3141 no_copyreloc_p
3142 = (info->nocopyreloc
3143 || (h != NULL
3144 && !h->root.linker_def
3145 && !h->root.ldscript_def
3146 && eh->def_protected
3147 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3148
3149 if ((input_section->flags & SEC_ALLOC) != 0
3150 && (input_section->flags & SEC_READONLY) != 0
3151 && h != NULL
3152 && ((bfd_link_executable (info)
3153 && ((h->root.type == bfd_link_hash_undefweak
3154 && (eh == NULL
3155 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3156 eh)))
3157 || (bfd_link_pie (info)
3158 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3159 && h->def_dynamic)
3160 || (no_copyreloc_p
3161 && h->def_dynamic
3162 && !(h->root.u.def.section->flags & SEC_CODE))))
3163 || (bfd_link_pie (info)
3164 && h->root.type == bfd_link_hash_undefweak)
3165 || bfd_link_dll (info)))
3166 {
3167 bool fail = false;
3168 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3169 {
3170 /* Symbol is referenced locally. Make sure it is
3171 defined locally. */
3172 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3173 }
3174 else if (bfd_link_pie (info))
3175 {
3176 /* We can only use PC-relative relocations in PIE
3177 from non-code sections. */
3178 if (h->root.type == bfd_link_hash_undefweak
3179 || (h->type == STT_FUNC
3180 && (sec->flags & SEC_CODE) != 0))
3181 fail = true;
3182 }
3183 else if (no_copyreloc_p || bfd_link_dll (info))
3184 {
3185 /* Symbol doesn't need copy reloc and isn't
3186 referenced locally. Don't allow PC-relative
3187 relocations against default and protected
3188 symbols since address of protected function
3189 and location of protected data may not be in
3190 the shared object. */
3191 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3192 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3193 }
3194
3195 if (fail)
3196 return elf_x86_64_need_pic (info, input_bfd, input_section,
3197 h, NULL, NULL, howto);
3198 }
3199 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3200 as function address. */
3201 else if (h != NULL
3202 && (input_section->flags & SEC_CODE) == 0
3203 && bfd_link_pie (info)
3204 && h->type == STT_FUNC
3205 && !h->def_regular
3206 && h->def_dynamic)
3207 goto use_plt;
3208 /* Fall through. */
3209
3210 case R_X86_64_8:
3211 case R_X86_64_16:
3212 case R_X86_64_32:
3213 case R_X86_64_PC64:
3214 case R_X86_64_64:
3215 /* FIXME: The ABI says the linker should make sure the value is
3216 the same when it's zeroextended to 64 bit. */
3217
3218 direct:
3219 if ((input_section->flags & SEC_ALLOC) == 0)
3220 break;
3221
3222 need_copy_reloc_in_pie = (bfd_link_pie (info)
3223 && h != NULL
3224 && (h->needs_copy
3225 || eh->needs_copy
3226 || (h->root.type
3227 == bfd_link_hash_undefined))
3228 && (X86_PCREL_TYPE_P (r_type)
3229 || X86_SIZE_TYPE_P (r_type)));
3230
3231 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec,
3232 need_copy_reloc_in_pie,
3233 resolved_to_zero, false))
3234 {
3235 Elf_Internal_Rela outrel;
3236 bool skip, relocate;
3237 asection *sreloc;
3238 const char *relative_reloc_name = NULL;
3239
3240 /* When generating a shared object, these relocations
3241 are copied into the output file to be resolved at run
3242 time. */
3243 skip = false;
3244 relocate = false;
3245
3246 outrel.r_offset =
3247 _bfd_elf_section_offset (output_bfd, info, input_section,
3248 rel->r_offset);
3249 if (outrel.r_offset == (bfd_vma) -1)
3250 skip = true;
3251 else if (outrel.r_offset == (bfd_vma) -2)
3252 skip = true, relocate = true;
3253
3254 outrel.r_offset += (input_section->output_section->vma
3255 + input_section->output_offset);
3256
3257 if (skip)
3258 memset (&outrel, 0, sizeof outrel);
3259
3260 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3261 {
3262 outrel.r_info = htab->r_info (h->dynindx, r_type);
3263 outrel.r_addend = rel->r_addend;
3264 }
3265 else
3266 {
3267 /* This symbol is local, or marked to become local.
3268 When relocation overflow check is disabled, we
3269 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3270 if (r_type == htab->pointer_r_type
3271 || (r_type == R_X86_64_32
3272 && htab->params->no_reloc_overflow_check))
3273 {
3274 relocate = true;
3275 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3276 outrel.r_addend = relocation + rel->r_addend;
3277 relative_reloc_name = "R_X86_64_RELATIVE";
3278 }
3279 else if (r_type == R_X86_64_64
3280 && !ABI_64_P (output_bfd))
3281 {
3282 relocate = true;
3283 outrel.r_info = htab->r_info (0,
3284 R_X86_64_RELATIVE64);
3285 outrel.r_addend = relocation + rel->r_addend;
3286 relative_reloc_name = "R_X86_64_RELATIVE64";
3287 /* Check addend overflow. */
3288 if ((outrel.r_addend & 0x80000000)
3289 != (rel->r_addend & 0x80000000))
3290 {
3291 const char *name;
3292 int addend = rel->r_addend;
3293 if (h && h->root.root.string)
3294 name = h->root.root.string;
3295 else
3296 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3297 sym, NULL);
3298 _bfd_error_handler
3299 /* xgettext:c-format */
3300 (_("%pB: addend %s%#x in relocation %s against "
3301 "symbol `%s' at %#" PRIx64
3302 " in section `%pA' is out of range"),
3303 input_bfd, addend < 0 ? "-" : "", addend,
3304 howto->name, name, (uint64_t) rel->r_offset,
3305 input_section);
3306 bfd_set_error (bfd_error_bad_value);
3307 return false;
3308 }
3309 }
3310 else
3311 {
3312 long sindx;
3313
3314 if (bfd_is_abs_section (sec))
3315 sindx = 0;
3316 else if (sec == NULL || sec->owner == NULL)
3317 {
3318 bfd_set_error (bfd_error_bad_value);
3319 return false;
3320 }
3321 else
3322 {
3323 asection *osec;
3324
3325 /* We are turning this relocation into one
3326 against a section symbol. It would be
3327 proper to subtract the symbol's value,
3328 osec->vma, from the emitted reloc addend,
3329 but ld.so expects buggy relocs. */
3330 osec = sec->output_section;
3331 sindx = elf_section_data (osec)->dynindx;
3332 if (sindx == 0)
3333 {
3334 asection *oi = htab->elf.text_index_section;
3335 sindx = elf_section_data (oi)->dynindx;
3336 }
3337 BFD_ASSERT (sindx != 0);
3338 }
3339
3340 outrel.r_info = htab->r_info (sindx, r_type);
3341 outrel.r_addend = relocation + rel->r_addend;
3342 }
3343 }
3344
3345 sreloc = elf_section_data (input_section)->sreloc;
3346
3347 if (sreloc == NULL || sreloc->contents == NULL)
3348 {
3349 r = bfd_reloc_notsupported;
3350 goto check_relocation_error;
3351 }
3352
3353 if (relative_reloc_name
3354 && htab->params->report_relative_reloc)
3355 _bfd_x86_elf_link_report_relative_reloc
3356 (info, input_section, h, sym, relative_reloc_name,
3357 &outrel);
3358
3359 elf_append_rela (output_bfd, sreloc, &outrel);
3360
3361 /* If this reloc is against an external symbol, we do
3362 not want to fiddle with the addend. Otherwise, we
3363 need to include the symbol value so that it becomes
3364 an addend for the dynamic reloc. */
3365 if (! relocate)
3366 continue;
3367 }
3368
3369 break;
3370
3371 case R_X86_64_TLSGD:
3372 case R_X86_64_GOTPC32_TLSDESC:
3373 case R_X86_64_TLSDESC_CALL:
3374 case R_X86_64_GOTTPOFF:
3375 tls_type = GOT_UNKNOWN;
3376 if (h == NULL && local_got_offsets)
3377 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3378 else if (h != NULL)
3379 tls_type = elf_x86_hash_entry (h)->tls_type;
3380
3381 r_type_tls = r_type;
3382 if (! elf_x86_64_tls_transition (info, input_bfd,
3383 input_section, contents,
3384 symtab_hdr, sym_hashes,
3385 &r_type_tls, tls_type, rel,
3386 relend, h, r_symndx, true))
3387 return false;
3388
3389 if (r_type_tls == R_X86_64_TPOFF32)
3390 {
3391 bfd_vma roff = rel->r_offset;
3392
3393 BFD_ASSERT (! unresolved_reloc);
3394
3395 if (r_type == R_X86_64_TLSGD)
3396 {
3397 /* GD->LE transition. For 64bit, change
3398 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3399 .word 0x6666; rex64; call __tls_get_addr@PLT
3400 or
3401 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3402 .byte 0x66; rex64
3403 call *__tls_get_addr@GOTPCREL(%rip)
3404 which may be converted to
3405 addr32 call __tls_get_addr
3406 into:
3407 movq %fs:0, %rax
3408 leaq foo@tpoff(%rax), %rax
3409 For 32bit, change
3410 leaq foo@tlsgd(%rip), %rdi
3411 .word 0x6666; rex64; call __tls_get_addr@PLT
3412 or
3413 leaq foo@tlsgd(%rip), %rdi
3414 .byte 0x66; rex64
3415 call *__tls_get_addr@GOTPCREL(%rip)
3416 which may be converted to
3417 addr32 call __tls_get_addr
3418 into:
3419 movl %fs:0, %eax
3420 leaq foo@tpoff(%rax), %rax
3421 For largepic, change:
3422 leaq foo@tlsgd(%rip), %rdi
3423 movabsq $__tls_get_addr@pltoff, %rax
3424 addq %r15, %rax
3425 call *%rax
3426 into:
3427 movq %fs:0, %rax
3428 leaq foo@tpoff(%rax), %rax
3429 nopw 0x0(%rax,%rax,1) */
3430 int largepic = 0;
3431 if (ABI_64_P (output_bfd))
3432 {
3433 if (contents[roff + 5] == 0xb8)
3434 {
3435 if (roff < 3
3436 || (roff - 3 + 22) > input_section->size)
3437 {
3438 corrupt_input:
3439 info->callbacks->einfo
3440 (_("%F%P: corrupt input: %pB\n"),
3441 input_bfd);
3442 return false;
3443 }
3444 memcpy (contents + roff - 3,
3445 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3446 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3447 largepic = 1;
3448 }
3449 else
3450 {
3451 if (roff < 4
3452 || (roff - 4 + 16) > input_section->size)
3453 goto corrupt_input;
3454 memcpy (contents + roff - 4,
3455 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3456 16);
3457 }
3458 }
3459 else
3460 {
3461 if (roff < 3
3462 || (roff - 3 + 15) > input_section->size)
3463 goto corrupt_input;
3464 memcpy (contents + roff - 3,
3465 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3466 15);
3467 }
3468 bfd_put_32 (output_bfd,
3469 elf_x86_64_tpoff (info, relocation),
3470 contents + roff + 8 + largepic);
3471 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3472 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3473 rel++;
3474 wrel++;
3475 continue;
3476 }
3477 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3478 {
3479 /* GDesc -> LE transition.
3480 It's originally something like:
3481 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3482 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3483
3484 Change it to:
3485 movq $x@tpoff, %rax <--- LP64 mode.
3486 rex movl $x@tpoff, %eax <--- X32 mode.
3487 */
3488
3489 unsigned int val, type;
3490
3491 if (roff < 3)
3492 goto corrupt_input;
3493 type = bfd_get_8 (input_bfd, contents + roff - 3);
3494 val = bfd_get_8 (input_bfd, contents + roff - 1);
3495 bfd_put_8 (output_bfd,
3496 (type & 0x48) | ((type >> 2) & 1),
3497 contents + roff - 3);
3498 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3499 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3500 contents + roff - 1);
3501 bfd_put_32 (output_bfd,
3502 elf_x86_64_tpoff (info, relocation),
3503 contents + roff);
3504 continue;
3505 }
3506 else if (r_type == R_X86_64_TLSDESC_CALL)
3507 {
3508 /* GDesc -> LE transition.
3509 It's originally:
3510 call *(%rax) <--- LP64 mode.
3511 call *(%eax) <--- X32 mode.
3512 Turn it into:
3513 xchg %ax,%ax <-- LP64 mode.
3514 nopl (%rax) <-- X32 mode.
3515 */
3516 unsigned int prefix = 0;
3517 if (!ABI_64_P (input_bfd))
3518 {
3519 /* Check for call *x@tlsdesc(%eax). */
3520 if (contents[roff] == 0x67)
3521 prefix = 1;
3522 }
3523 if (prefix)
3524 {
3525 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3526 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3527 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3528 }
3529 else
3530 {
3531 bfd_put_8 (output_bfd, 0x66, contents + roff);
3532 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3533 }
3534 continue;
3535 }
3536 else if (r_type == R_X86_64_GOTTPOFF)
3537 {
3538 /* IE->LE transition:
3539 For 64bit, originally it can be one of:
3540 movq foo@gottpoff(%rip), %reg
3541 addq foo@gottpoff(%rip), %reg
3542 We change it into:
3543 movq $foo, %reg
3544 leaq foo(%reg), %reg
3545 addq $foo, %reg.
3546 For 32bit, originally it can be one of:
3547 movq foo@gottpoff(%rip), %reg
3548 addl foo@gottpoff(%rip), %reg
3549 We change it into:
3550 movq $foo, %reg
3551 leal foo(%reg), %reg
3552 addl $foo, %reg. */
3553
3554 unsigned int val, type, reg;
3555
3556 if (roff >= 3)
3557 val = bfd_get_8 (input_bfd, contents + roff - 3);
3558 else
3559 {
3560 if (roff < 2)
3561 goto corrupt_input;
3562 val = 0;
3563 }
3564 type = bfd_get_8 (input_bfd, contents + roff - 2);
3565 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3566 reg >>= 3;
3567 if (type == 0x8b)
3568 {
3569 /* movq */
3570 if (val == 0x4c)
3571 {
3572 if (roff < 3)
3573 goto corrupt_input;
3574 bfd_put_8 (output_bfd, 0x49,
3575 contents + roff - 3);
3576 }
3577 else if (!ABI_64_P (output_bfd) && val == 0x44)
3578 {
3579 if (roff < 3)
3580 goto corrupt_input;
3581 bfd_put_8 (output_bfd, 0x41,
3582 contents + roff - 3);
3583 }
3584 bfd_put_8 (output_bfd, 0xc7,
3585 contents + roff - 2);
3586 bfd_put_8 (output_bfd, 0xc0 | reg,
3587 contents + roff - 1);
3588 }
3589 else if (reg == 4)
3590 {
3591 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3592 is special */
3593 if (val == 0x4c)
3594 {
3595 if (roff < 3)
3596 goto corrupt_input;
3597 bfd_put_8 (output_bfd, 0x49,
3598 contents + roff - 3);
3599 }
3600 else if (!ABI_64_P (output_bfd) && val == 0x44)
3601 {
3602 if (roff < 3)
3603 goto corrupt_input;
3604 bfd_put_8 (output_bfd, 0x41,
3605 contents + roff - 3);
3606 }
3607 bfd_put_8 (output_bfd, 0x81,
3608 contents + roff - 2);
3609 bfd_put_8 (output_bfd, 0xc0 | reg,
3610 contents + roff - 1);
3611 }
3612 else
3613 {
3614 /* addq/addl -> leaq/leal */
3615 if (val == 0x4c)
3616 {
3617 if (roff < 3)
3618 goto corrupt_input;
3619 bfd_put_8 (output_bfd, 0x4d,
3620 contents + roff - 3);
3621 }
3622 else if (!ABI_64_P (output_bfd) && val == 0x44)
3623 {
3624 if (roff < 3)
3625 goto corrupt_input;
3626 bfd_put_8 (output_bfd, 0x45,
3627 contents + roff - 3);
3628 }
3629 bfd_put_8 (output_bfd, 0x8d,
3630 contents + roff - 2);
3631 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3632 contents + roff - 1);
3633 }
3634 bfd_put_32 (output_bfd,
3635 elf_x86_64_tpoff (info, relocation),
3636 contents + roff);
3637 continue;
3638 }
3639 else
3640 BFD_ASSERT (false);
3641 }
3642
3643 if (htab->elf.sgot == NULL)
3644 abort ();
3645
3646 if (h != NULL)
3647 {
3648 off = h->got.offset;
3649 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3650 }
3651 else
3652 {
3653 if (local_got_offsets == NULL)
3654 abort ();
3655
3656 off = local_got_offsets[r_symndx];
3657 offplt = local_tlsdesc_gotents[r_symndx];
3658 }
3659
3660 if ((off & 1) != 0)
3661 off &= ~1;
3662 else
3663 {
3664 Elf_Internal_Rela outrel;
3665 int dr_type, indx;
3666 asection *sreloc;
3667
3668 if (htab->elf.srelgot == NULL)
3669 abort ();
3670
3671 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3672
3673 if (GOT_TLS_GDESC_P (tls_type))
3674 {
3675 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3676 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3677 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3678 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3679 + htab->elf.sgotplt->output_offset
3680 + offplt
3681 + htab->sgotplt_jump_table_size);
3682 sreloc = htab->elf.srelplt;
3683 if (indx == 0)
3684 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3685 else
3686 outrel.r_addend = 0;
3687 elf_append_rela (output_bfd, sreloc, &outrel);
3688 }
3689
3690 sreloc = htab->elf.srelgot;
3691
3692 outrel.r_offset = (htab->elf.sgot->output_section->vma
3693 + htab->elf.sgot->output_offset + off);
3694
3695 if (GOT_TLS_GD_P (tls_type))
3696 dr_type = R_X86_64_DTPMOD64;
3697 else if (GOT_TLS_GDESC_P (tls_type))
3698 goto dr_done;
3699 else
3700 dr_type = R_X86_64_TPOFF64;
3701
3702 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3703 outrel.r_addend = 0;
3704 if ((dr_type == R_X86_64_TPOFF64
3705 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3706 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3707 outrel.r_info = htab->r_info (indx, dr_type);
3708
3709 elf_append_rela (output_bfd, sreloc, &outrel);
3710
3711 if (GOT_TLS_GD_P (tls_type))
3712 {
3713 if (indx == 0)
3714 {
3715 BFD_ASSERT (! unresolved_reloc);
3716 bfd_put_64 (output_bfd,
3717 relocation - _bfd_x86_elf_dtpoff_base (info),
3718 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3719 }
3720 else
3721 {
3722 bfd_put_64 (output_bfd, 0,
3723 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3724 outrel.r_info = htab->r_info (indx,
3725 R_X86_64_DTPOFF64);
3726 outrel.r_offset += GOT_ENTRY_SIZE;
3727 elf_append_rela (output_bfd, sreloc,
3728 &outrel);
3729 }
3730 }
3731
3732 dr_done:
3733 if (h != NULL)
3734 h->got.offset |= 1;
3735 else
3736 local_got_offsets[r_symndx] |= 1;
3737 }
3738
3739 if (off >= (bfd_vma) -2
3740 && ! GOT_TLS_GDESC_P (tls_type))
3741 abort ();
3742 if (r_type_tls == r_type)
3743 {
3744 if (r_type == R_X86_64_GOTPC32_TLSDESC
3745 || r_type == R_X86_64_TLSDESC_CALL)
3746 relocation = htab->elf.sgotplt->output_section->vma
3747 + htab->elf.sgotplt->output_offset
3748 + offplt + htab->sgotplt_jump_table_size;
3749 else
3750 relocation = htab->elf.sgot->output_section->vma
3751 + htab->elf.sgot->output_offset + off;
3752 unresolved_reloc = false;
3753 }
3754 else
3755 {
3756 bfd_vma roff = rel->r_offset;
3757
3758 if (r_type == R_X86_64_TLSGD)
3759 {
3760 /* GD->IE transition. For 64bit, change
3761 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3762 .word 0x6666; rex64; call __tls_get_addr@PLT
3763 or
3764 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3765 .byte 0x66; rex64
3766 call *__tls_get_addr@GOTPCREL(%rip)
3767 which may be converted to
3768 addr32 call __tls_get_addr
3769 into:
3770 movq %fs:0, %rax
3771 addq foo@gottpoff(%rip), %rax
3772 For 32bit, change
3773 leaq foo@tlsgd(%rip), %rdi
3774 .word 0x6666; rex64; call __tls_get_addr@PLT
3775 or
3776 leaq foo@tlsgd(%rip), %rdi
3777 .byte 0x66; rex64
3778 call *__tls_get_addr@GOTPCREL(%rip)
3779 which may be converted to
3780 addr32 call __tls_get_addr
3781 into:
3782 movl %fs:0, %eax
3783 addq foo@gottpoff(%rip), %rax
3784 For largepic, change:
3785 leaq foo@tlsgd(%rip), %rdi
3786 movabsq $__tls_get_addr@pltoff, %rax
3787 addq %r15, %rax
3788 call *%rax
3789 into:
3790 movq %fs:0, %rax
3791 addq foo@gottpoff(%rax), %rax
3792 nopw 0x0(%rax,%rax,1) */
3793 int largepic = 0;
3794 if (ABI_64_P (output_bfd))
3795 {
3796 if (contents[roff + 5] == 0xb8)
3797 {
3798 if (roff < 3
3799 || (roff - 3 + 22) > input_section->size)
3800 goto corrupt_input;
3801 memcpy (contents + roff - 3,
3802 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3803 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3804 largepic = 1;
3805 }
3806 else
3807 {
3808 if (roff < 4
3809 || (roff - 4 + 16) > input_section->size)
3810 goto corrupt_input;
3811 memcpy (contents + roff - 4,
3812 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3813 16);
3814 }
3815 }
3816 else
3817 {
3818 if (roff < 3
3819 || (roff - 3 + 15) > input_section->size)
3820 goto corrupt_input;
3821 memcpy (contents + roff - 3,
3822 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3823 15);
3824 }
3825
3826 relocation = (htab->elf.sgot->output_section->vma
3827 + htab->elf.sgot->output_offset + off
3828 - roff
3829 - largepic
3830 - input_section->output_section->vma
3831 - input_section->output_offset
3832 - 12);
3833 bfd_put_32 (output_bfd, relocation,
3834 contents + roff + 8 + largepic);
3835 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3836 rel++;
3837 wrel++;
3838 continue;
3839 }
3840 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3841 {
3842 /* GDesc -> IE transition.
3843 It's originally something like:
3844 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3845 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3846
3847 Change it to:
3848 # before xchg %ax,%ax in LP64 mode.
3849 movq x@gottpoff(%rip), %rax
3850 # before nopl (%rax) in X32 mode.
3851 rex movl x@gottpoff(%rip), %eax
3852 */
3853
3854 /* Now modify the instruction as appropriate. To
3855 turn a lea into a mov in the form we use it, it
3856 suffices to change the second byte from 0x8d to
3857 0x8b. */
3858 if (roff < 2)
3859 goto corrupt_input;
3860 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3861
3862 bfd_put_32 (output_bfd,
3863 htab->elf.sgot->output_section->vma
3864 + htab->elf.sgot->output_offset + off
3865 - rel->r_offset
3866 - input_section->output_section->vma
3867 - input_section->output_offset
3868 - 4,
3869 contents + roff);
3870 continue;
3871 }
3872 else if (r_type == R_X86_64_TLSDESC_CALL)
3873 {
3874 /* GDesc -> IE transition.
3875 It's originally:
3876 call *(%rax) <--- LP64 mode.
3877 call *(%eax) <--- X32 mode.
3878
3879 Change it to:
3880 xchg %ax, %ax <-- LP64 mode.
3881 nopl (%rax) <-- X32 mode.
3882 */
3883
3884 unsigned int prefix = 0;
3885 if (!ABI_64_P (input_bfd))
3886 {
3887 /* Check for call *x@tlsdesc(%eax). */
3888 if (contents[roff] == 0x67)
3889 prefix = 1;
3890 }
3891 if (prefix)
3892 {
3893 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3894 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3895 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3896 }
3897 else
3898 {
3899 bfd_put_8 (output_bfd, 0x66, contents + roff);
3900 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3901 }
3902 continue;
3903 }
3904 else
3905 BFD_ASSERT (false);
3906 }
3907 break;
3908
3909 case R_X86_64_TLSLD:
3910 if (! elf_x86_64_tls_transition (info, input_bfd,
3911 input_section, contents,
3912 symtab_hdr, sym_hashes,
3913 &r_type, GOT_UNKNOWN, rel,
3914 relend, h, r_symndx, true))
3915 return false;
3916
3917 if (r_type != R_X86_64_TLSLD)
3918 {
3919 /* LD->LE transition:
3920 leaq foo@tlsld(%rip), %rdi
3921 call __tls_get_addr@PLT
3922 For 64bit, we change it into:
3923 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3924 For 32bit, we change it into:
3925 nopl 0x0(%rax); movl %fs:0, %eax
3926 Or
3927 leaq foo@tlsld(%rip), %rdi;
3928 call *__tls_get_addr@GOTPCREL(%rip)
3929 which may be converted to
3930 addr32 call __tls_get_addr
3931 For 64bit, we change it into:
3932 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3933 For 32bit, we change it into:
3934 nopw 0x0(%rax); movl %fs:0, %eax
3935 For largepic, change:
3936 leaq foo@tlsgd(%rip), %rdi
3937 movabsq $__tls_get_addr@pltoff, %rax
3938 addq %rbx, %rax
3939 call *%rax
3940 into
3941 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3942 movq %fs:0, %rax */
3943
3944 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3945 if (ABI_64_P (output_bfd))
3946 {
3947 if ((rel->r_offset + 5) >= input_section->size)
3948 goto corrupt_input;
3949 if (contents[rel->r_offset + 5] == 0xb8)
3950 {
3951 if (rel->r_offset < 3
3952 || (rel->r_offset - 3 + 22) > input_section->size)
3953 goto corrupt_input;
3954 memcpy (contents + rel->r_offset - 3,
3955 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3956 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3957 }
3958 else if (contents[rel->r_offset + 4] == 0xff
3959 || contents[rel->r_offset + 4] == 0x67)
3960 {
3961 if (rel->r_offset < 3
3962 || (rel->r_offset - 3 + 13) > input_section->size)
3963 goto corrupt_input;
3964 memcpy (contents + rel->r_offset - 3,
3965 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3966 13);
3967
3968 }
3969 else
3970 {
3971 if (rel->r_offset < 3
3972 || (rel->r_offset - 3 + 12) > input_section->size)
3973 goto corrupt_input;
3974 memcpy (contents + rel->r_offset - 3,
3975 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3976 }
3977 }
3978 else
3979 {
3980 if ((rel->r_offset + 4) >= input_section->size)
3981 goto corrupt_input;
3982 if (contents[rel->r_offset + 4] == 0xff)
3983 {
3984 if (rel->r_offset < 3
3985 || (rel->r_offset - 3 + 13) > input_section->size)
3986 goto corrupt_input;
3987 memcpy (contents + rel->r_offset - 3,
3988 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3989 13);
3990 }
3991 else
3992 {
3993 if (rel->r_offset < 3
3994 || (rel->r_offset - 3 + 12) > input_section->size)
3995 goto corrupt_input;
3996 memcpy (contents + rel->r_offset - 3,
3997 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3998 }
3999 }
4000 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
4001 and R_X86_64_PLTOFF64. */
4002 rel++;
4003 wrel++;
4004 continue;
4005 }
4006
4007 if (htab->elf.sgot == NULL)
4008 abort ();
4009
4010 off = htab->tls_ld_or_ldm_got.offset;
4011 if (off & 1)
4012 off &= ~1;
4013 else
4014 {
4015 Elf_Internal_Rela outrel;
4016
4017 if (htab->elf.srelgot == NULL)
4018 abort ();
4019
4020 outrel.r_offset = (htab->elf.sgot->output_section->vma
4021 + htab->elf.sgot->output_offset + off);
4022
4023 bfd_put_64 (output_bfd, 0,
4024 htab->elf.sgot->contents + off);
4025 bfd_put_64 (output_bfd, 0,
4026 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4027 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4028 outrel.r_addend = 0;
4029 elf_append_rela (output_bfd, htab->elf.srelgot,
4030 &outrel);
4031 htab->tls_ld_or_ldm_got.offset |= 1;
4032 }
4033 relocation = htab->elf.sgot->output_section->vma
4034 + htab->elf.sgot->output_offset + off;
4035 unresolved_reloc = false;
4036 break;
4037
4038 case R_X86_64_DTPOFF32:
4039 if (!bfd_link_executable (info)
4040 || (input_section->flags & SEC_CODE) == 0)
4041 relocation -= _bfd_x86_elf_dtpoff_base (info);
4042 else
4043 relocation = elf_x86_64_tpoff (info, relocation);
4044 break;
4045
4046 case R_X86_64_TPOFF32:
4047 case R_X86_64_TPOFF64:
4048 BFD_ASSERT (bfd_link_executable (info));
4049 relocation = elf_x86_64_tpoff (info, relocation);
4050 break;
4051
4052 case R_X86_64_DTPOFF64:
4053 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4054 relocation -= _bfd_x86_elf_dtpoff_base (info);
4055 break;
4056
4057 default:
4058 break;
4059 }
4060
4061 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4062 because such sections are not SEC_ALLOC and thus ld.so will
4063 not process them. */
4064 if (unresolved_reloc
4065 && !((input_section->flags & SEC_DEBUGGING) != 0
4066 && h->def_dynamic)
4067 && _bfd_elf_section_offset (output_bfd, info, input_section,
4068 rel->r_offset) != (bfd_vma) -1)
4069 {
4070 switch (r_type)
4071 {
4072 case R_X86_64_32S:
4073 sec = h->root.u.def.section;
4074 if ((info->nocopyreloc
4075 || (eh->def_protected
4076 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4077 && !(h->root.u.def.section->flags & SEC_CODE))
4078 return elf_x86_64_need_pic (info, input_bfd, input_section,
4079 h, NULL, NULL, howto);
4080 /* Fall through. */
4081
4082 default:
4083 _bfd_error_handler
4084 /* xgettext:c-format */
4085 (_("%pB(%pA+%#" PRIx64 "): "
4086 "unresolvable %s relocation against symbol `%s'"),
4087 input_bfd,
4088 input_section,
4089 (uint64_t) rel->r_offset,
4090 howto->name,
4091 h->root.root.string);
4092 return false;
4093 }
4094 }
4095
4096 do_relocation:
4097 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4098 contents, rel->r_offset,
4099 relocation, rel->r_addend);
4100
4101 check_relocation_error:
4102 if (r != bfd_reloc_ok)
4103 {
4104 const char *name;
4105
4106 if (h != NULL)
4107 name = h->root.root.string;
4108 else
4109 {
4110 name = bfd_elf_string_from_elf_section (input_bfd,
4111 symtab_hdr->sh_link,
4112 sym->st_name);
4113 if (name == NULL)
4114 return false;
4115 if (*name == '\0')
4116 name = bfd_section_name (sec);
4117 }
4118
4119 if (r == bfd_reloc_overflow)
4120 {
4121 if (converted_reloc)
4122 {
4123 info->callbacks->einfo
4124 ("%X%H:", input_bfd, input_section, rel->r_offset);
4125 info->callbacks->einfo
4126 (_(" failed to convert GOTPCREL relocation against "
4127 "'%s'; relink with --no-relax\n"),
4128 name);
4129 status = false;
4130 continue;
4131 }
4132 (*info->callbacks->reloc_overflow)
4133 (info, (h ? &h->root : NULL), name, howto->name,
4134 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4135 }
4136 else
4137 {
4138 _bfd_error_handler
4139 /* xgettext:c-format */
4140 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4141 input_bfd, input_section,
4142 (uint64_t) rel->r_offset, name, (int) r);
4143 return false;
4144 }
4145 }
4146
4147 if (wrel != rel)
4148 *wrel = *rel;
4149 }
4150
4151 if (wrel != rel)
4152 {
4153 Elf_Internal_Shdr *rel_hdr;
4154 size_t deleted = rel - wrel;
4155
4156 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4157 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4158 if (rel_hdr->sh_size == 0)
4159 {
4160 /* It is too late to remove an empty reloc section. Leave
4161 one NONE reloc.
4162 ??? What is wrong with an empty section??? */
4163 rel_hdr->sh_size = rel_hdr->sh_entsize;
4164 deleted -= 1;
4165 }
4166 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4167 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4168 input_section->reloc_count -= deleted;
4169 }
4170
4171 return status;
4172 }
4173
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here: the PLT entry, the .got.plt/.got words, the
   .rela.plt/.rela.got relocations, and any copy relocation needed for
   H.  Called for each dynamic symbol after section sizes are final.
   SYM may be NULL for local STT_GNU_IFUNC symbols (see
   elf_x86_64_finish_local_dynamic_symbol).  Returns false on error.  */

static bool
elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
				  struct bfd_link_info *info,
				  struct elf_link_hash_entry *h,
				  Elf_Internal_Sym *sym)
{
  struct elf_x86_link_hash_table *htab;
  bool use_plt_second;
  struct elf_x86_link_hash_entry *eh;
  bool local_undefweak;

  htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
  if (htab == NULL)
    return false;

  /* Use the second PLT section only if there is .plt section.  */
  use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;

  eh = (struct elf_x86_link_hash_entry *) h;
  if (eh->no_finish_dynamic_symbol)
    abort ();

  /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
     resolved undefined weak symbols in executable so that their
     references have value 0 at run-time.  */
  local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);

  if (h->plt.offset != (bfd_vma) -1)
    {
      bfd_vma plt_index;
      bfd_vma got_offset, plt_offset;
      Elf_Internal_Rela rela;
      bfd_byte *loc;
      asection *plt, *gotplt, *relplt, *resolved_plt;
      const struct elf_backend_data *bed;
      bfd_vma plt_got_pcrel_offset;

      /* When building a static executable, use .iplt, .igot.plt and
	 .rela.iplt sections for STT_GNU_IFUNC symbols.  */
      if (htab->elf.splt != NULL)
	{
	  plt = htab->elf.splt;
	  gotplt = htab->elf.sgotplt;
	  relplt = htab->elf.srelplt;
	}
      else
	{
	  plt = htab->elf.iplt;
	  gotplt = htab->elf.igotplt;
	  relplt = htab->elf.irelplt;
	}

      VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.

	 Get the offset into the .got table of the entry that
	 corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
	 bytes.  The first three are reserved for the dynamic linker.

	 For static executables, we don't reserve anything.  */

      if (plt == htab->elf.splt)
	{
	  got_offset = (h->plt.offset / htab->plt.plt_entry_size
			- htab->plt.has_plt0);
	  got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
	}
      else
	{
	  got_offset = h->plt.offset / htab->plt.plt_entry_size;
	  got_offset = got_offset * GOT_ENTRY_SIZE;
	}

      /* Fill in the entry in the procedure linkage table.  */
      memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
	      htab->plt.plt_entry_size);
      if (use_plt_second)
	{
	  /* The second (IBT/BND) PLT holds the actual branch; it uses
	     the non-lazy entry template.  */
	  memcpy (htab->plt_second->contents + eh->plt_second.offset,
		  htab->non_lazy_plt->plt_entry,
		  htab->non_lazy_plt->plt_entry_size);

	  resolved_plt = htab->plt_second;
	  plt_offset = eh->plt_second.offset;
	}
      else
	{
	  resolved_plt = plt;
	  plt_offset = h->plt.offset;
	}

      /* Insert the relocation positions of the plt section.  */

      /* Put offset the PC-relative instruction referring to the GOT entry,
	 subtracting the size of that instruction.  */
      plt_got_pcrel_offset = (gotplt->output_section->vma
			      + gotplt->output_offset
			      + got_offset
			      - resolved_plt->output_section->vma
			      - resolved_plt->output_offset
			      - plt_offset
			      - htab->plt.plt_got_insn_size);

      /* Check PC-relative offset overflow in PLT entry.  */
      if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
	/* xgettext:c-format */
	info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
				output_bfd, h->root.root.string);

      bfd_put_32 (output_bfd, plt_got_pcrel_offset,
		  (resolved_plt->contents + plt_offset
		   + htab->plt.plt_got_offset));

      /* Fill in the entry in the global offset table, initially this
	 points to the second part of the PLT entry.  Leave the entry
	 as zero for undefined weak symbol in PIE.  No PLT relocation
	 against undefined weak symbol in PIE.  */
      if (!local_undefweak)
	{
	  if (htab->plt.has_plt0)
	    bfd_put_64 (output_bfd, (plt->output_section->vma
				     + plt->output_offset
				     + h->plt.offset
				     + htab->lazy_plt->plt_lazy_offset),
			gotplt->contents + got_offset);

	  /* Fill in the entry in the .rela.plt section.  */
	  rela.r_offset = (gotplt->output_section->vma
			   + gotplt->output_offset
			   + got_offset);
	  if (PLT_LOCAL_IFUNC_P (info, h))
	    {
	      info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
				      h->root.root.string,
				      h->root.u.def.section->owner);

	      /* If an STT_GNU_IFUNC symbol is locally defined, generate
		 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT.  */
	      rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
	      rela.r_addend = (h->root.u.def.value
			       + h->root.u.def.section->output_section->vma
			       + h->root.u.def.section->output_offset);

	      if (htab->params->report_relative_reloc)
		_bfd_x86_elf_link_report_relative_reloc
		  (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);

	      /* R_X86_64_IRELATIVE comes last.  */
	      plt_index = htab->next_irelative_index--;
	    }
	  else
	    {
	      rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
	      rela.r_addend = 0;
	      plt_index = htab->next_jump_slot_index++;
	    }

	  /* Don't fill the second and third slots in PLT entry for
	     static executables nor without PLT0.  */
	  if (plt == htab->elf.splt && htab->plt.has_plt0)
	    {
	      bfd_vma plt0_offset
		= h->plt.offset + htab->lazy_plt->plt_plt_insn_end;

	      /* Put relocation index.  */
	      bfd_put_32 (output_bfd, plt_index,
			  (plt->contents + h->plt.offset
			   + htab->lazy_plt->plt_reloc_offset));

	      /* Put offset for jmp .PLT0 and check for overflow.  We don't
		 check relocation index for overflow since branch displacement
		 will overflow first.  */
	      if (plt0_offset > 0x80000000)
		/* xgettext:c-format */
		info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
					output_bfd, h->root.root.string);
	      bfd_put_32 (output_bfd, - plt0_offset,
			  (plt->contents + h->plt.offset
			   + htab->lazy_plt->plt_plt_offset));
	    }

	  bed = get_elf_backend_data (output_bfd);
	  loc = relplt->contents + plt_index * bed->s->sizeof_rela;
	  bed->s->swap_reloca_out (output_bfd, &rela, loc);
	}
    }
  else if (eh->plt_got.offset != (bfd_vma) -1)
    {
      bfd_vma got_offset, plt_offset;
      asection *plt, *got;
      bool got_after_plt;
      int32_t got_pcrel_offset;

      /* Set the entry in the GOT procedure linkage table.  */
      plt = htab->plt_got;
      got = htab->elf.sgot;
      got_offset = h->got.offset;

      if (got_offset == (bfd_vma) -1
	  || (h->type == STT_GNU_IFUNC && h->def_regular)
	  || plt == NULL
	  || got == NULL)
	abort ();

      /* Use the non-lazy PLT entry template for the GOT PLT since they
	 are identical.  */
      /* Fill in the entry in the GOT procedure linkage table.  */
      plt_offset = eh->plt_got.offset;
      memcpy (plt->contents + plt_offset,
	      htab->non_lazy_plt->plt_entry,
	      htab->non_lazy_plt->plt_entry_size);

      /* Put offset the PC-relative instruction referring to the GOT
	 entry, subtracting the size of that instruction.  */
      got_pcrel_offset = (got->output_section->vma
			  + got->output_offset
			  + got_offset
			  - plt->output_section->vma
			  - plt->output_offset
			  - plt_offset
			  - htab->non_lazy_plt->plt_got_insn_size);

      /* Check PC-relative offset overflow in GOT PLT entry: the sign of
	 the 32-bit displacement must agree with the relative placement
	 of the two output sections.  */
      got_after_plt = got->output_section->vma > plt->output_section->vma;
      if ((got_after_plt && got_pcrel_offset < 0)
	  || (!got_after_plt && got_pcrel_offset > 0))
	/* xgettext:c-format */
	info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
				output_bfd, h->root.root.string);

      bfd_put_32 (output_bfd, got_pcrel_offset,
		  (plt->contents + plt_offset
		   + htab->non_lazy_plt->plt_got_offset));
    }

  if (!local_undefweak
      && !h->def_regular
      && (h->plt.offset != (bfd_vma) -1
	  || eh->plt_got.offset != (bfd_vma) -1))
    {
      /* Mark the symbol as undefined, rather than as defined in
	 the .plt section.  Leave the value if there were any
	 relocations where pointer equality matters (this is a clue
	 for the dynamic linker, to make function pointer
	 comparisons work between an application and shared
	 library), otherwise set it to zero.  If a function is only
	 called from a binary, there is no need to slow down
	 shared libraries because of that.  */
      sym->st_shndx = SHN_UNDEF;
      if (!h->pointer_equality_needed)
	sym->st_value = 0;
    }

  _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);

  /* Don't generate dynamic GOT relocation against undefined weak
     symbol in executable.  */
  if (h->got.offset != (bfd_vma) -1
      && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
      && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
      && !local_undefweak)
    {
      Elf_Internal_Rela rela;
      asection *relgot = htab->elf.srelgot;
      const char *relative_reloc_name = NULL;

      /* This symbol has an entry in the global offset table.  Set it
	 up.  */
      if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
	abort ();

      /* Bit 0 of h->got.offset is a flag; mask it off to get the
	 real GOT offset.  */
      rela.r_offset = (htab->elf.sgot->output_section->vma
		       + htab->elf.sgot->output_offset
		       + (h->got.offset &~ (bfd_vma) 1));

      /* If this is a static link, or it is a -Bsymbolic link and the
	 symbol is defined locally or was forced to be local because
	 of a version file, we just want to emit a RELATIVE reloc.
	 The entry in the global offset table will already have been
	 initialized in the relocate_section function.  */
      if (h->def_regular
	  && h->type == STT_GNU_IFUNC)
	{
	  if (h->plt.offset == (bfd_vma) -1)
	    {
	      /* STT_GNU_IFUNC is referenced without PLT.  */
	      if (htab->elf.splt == NULL)
		{
		  /* use .rel[a].iplt section to store .got relocations
		     in static executable.  */
		  relgot = htab->elf.irelplt;
		}
	      if (SYMBOL_REFERENCES_LOCAL_P (info, h))
		{
		  info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
					  h->root.root.string,
					  h->root.u.def.section->owner);

		  rela.r_info = htab->r_info (0,
					      R_X86_64_IRELATIVE);
		  rela.r_addend = (h->root.u.def.value
				   + h->root.u.def.section->output_section->vma
				   + h->root.u.def.section->output_offset);
		  relative_reloc_name = "R_X86_64_IRELATIVE";
		}
	      else
		goto do_glob_dat;
	    }
	  else if (bfd_link_pic (info))
	    {
	      /* Generate R_X86_64_GLOB_DAT.  */
	      goto do_glob_dat;
	    }
	  else
	    {
	      asection *plt;
	      bfd_vma plt_offset;

	      if (!h->pointer_equality_needed)
		abort ();

	      /* For non-shared object, we can't use .got.plt, which
		 contains the real function address if we need pointer
		 equality.  We load the GOT entry with the PLT entry.  */
	      if (htab->plt_second != NULL)
		{
		  plt = htab->plt_second;
		  plt_offset = eh->plt_second.offset;
		}
	      else
		{
		  plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
		  plt_offset = h->plt.offset;
		}
	      bfd_put_64 (output_bfd, (plt->output_section->vma
				       + plt->output_offset
				       + plt_offset),
			  htab->elf.sgot->contents + h->got.offset);
	      /* No dynamic relocation is needed in this case.  */
	      return true;
	    }
	}
      else if (bfd_link_pic (info)
	       && SYMBOL_REFERENCES_LOCAL_P (info, h))
	{
	  if (!SYMBOL_DEFINED_NON_SHARED_P (h))
	    return false;
	  BFD_ASSERT((h->got.offset & 1) != 0);
	  rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
	  rela.r_addend = (h->root.u.def.value
			   + h->root.u.def.section->output_section->vma
			   + h->root.u.def.section->output_offset);
	  relative_reloc_name = "R_X86_64_RELATIVE";
	}
      else
	{
	  BFD_ASSERT((h->got.offset & 1) == 0);
	do_glob_dat:
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->elf.sgot->contents + h->got.offset);
	  rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
	  rela.r_addend = 0;
	}

      if (relative_reloc_name != NULL
	  && htab->params->report_relative_reloc)
	_bfd_x86_elf_link_report_relative_reloc
	  (info, relgot, h, sym, relative_reloc_name, &rela);

      elf_append_rela (output_bfd, relgot, &rela);
    }

  if (h->needs_copy)
    {
      Elf_Internal_Rela rela;
      asection *s;

      /* This symbol needs a copy reloc.  Set it up.  */
      VERIFY_COPY_RELOC (h, htab)

      rela.r_offset = (h->root.u.def.value
		       + h->root.u.def.section->output_section->vma
		       + h->root.u.def.section->output_offset);
      rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
      rela.r_addend = 0;
      /* Copy relocs against read-only-after-relocation data go to
	 .rela.dyn for .data.rel.ro; others go to .rela.bss.  */
      if (h->root.u.def.section == htab->elf.sdynrelro)
	s = htab->elf.sreldynrelro;
      else
	s = htab->elf.srelbss;
      elf_append_rela (output_bfd, s, &rela);
    }

  return true;
}
4574
4575 /* Finish up local dynamic symbol handling. We set the contents of
4576 various dynamic sections here. */
4577
4578 static int
elf_x86_64_finish_local_dynamic_symbol(void ** slot,void * inf)4579 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4580 {
4581 struct elf_link_hash_entry *h
4582 = (struct elf_link_hash_entry *) *slot;
4583 struct bfd_link_info *info
4584 = (struct bfd_link_info *) inf;
4585
4586 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4587 info, h, NULL);
4588 }
4589
4590 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4591 here since undefined weak symbol may not be dynamic and may not be
4592 called for elf_x86_64_finish_dynamic_symbol. */
4593
4594 static bool
elf_x86_64_pie_finish_undefweak_symbol(struct bfd_hash_entry * bh,void * inf)4595 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4596 void *inf)
4597 {
4598 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4599 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4600
4601 if (h->root.type != bfd_link_hash_undefweak
4602 || h->dynindx != -1)
4603 return true;
4604
4605 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4606 info, h, NULL);
4607 }
4608
4609 /* Used to decide how to sort relocs in an optimal manner for the
4610 dynamic linker, before writing them out. */
4611
4612 static enum elf_reloc_type_class
elf_x86_64_reloc_type_class(const struct bfd_link_info * info,const asection * rel_sec ATTRIBUTE_UNUSED,const Elf_Internal_Rela * rela)4613 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4614 const asection *rel_sec ATTRIBUTE_UNUSED,
4615 const Elf_Internal_Rela *rela)
4616 {
4617 bfd *abfd = info->output_bfd;
4618 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4619 struct elf_x86_link_hash_table *htab
4620 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4621
4622 if (htab->elf.dynsym != NULL
4623 && htab->elf.dynsym->contents != NULL)
4624 {
4625 /* Check relocation against STT_GNU_IFUNC symbol if there are
4626 dynamic symbols. */
4627 unsigned long r_symndx = htab->r_sym (rela->r_info);
4628 if (r_symndx != STN_UNDEF)
4629 {
4630 Elf_Internal_Sym sym;
4631 if (!bed->s->swap_symbol_in (abfd,
4632 (htab->elf.dynsym->contents
4633 + r_symndx * bed->s->sizeof_sym),
4634 0, &sym))
4635 abort ();
4636
4637 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4638 return reloc_class_ifunc;
4639 }
4640 }
4641
4642 switch ((int) ELF32_R_TYPE (rela->r_info))
4643 {
4644 case R_X86_64_IRELATIVE:
4645 return reloc_class_ifunc;
4646 case R_X86_64_RELATIVE:
4647 case R_X86_64_RELATIVE64:
4648 return reloc_class_relative;
4649 case R_X86_64_JUMP_SLOT:
4650 return reloc_class_plt;
4651 case R_X86_64_COPY:
4652 return reloc_class_copy;
4653 default:
4654 return reloc_class_normal;
4655 }
4656 }
4657
/* Finish up the dynamic sections.  Fills in PLT0 (the reserved first
   PLT entry used for lazy binding) and the TLSDESC trampoline, then
   fills PLT entries for undefined weak symbols in PIE.  Returns false
   on error.  */

static bool
elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
				    struct bfd_link_info *info)
{
  struct elf_x86_link_hash_table *htab;

  /* Common x86 finalization (GOT header, .dynamic entries, etc.).  */
  htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
  if (htab == NULL)
    return false;

  if (! htab->elf.dynamic_sections_created)
    return true;

  if (htab->elf.splt && htab->elf.splt->size > 0)
    {
      elf_section_data (htab->elf.splt->output_section)
	->this_hdr.sh_entsize = htab->plt.plt_entry_size;

      if (htab->plt.has_plt0)
	{
	  /* Fill in the special first entry in the procedure linkage
	     table.  */
	  memcpy (htab->elf.splt->contents,
		  htab->lazy_plt->plt0_entry,
		  htab->lazy_plt->plt0_entry_size);
	  /* Add offset for pushq GOT+8(%rip), since the instruction
	     uses 6 bytes subtract this value.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 8
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - 6),
		      (htab->elf.splt->contents
		       + htab->lazy_plt->plt0_got1_offset));
	  /* Add offset for the PC-relative instruction accessing
	     GOT+16, subtracting the offset to the end of that
	     instruction.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 16
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->lazy_plt->plt0_got2_insn_end),
		      (htab->elf.splt->contents
		       + htab->lazy_plt->plt0_got2_offset));
	}

      if (htab->elf.tlsdesc_plt)
	{
	  /* Clear the reserved TLSDESC GOT slot; the dynamic linker
	     fills it in at run time.  */
	  bfd_put_64 (output_bfd, (bfd_vma) 0,
		      htab->elf.sgot->contents + htab->elf.tlsdesc_got);

	  memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
		  htab->lazy_plt->plt_tlsdesc_entry,
		  htab->lazy_plt->plt_tlsdesc_entry_size);

	  /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
	     bytes and the instruction uses 6 bytes, subtract these
	     values.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgotplt->output_section->vma
		       + htab->elf.sgotplt->output_offset
		       + 8
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->elf.tlsdesc_plt
		       - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
		      (htab->elf.splt->contents
		       + htab->elf.tlsdesc_plt
		       + htab->lazy_plt->plt_tlsdesc_got1_offset));
	  /* Add offset for indirect branch via GOT+TDG, where TDG
	     stands for htab->tlsdesc_got, subtracting the offset
	     to the end of that instruction.  */
	  bfd_put_32 (output_bfd,
		      (htab->elf.sgot->output_section->vma
		       + htab->elf.sgot->output_offset
		       + htab->elf.tlsdesc_got
		       - htab->elf.splt->output_section->vma
		       - htab->elf.splt->output_offset
		       - htab->elf.tlsdesc_plt
		       - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
		      (htab->elf.splt->contents
		       + htab->elf.tlsdesc_plt
		       + htab->lazy_plt->plt_tlsdesc_got2_offset));
	}
    }

  /* Fill PLT entries for undefined weak symbols in PIE.  */
  if (bfd_link_pie (info))
    bfd_hash_traverse (&info->hash->table,
		       elf_x86_64_pie_finish_undefweak_symbol,
		       info);

  return true;
}
4758
4759 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4760 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4761 It has to be done before elf_link_sort_relocs is called so that
4762 dynamic relocations are properly sorted. */
4763
4764 static bool
elf_x86_64_output_arch_local_syms(bfd * output_bfd ATTRIBUTE_UNUSED,struct bfd_link_info * info,void * flaginfo ATTRIBUTE_UNUSED,int (* func)(void *,const char *,Elf_Internal_Sym *,asection *,struct elf_link_hash_entry *)ATTRIBUTE_UNUSED)4765 elf_x86_64_output_arch_local_syms
4766 (bfd *output_bfd ATTRIBUTE_UNUSED,
4767 struct bfd_link_info *info,
4768 void *flaginfo ATTRIBUTE_UNUSED,
4769 int (*func) (void *, const char *,
4770 Elf_Internal_Sym *,
4771 asection *,
4772 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4773 {
4774 struct elf_x86_link_hash_table *htab
4775 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4776 if (htab == NULL)
4777 return false;
4778
4779 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4780 htab_traverse (htab->loc_hash_table,
4781 elf_x86_64_finish_local_dynamic_symbol,
4782 info);
4783
4784 return true;
4785 }
4786
/* Similar to _bfd_elf_get_synthetic_symtab.  Support PLTs with all
   dynamic relocations.  Identifies each PLT-like section in ABFD by
   matching its contents against the known lazy/non-lazy/BND/IBT entry
   templates, then delegates synthetic symbol creation to
   _bfd_x86_elf_get_synthetic_symtab.  Returns the number of synthetic
   symbols, or -1 on error.  */

static long
elf_x86_64_get_synthetic_symtab (bfd *abfd,
				 long symcount ATTRIBUTE_UNUSED,
				 asymbol **syms ATTRIBUTE_UNUSED,
				 long dynsymcount,
				 asymbol **dynsyms,
				 asymbol **ret)
{
  long count, i, n;
  int j;
  bfd_byte *plt_contents;
  long relsize;
  const struct elf_x86_lazy_plt_layout *lazy_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
  const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
  const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
  const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
  asection *plt;
  enum elf_x86_plt_type plt_type;
  /* Candidate PLT sections; .plt's flavor is detected from contents.  */
  struct elf_x86_plt plts[] =
    {
      { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
      { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
      { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
      { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
      { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
    };

  *ret = NULL;

  /* Synthetic PLT symbols only make sense for executables and shared
     objects.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
  if (relsize <= 0)
    return -1;

  lazy_plt = &elf_x86_64_lazy_plt;
  non_lazy_plt = &elf_x86_64_non_lazy_plt;
  lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
  non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
  /* IBT PLT layouts differ between x86-64 and x32.  */
  if (ABI_64_P (abfd))
    {
      lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
      non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
    }
  else
    {
      lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
      non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
    }

  count = 0;
  for (j = 0; plts[j].name != NULL; j++)
    {
      plt = bfd_get_section_by_name (abfd, plts[j].name);
      if (plt == NULL || plt->size == 0)
	continue;

      /* Get the PLT section contents.  */
      if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
	break;

      /* Check what kind of PLT it is.  */
      plt_type = plt_unknown;
      if (plts[j].type == plt_unknown
	  && (plt->size >= (lazy_plt->plt_entry_size
			    + lazy_plt->plt_entry_size)))
	{
	  /* Match lazy PLT first.  Need to check the first two
	     instructions.  */
	  if ((memcmp (plt_contents, lazy_plt->plt0_entry,
		       lazy_plt->plt0_got1_offset) == 0)
	      && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
			  2) == 0))
	    plt_type = plt_lazy;
	  else if (lazy_bnd_plt != NULL
		   && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
			       lazy_bnd_plt->plt0_got1_offset) == 0)
		   && (memcmp (plt_contents + 6,
			       lazy_bnd_plt->plt0_entry + 6, 3) == 0))
	    {
	      plt_type = plt_lazy | plt_second;
	      /* The first entry in the lazy IBT PLT is the same as the
		 lazy BND PLT.  */
	      if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
			   lazy_ibt_plt->plt_entry,
			   lazy_ibt_plt->plt_got_offset) == 0))
		lazy_plt = lazy_ibt_plt;
	      else
		lazy_plt = lazy_bnd_plt;
	    }
	}

      if (non_lazy_plt != NULL
	  && (plt_type == plt_unknown || plt_type == plt_non_lazy)
	  && plt->size >= non_lazy_plt->plt_entry_size)
	{
	  /* Match non-lazy PLT.  */
	  if (memcmp (plt_contents, non_lazy_plt->plt_entry,
		      non_lazy_plt->plt_got_offset) == 0)
	    plt_type = plt_non_lazy;
	}

      if (plt_type == plt_unknown || plt_type == plt_second)
	{
	  if (non_lazy_bnd_plt != NULL
	      && plt->size >= non_lazy_bnd_plt->plt_entry_size
	      && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
			  non_lazy_bnd_plt->plt_got_offset) == 0))
	    {
	      /* Match BND PLT.  */
	      plt_type = plt_second;
	      non_lazy_plt = non_lazy_bnd_plt;
	    }
	  else if (non_lazy_ibt_plt != NULL
		   && plt->size >= non_lazy_ibt_plt->plt_entry_size
		   && (memcmp (plt_contents,
			       non_lazy_ibt_plt->plt_entry,
			       non_lazy_ibt_plt->plt_got_offset) == 0))
	    {
	      /* Match IBT PLT.  */
	      plt_type = plt_second;
	      non_lazy_plt = non_lazy_ibt_plt;
	    }
	}

      if (plt_type == plt_unknown)
	{
	  /* Unrecognized section contents; skip it.  */
	  free (plt_contents);
	  continue;
	}

      plts[j].sec = plt;
      plts[j].type = plt_type;

      if ((plt_type & plt_lazy))
	{
	  plts[j].plt_got_offset = lazy_plt->plt_got_offset;
	  plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
	  plts[j].plt_entry_size = lazy_plt->plt_entry_size;
	  /* Skip PLT0 in lazy PLT.  */
	  i = 1;
	}
      else
	{
	  plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
	  plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
	  plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
	  i = 0;
	}

      /* Skip lazy PLT when the second PLT is used.  */
      if (plt_type == (plt_lazy | plt_second))
	plts[j].count = 0;
      else
	{
	  n = plt->size / plts[j].plt_entry_size;
	  plts[j].count = n;
	  count += n - i;
	}

      /* Ownership of plt_contents passes to plts[j]; the common code
	 frees it.  */
      plts[j].contents = plt_contents;
    }

  return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
					    (bfd_vma) 0, plts, dynsyms,
					    ret);
}
4963
4964 /* Handle an x86-64 specific section when reading an object file. This
4965 is called when elfcode.h finds a section with an unknown type. */
4966
4967 static bool
elf_x86_64_section_from_shdr(bfd * abfd,Elf_Internal_Shdr * hdr,const char * name,int shindex)4968 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4969 const char *name, int shindex)
4970 {
4971 if (hdr->sh_type != SHT_X86_64_UNWIND)
4972 return false;
4973
4974 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4975 return false;
4976
4977 return true;
4978 }
4979
4980 /* Hook called by the linker routine which adds symbols from an object
4981 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4982 of .bss. */
4983
4984 static bool
elf_x86_64_add_symbol_hook(bfd * abfd,struct bfd_link_info * info ATTRIBUTE_UNUSED,Elf_Internal_Sym * sym,const char ** namep ATTRIBUTE_UNUSED,flagword * flagsp ATTRIBUTE_UNUSED,asection ** secp,bfd_vma * valp)4985 elf_x86_64_add_symbol_hook (bfd *abfd,
4986 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4987 Elf_Internal_Sym *sym,
4988 const char **namep ATTRIBUTE_UNUSED,
4989 flagword *flagsp ATTRIBUTE_UNUSED,
4990 asection **secp,
4991 bfd_vma *valp)
4992 {
4993 asection *lcomm;
4994
4995 switch (sym->st_shndx)
4996 {
4997 case SHN_X86_64_LCOMMON:
4998 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4999 if (lcomm == NULL)
5000 {
5001 lcomm = bfd_make_section_with_flags (abfd,
5002 "LARGE_COMMON",
5003 (SEC_ALLOC
5004 | SEC_IS_COMMON
5005 | SEC_LINKER_CREATED));
5006 if (lcomm == NULL)
5007 return false;
5008 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5009 }
5010 *secp = lcomm;
5011 *valp = sym->st_size;
5012 return true;
5013 }
5014
5015 return true;
5016 }
5017
5018
5019 /* Given a BFD section, try to locate the corresponding ELF section
5020 index. */
5021
5022 static bool
elf_x86_64_elf_section_from_bfd_section(bfd * abfd ATTRIBUTE_UNUSED,asection * sec,int * index_return)5023 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5024 asection *sec, int *index_return)
5025 {
5026 if (sec == &_bfd_elf_large_com_section)
5027 {
5028 *index_return = SHN_X86_64_LCOMMON;
5029 return true;
5030 }
5031 return false;
5032 }
5033
5034 /* Process a symbol. */
5035
5036 static void
elf_x86_64_symbol_processing(bfd * abfd ATTRIBUTE_UNUSED,asymbol * asym)5037 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5038 asymbol *asym)
5039 {
5040 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5041
5042 switch (elfsym->internal_elf_sym.st_shndx)
5043 {
5044 case SHN_X86_64_LCOMMON:
5045 asym->section = &_bfd_elf_large_com_section;
5046 asym->value = elfsym->internal_elf_sym.st_size;
5047 /* Common symbol doesn't set BSF_GLOBAL. */
5048 asym->flags &= ~BSF_GLOBAL;
5049 break;
5050 }
5051 }
5052
5053 static bool
elf_x86_64_common_definition(Elf_Internal_Sym * sym)5054 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5055 {
5056 return (sym->st_shndx == SHN_COMMON
5057 || sym->st_shndx == SHN_X86_64_LCOMMON);
5058 }
5059
5060 static unsigned int
elf_x86_64_common_section_index(asection * sec)5061 elf_x86_64_common_section_index (asection *sec)
5062 {
5063 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5064 return SHN_COMMON;
5065 else
5066 return SHN_X86_64_LCOMMON;
5067 }
5068
5069 static asection *
elf_x86_64_common_section(asection * sec)5070 elf_x86_64_common_section (asection *sec)
5071 {
5072 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5073 return bfd_com_section_ptr;
5074 else
5075 return &_bfd_elf_large_com_section;
5076 }
5077
5078 static bool
elf_x86_64_merge_symbol(struct elf_link_hash_entry * h,const Elf_Internal_Sym * sym,asection ** psec,bool newdef,bool olddef,bfd * oldbfd,const asection * oldsec)5079 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5080 const Elf_Internal_Sym *sym,
5081 asection **psec,
5082 bool newdef,
5083 bool olddef,
5084 bfd *oldbfd,
5085 const asection *oldsec)
5086 {
5087 /* A normal common symbol and a large common symbol result in a
5088 normal common symbol. We turn the large common symbol into a
5089 normal one. */
5090 if (!olddef
5091 && h->root.type == bfd_link_hash_common
5092 && !newdef
5093 && bfd_is_com_section (*psec)
5094 && oldsec != *psec)
5095 {
5096 if (sym->st_shndx == SHN_COMMON
5097 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5098 {
5099 h->root.u.c.p->section
5100 = bfd_make_section_old_way (oldbfd, "COMMON");
5101 h->root.u.c.p->section->flags = SEC_ALLOC;
5102 }
5103 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5104 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5105 *psec = bfd_com_section_ptr;
5106 }
5107
5108 return true;
5109 }
5110
5111 static int
elf_x86_64_additional_program_headers(bfd * abfd,struct bfd_link_info * info ATTRIBUTE_UNUSED)5112 elf_x86_64_additional_program_headers (bfd *abfd,
5113 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5114 {
5115 asection *s;
5116 int count = 0;
5117
5118 /* Check to see if we need a large readonly segment. */
5119 s = bfd_get_section_by_name (abfd, ".lrodata");
5120 if (s && (s->flags & SEC_LOAD))
5121 count++;
5122
5123 /* Check to see if we need a large data segment. Since .lbss sections
5124 is placed right after the .bss section, there should be no need for
5125 a large data segment just because of .lbss. */
5126 s = bfd_get_section_by_name (abfd, ".ldata");
5127 if (s && (s->flags & SEC_LOAD))
5128 count++;
5129
5130 return count;
5131 }
5132
5133 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5134
5135 static bool
elf_x86_64_relocs_compatible(const bfd_target * input,const bfd_target * output)5136 elf_x86_64_relocs_compatible (const bfd_target *input,
5137 const bfd_target *output)
5138 {
5139 return ((xvec_get_elf_backend_data (input)->s->elfclass
5140 == xvec_get_elf_backend_data (output)->s->elfclass)
5141 && _bfd_elf_relocs_compatible (input, output));
5142 }
5143
5144 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5145 with GNU properties if found. Otherwise, return NULL. */
5146
5147 static bfd *
elf_x86_64_link_setup_gnu_properties(struct bfd_link_info * info)5148 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5149 {
5150 struct elf_x86_init_table init_table;
5151 const struct elf_backend_data *bed;
5152 struct elf_x86_link_hash_table *htab;
5153
5154 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5155 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5156 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5157 != (int) R_X86_64_GNU_VTINHERIT)
5158 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5159 != (int) R_X86_64_GNU_VTENTRY))
5160 abort ();
5161
5162 /* This is unused for x86-64. */
5163 init_table.plt0_pad_byte = 0x90;
5164
5165 bed = get_elf_backend_data (info->output_bfd);
5166 htab = elf_x86_hash_table (info, bed->target_id);
5167 if (!htab)
5168 abort ();
5169 if (htab->params->bndplt)
5170 {
5171 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5172 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5173 }
5174 else
5175 {
5176 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5177 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5178 }
5179
5180 if (ABI_64_P (info->output_bfd))
5181 {
5182 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5183 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5184 }
5185 else
5186 {
5187 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5188 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5189 }
5190
5191 if (ABI_64_P (info->output_bfd))
5192 {
5193 init_table.r_info = elf64_r_info;
5194 init_table.r_sym = elf64_r_sym;
5195 }
5196 else
5197 {
5198 init_table.r_info = elf32_r_info;
5199 init_table.r_sym = elf32_r_sym;
5200 }
5201
5202 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5203 }
5204
5205 static const struct bfd_elf_special_section
5206 elf_x86_64_special_sections[]=
5207 {
5208 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5209 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5210 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5211 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5212 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5213 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5214 { NULL, 0, 0, 0, 0 }
5215 };
5216
/* Target vector and backend-hook configuration for the primary
   elf64-x86-64 target; "elf64-target.h" below instantiates the target
   vector from these macros.  */
#define TARGET_LITTLE_SYM		    x86_64_elf64_vec
#define TARGET_LITTLE_NAME		    "elf64-x86-64"
#define ELF_ARCH			    bfd_arch_i386
#define ELF_TARGET_ID			    X86_64_ELF_DATA
#define ELF_MACHINE_CODE		    EM_X86_64
/* With -z separate-code the maximum page size drops to 4K.  */
#if DEFAULT_LD_Z_SEPARATE_CODE
# define ELF_MAXPAGESIZE		    0x1000
#else
# define ELF_MAXPAGESIZE		    0x200000
#endif
#define ELF_MINPAGESIZE			    0x1000
#define ELF_COMMONPAGESIZE		    0x1000

#define elf_backend_can_gc_sections	    1
#define elf_backend_can_refcount	    1
#define elf_backend_want_got_plt	    1
#define elf_backend_plt_readonly	    1
#define elf_backend_want_plt_sym	    0
#define elf_backend_got_header_size	    (GOT_ENTRY_SIZE*3)
#define elf_backend_rela_normal		    1
#define elf_backend_plt_alignment	    4
#define elf_backend_extern_protected_data   1
#define elf_backend_caches_rawsize	    1
#define elf_backend_dtrel_excludes_plt	    1
#define elf_backend_want_dynrelro	    1

#define elf_info_to_howto		    elf_x86_64_info_to_howto

#define bfd_elf64_bfd_reloc_type_lookup	    elf_x86_64_reloc_type_lookup
#define bfd_elf64_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup

#define elf_backend_relocs_compatible	    elf_x86_64_relocs_compatible
#define elf_backend_check_relocs	    elf_x86_64_check_relocs
#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
#define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol   elf_x86_64_finish_dynamic_symbol
#define elf_backend_output_arch_local_syms  elf_x86_64_output_arch_local_syms
#define elf_backend_grok_prstatus	    elf_x86_64_grok_prstatus
#define elf_backend_grok_psinfo		    elf_x86_64_grok_psinfo
/* Core-note writing is only available when a core header was configured.  */
#ifdef CORE_HEADER
#define elf_backend_write_core_note	    elf_x86_64_write_core_note
#endif
#define elf_backend_reloc_type_class	    elf_x86_64_reloc_type_class
#define elf_backend_relocate_section	    elf_x86_64_relocate_section
#define elf_backend_init_index_section	    _bfd_elf_init_1_index_section
#define elf_backend_object_p		    elf64_x86_64_elf_object_p
#define bfd_elf64_get_synthetic_symtab	    elf_x86_64_get_synthetic_symtab

#define elf_backend_section_from_shdr \
	elf_x86_64_section_from_shdr

#define elf_backend_section_from_bfd_section \
  elf_x86_64_elf_section_from_bfd_section
#define elf_backend_add_symbol_hook \
  elf_x86_64_add_symbol_hook
#define elf_backend_symbol_processing \
  elf_x86_64_symbol_processing
#define elf_backend_common_section_index \
  elf_x86_64_common_section_index
#define elf_backend_common_section \
  elf_x86_64_common_section
#define elf_backend_common_definition \
  elf_x86_64_common_definition
#define elf_backend_merge_symbol \
  elf_x86_64_merge_symbol
#define elf_backend_special_sections \
  elf_x86_64_special_sections
#define elf_backend_additional_program_headers \
  elf_x86_64_additional_program_headers
#define elf_backend_setup_gnu_properties \
  elf_x86_64_link_setup_gnu_properties
#define elf_backend_hide_symbol \
  _bfd_x86_elf_hide_symbol

#undef	elf64_bed
#define elf64_bed elf64_x86_64_bed

#include "elf64-target.h"
5296
/* CloudABI support.  Re-instantiate the 64-bit target with the CloudABI
   OSABI byte and its own backend-data structure.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_cloudabi_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-cloudabi"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_CLOUDABI

#undef	elf64_bed
#define elf64_bed elf64_x86_64_cloudabi_bed

#include "elf64-target.h"
5311
/* FreeBSD support.  Same backend, but objects are branded with the
   FreeBSD OSABI byte.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_fbsd_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-freebsd"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_FREEBSD

#undef	elf64_bed
#define elf64_bed elf64_x86_64_fbsd_bed

#include "elf64-target.h"
5326
/* Solaris 2 support.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_sol2_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-sol2"

#undef	ELF_TARGET_OS
#define	ELF_TARGET_OS			    is_solaris

/* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
   objects won't be recognized.  */
#undef	ELF_OSABI

#undef	elf64_bed
#define elf64_bed			    elf64_x86_64_sol2_bed

/* The 64-bit static TLS arena size is rounded to the nearest 16-byte
   boundary.  */
#undef	elf_backend_static_tls_alignment
#define elf_backend_static_tls_alignment    16

/* The Solaris 2 ABI requires a plt symbol on all platforms.

   Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
   File, p.63.  */
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	    1

#undef	elf_backend_strtab_flags
#define elf_backend_strtab_flags	SHF_STRINGS
5358
/* Backend hook used by objcopy/strip to carry over sh_info/sh_link for
   OS-specific section types.  Currently a stub that copies nothing and
   reports that it handled no fields.  */
static bool
elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
						  bfd *obfd ATTRIBUTE_UNUSED,
						  const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
						  Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
{
  /* PR 19938: FIXME: Need to add code for setting the sh_info
     and sh_link fields of Solaris specific section types.  */
  return false;
}
5369
#undef	elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields

#include "elf64-target.h"

/* Restore defaults changed for Solaris, so later target instantiations
   in this file start from the generic settings.  */
#undef	ELF_OSABI
#undef	elf_backend_static_tls_alignment
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef	elf_backend_strtab_flags
#undef	elf_backend_copy_special_section_fields
5382
/* Intel L1OM support.  */

/* object_p hook: mark ABFD as an L1OM object.  The result of
   bfd_default_set_arch_mach is deliberately ignored here; the hook
   always accepts the file.  */
static bool
elf64_l1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for an L1OM elf64 file.  */
  bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
  return true;
}
5392
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    l1om_elf64_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-l1om"
#undef	ELF_ARCH
#define ELF_ARCH			    bfd_arch_l1om

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_L1OM

#undef	ELF_OSABI

#undef	elf64_bed
#define elf64_bed			    elf64_l1om_bed

#undef	elf_backend_object_p
#define elf_backend_object_p		    elf64_l1om_elf_object_p

/* Restore defaults.  */
#undef	ELF_MAXPAGESIZE
#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE
#if DEFAULT_LD_Z_SEPARATE_CODE
# define ELF_MAXPAGESIZE		0x1000
#else
# define ELF_MAXPAGESIZE		0x200000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment	4
#undef	ELF_TARGET_OS

#include "elf64-target.h"

/* FreeBSD L1OM support.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    l1om_elf64_fbsd_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-l1om-freebsd"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_FREEBSD

#undef	elf64_bed
#define elf64_bed			    elf64_l1om_fbsd_bed

#include "elf64-target.h"
5442
/* Intel K1OM support.  */

/* object_p hook: mark ABFD as a K1OM object.  As with the L1OM hook,
   the return value of bfd_default_set_arch_mach is ignored and the
   file is always accepted.  */
static bool
elf64_k1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for an K1OM elf64 file.  */
  bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
  return true;
}
5452
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    k1om_elf64_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-k1om"
#undef	ELF_ARCH
#define ELF_ARCH			    bfd_arch_k1om

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_K1OM

#undef	ELF_OSABI

#undef	elf64_bed
#define elf64_bed			    elf64_k1om_bed

#undef	elf_backend_object_p
#define elf_backend_object_p		    elf64_k1om_elf_object_p

#include "elf64-target.h"

/* FreeBSD K1OM support.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    k1om_elf64_fbsd_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-k1om-freebsd"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_FREEBSD

#undef	elf64_bed
#define elf64_bed			    elf64_k1om_fbsd_bed

#include "elf64-target.h"
5487
/* 32bit x86-64 support.  This instantiates the ELFCLASS32 (x32) target
   vector, reusing the 64-bit relocation machinery defined above.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf32_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf32-x86-64"
#undef	elf32_bed
#define elf32_bed			    elf32_x86_64_bed

#undef	ELF_ARCH
#define ELF_ARCH			    bfd_arch_i386

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_X86_64

#undef	ELF_OSABI

/* x32 shares the 64-bit reloc lookup and synthetic-symbol routines.  */
#define bfd_elf32_bfd_reloc_type_lookup \
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab

#undef	elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p

#undef	elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef	elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#include "elf32-target.h"
5525