1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2020 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since 32-bit and 64-bit x86-64 encode the relocation type in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
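/* Worked example of the index mapping above (an explanatory sketch; the
   concrete values come from elf/x86-64.h and are only repeated here for
   illustration): with R_X86_64_REX_GOTPCRELX == 42 the table holds
   R_X86_64_standard == 43 "normal" entries, and with
   R_X86_64_GNU_VTINHERIT == 250 we get
       R_X86_64_vt_offset == 250 - 43 == 207,
   so R_X86_64_GNU_VTINHERIT maps to slot 250 - 207 == 43 and
   R_X86_64_GNU_VTENTRY to slot 44, i.e. the two HOWTOs that follow.  */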
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
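/* Illustrative sketch (not part of the linker proper): how the
   converted-reloc marker and the predicate above are typically used
   together.  The helper below is hypothetical and never compiled; it
   only shows the intended masking order.  */
#if 0
static bfd_boolean
example_is_pcrel_after_conversion (unsigned int r_type)
{
  /* Strip the marker that elf_x86_64_convert_load_reloc sets when it
     rewrites a GOTPCREL access, then classify what is left.  */
  unsigned int base_type = r_type & ~R_X86_64_converted_reloc_bit;
  return X86_PCREL_TYPE_P (base_type);
}
#endif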
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
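/* Usage sketch (hypothetical caller, for illustration only): the x32
   R_X86_64_32 HOWTO lives in the extra last slot of the table, so the
   same call yields different overflow checking depending on the ABI:

     reloc_howto_type *howto
       = elf_x86_64_rtype_to_howto (abfd, R_X86_64_32);
     // ABI_64_P (abfd): complain_overflow_unsigned (slot R_X86_64_32)
     // x32:             complain_overflow_bitfield (last table slot)
*/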
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
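/* A minimal sketch of how the two lookup routines above relate; the
   helper below is hypothetical (never compiled) and only illustrates
   that a BFD reloc code and the corresponding reloc name resolve to
   the same HOWTO entry.  */
#if 0
static bfd_boolean
example_lookups_agree (bfd *abfd)
{
  reloc_howto_type *by_code
    = elf_x86_64_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);
  reloc_howto_type *by_name
    = elf_x86_64_reloc_name_lookup (abfd, "R_X86_64_PC32");
  return by_code != NULL && by_code == by_name;
}
#endif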
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
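/* Layout arithmetic behind the numbers above (an explanatory sketch,
   assuming the usual Linux elf_prstatus definitions): pr_reg is an
   elf_gregset_t of 27 eight-byte general registers, hence
   size == 27 * 8 == 216 for both ABIs, while the pr_reg offset differs
   (72 on x32, 112 on x86-64) because the siginfo/timeval fields that
   precede it are laid out with different sizes on the two ABIs.  */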
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some implementations (at least
428 one, anyway), so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
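/* How the three placeholder fields above are filled in (a sketch of
   the arithmetic only; the actual writes happen when PLT entries are
   finalized elsewhere in this file, and the byte offsets 2, 7 and 12
   are the plt_got_offset, plt_reloc_offset and plt_plt_offset values
   recorded in elf_x86_64_lazy_plt below):

     offset 2:  GOT entry address - (PLT entry address + 6)
     offset 7:  index of this symbol's JUMP_SLOT relocation in .rela.plt
     offset 12: PLT0 address - (PLT entry address + 16),
		i.e. a negative displacement back to the first entry.  */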
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is
595 the same as the lazy PLT with BND prefix so that bound registers are
596 preserved when control is passed to the dynamic linker. Subsequent
597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for IBT-enabled branches in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for IBT-enabled branches in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
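/* What the DW_CFA_def_cfa_expression above computes (a worked sketch,
   not normative): after the first two instructions of a lazy PLT entry
   the return address sits either 8 or 16 bytes above %rsp, depending on
   whether the "pushq" in the entry has executed yet.  The expression

     CFA = %rsp + 8 + 8 * ((%rip & 15) >= 11)

   encodes exactly that: within a 16-byte entry of
   elf_x86_64_lazy_plt_entry the pushq has been executed once %rip
   reaches offset 11 (the back-branch), so the extra 8 bytes are added
   only then.  The BND and IBT variants below differ only in the offset
   compared against (DW_OP_lit5, DW_OP_lit10 or DW_OP_lit9).  */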
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
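/* Reading the table above against the entry templates (a sketch of the
   correspondence, for orientation only): plt_got_offset == 2,
   plt_reloc_offset == 7 and plt_plt_offset == 12 are the byte offsets
   of the three 32-bit placeholder fields in each 16-byte
   elf_x86_64_lazy_plt_entry, while plt0_got1_offset == 2 and
   plt0_got2_offset == 8 locate the GOT+8 and GOT+16 displacements
   inside elf_x86_64_lazy_plt0_entry.  */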
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence supports transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transit to different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transit to different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq $r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq $rbx, %rax
1076 call *%rax */
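/* Byte-level view of the 64-bit GD sequence above (an illustrative
   sketch; OFFSET is the r_offset of the R_X86_64_TLSGD relocation,
   which points at the leaq displacement):

     66 48 8d 3d xx xx xx xx   data16 leaq foo@tlsgd(%rip), %rdi
     66 66 48 e8 xx xx xx xx   data16 data16 rex64 call __tls_get_addr@PLT

   so contents + OFFSET - 4 must match "leaq" below, and
   call = contents + OFFSET + 4 lands on the first byte of the call
   sequence (0x66 0x66 0x48 0xe8 here, or 0x66 0x48 0xff 0x15 for the
   indirect *__tls_get_addr@GOTPCREL(%rip) form).  */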
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transit to different access model. For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq $r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq $rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
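/* For example (a sketch of the encodings being checked below):
     movq foo@gottpoff(%rip), %rax   ->  48 8b 05 xx xx xx xx
     addq foo@gottpoff(%rip), %r12   ->  4c 03 25 xx xx xx xx
   The REX byte sits at OFFSET - 3, the opcode (0x8b or 0x03) at
   OFFSET - 2, and the ModRM byte at OFFSET - 1 must have mod == 00 and
   r/m == 101 (RIP-relative), i.e. (modrm & 0xc7) == 5.  */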
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = NULL;
1426 break;
1427 }
1428
1429 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = NULL;
1436 }
1437
1438 if (bfd_link_dll (info))
1439 {
1440 object = _("a shared object");
1441 if (!pic)
1442 pic = _("; recompile with -fPIC");
1443 }
1444 else
1445 {
1446 if (bfd_link_pie (info))
1447 object = _("a PIE object");
1448 else
1449 object = _("a PDE object");
1450 if (!pic)
1451 pic = _("; recompile with -fPIE");
1452 }
1453
1454 /* xgettext:c-format */
1455 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1456 "not be used when making %s%s"),
1457 input_bfd, howto->name, und, v, name,
1458 object, pic);
1459 bfd_set_error (bfd_error_bad_value);
1460 sec->check_relocs_failed = 1;
1461 return FALSE;
1462 }
1463
1464 /* With the local symbol, foo, we convert
1465 mov foo@GOTPCREL(%rip), %reg
1466 to
1467 lea foo(%rip), %reg
1468 and convert
1469 call/jmp *foo@GOTPCREL(%rip)
1470 to
1471 nop call foo/jmp foo nop
1472 When PIC is false, convert
1473 test %reg, foo@GOTPCREL(%rip)
1474 to
1475 test $foo, %reg
1476 and convert
1477 binop foo@GOTPCREL(%rip), %reg
1478 to
1479 binop $foo, %reg
1480 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1481 instructions. */
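/* Encoding view of the conversions described above (an illustrative
   sketch; "xx" stands for the displacement or immediate bytes, which
   stay relocated):

     mov foo@GOTPCREL(%rip), %rax   48 8b 05 xx xx xx xx
       -> lea foo(%rip), %rax       48 8d 05 xx xx xx xx  (R_X86_64_PC32)
       -> mov $foo, %rax            48 c7 c0 xx xx xx xx  (R_X86_64_32S)
     call *foo@GOTPCREL(%rip)       ff 15 xx xx xx xx
       -> addr32 call foo           67 e8 xx xx xx xx     (R_X86_64_PC32)
     jmp *foo@GOTPCREL(%rip)        ff 25 xx xx xx xx
       -> jmp foo; nop              e9 xx xx xx xx 90     (R_X86_64_PC32)

   Only the opcode, ModRM and optional REX bytes are rewritten; the
   4-byte relocated field stays in place, or may shift by one byte for
   the branch forms, as handled in the code below.  */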
1482
1483 static bfd_boolean
1484 elf_x86_64_convert_load_reloc (bfd *abfd,
1485 bfd_byte *contents,
1486 unsigned int *r_type_p,
1487 Elf_Internal_Rela *irel,
1488 struct elf_link_hash_entry *h,
1489 bfd_boolean *converted,
1490 struct bfd_link_info *link_info)
1491 {
1492 struct elf_x86_link_hash_table *htab;
1493 bfd_boolean is_pic;
1494 bfd_boolean no_overflow;
1495 bfd_boolean relocx;
1496 bfd_boolean to_reloc_pc32;
1497 asection *tsec;
1498 bfd_signed_vma raddend;
1499 unsigned int opcode;
1500 unsigned int modrm;
1501 unsigned int r_type = *r_type_p;
1502 unsigned int r_symndx;
1503 bfd_vma roff = irel->r_offset;
1504
1505 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1506 return TRUE;
1507
1508 raddend = irel->r_addend;
1509 /* Addend for 32-bit PC-relative relocation must be -4. */
1510 if (raddend != -4)
1511 return TRUE;
1512
1513 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1514 is_pic = bfd_link_pic (link_info);
1515
1516 relocx = (r_type == R_X86_64_GOTPCRELX
1517 || r_type == R_X86_64_REX_GOTPCRELX);
1518
1519 /* TRUE if --no-relax is used. */
1520 no_overflow = link_info->disable_target_specific_optimizations > 1;
1521
1522 r_symndx = htab->r_sym (irel->r_info);
1523
1524 opcode = bfd_get_8 (abfd, contents + roff - 2);
1525
1526 /* Convert mov to lea since it has been done for a while. */
1527 if (opcode != 0x8b)
1528 {
1529 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1530 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1531 test, xor instructions. */
1532 if (!relocx)
1533 return TRUE;
1534 }
1535
1536 /* We convert only to R_X86_64_PC32:
1537 1. Branch.
1538 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1539 3. no_overflow is true.
1540 4. PIC.
1541 */
1542 to_reloc_pc32 = (opcode == 0xff
1543 || !relocx
1544 || no_overflow
1545 || is_pic);
1546
1547 /* Get the symbol referred to by the reloc. */
1548 if (h == NULL)
1549 {
1550 Elf_Internal_Sym *isym
1551 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1552
1553 /* Skip relocation against undefined symbols. */
1554 if (isym->st_shndx == SHN_UNDEF)
1555 return TRUE;
1556
1557 if (isym->st_shndx == SHN_ABS)
1558 tsec = bfd_abs_section_ptr;
1559 else if (isym->st_shndx == SHN_COMMON)
1560 tsec = bfd_com_section_ptr;
1561 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1562 tsec = &_bfd_elf_large_com_section;
1563 else
1564 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1565 }
1566 else
1567 {
1568 /* An undefined weak symbol is only bound locally in an executable
1569 and its reference is resolved as 0 without relocation
1570 overflow. We can only perform this optimization for
1571 GOTPCRELX relocations since we need to modify the REX byte.
1572 It is OK to convert mov with R_X86_64_GOTPCREL to
1573 R_X86_64_PC32. */
1574 bfd_boolean local_ref;
1575 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1576
1577 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1578 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1579 if ((relocx || opcode == 0x8b)
1580 && (h->root.type == bfd_link_hash_undefweak
1581 && !eh->linker_def
1582 && local_ref))
1583 {
1584 if (opcode == 0xff)
1585 {
1586 /* Skip for branch instructions since R_X86_64_PC32
1587 may overflow. */
1588 if (no_overflow)
1589 return TRUE;
1590 }
1591 else if (relocx)
1592 {
1593 /* For non-branch instructions, we can convert to
1594 R_X86_64_32/R_X86_64_32S since we know if there
1595 is a REX byte. */
1596 to_reloc_pc32 = FALSE;
1597 }
1598
1599 /* Since we don't know the current PC when PIC is true,
1600 we can't convert to R_X86_64_PC32. */
1601 if (to_reloc_pc32 && is_pic)
1602 return TRUE;
1603
1604 goto convert;
1605 }
1606 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1607 ld.so may use its link-time address. */
1608 else if (h->start_stop
1609 || eh->linker_def
1610 || ((h->def_regular
1611 || h->root.type == bfd_link_hash_defined
1612 || h->root.type == bfd_link_hash_defweak)
1613 && h != htab->elf.hdynamic
1614 && local_ref))
1615 {
1616 /* bfd_link_hash_new or bfd_link_hash_undefined is
1617 set by an assignment in a linker script in
1618 bfd_elf_record_link_assignment. start_stop is set
1619 on __start_SECNAME/__stop_SECNAME which mark section
1620 SECNAME. */
1621 if (h->start_stop
1622 || eh->linker_def
1623 || (h->def_regular
1624 && (h->root.type == bfd_link_hash_new
1625 || h->root.type == bfd_link_hash_undefined
1626 || ((h->root.type == bfd_link_hash_defined
1627 || h->root.type == bfd_link_hash_defweak)
1628 && h->root.u.def.section == bfd_und_section_ptr))))
1629 {
1630 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1631 if (no_overflow)
1632 return TRUE;
1633 goto convert;
1634 }
1635 tsec = h->root.u.def.section;
1636 }
1637 else
1638 return TRUE;
1639 }
1640
1641 /* Don't convert GOTPCREL relocation against large section. */
1642 if (elf_section_data (tsec) != NULL
1643 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1644 return TRUE;
1645
1646 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1647 if (no_overflow)
1648 return TRUE;
1649
1650 convert:
1651 if (opcode == 0xff)
1652 {
1653 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1654 unsigned int nop;
1655 unsigned int disp;
1656 bfd_vma nop_offset;
1657
1658 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1659 R_X86_64_PC32. */
1660 modrm = bfd_get_8 (abfd, contents + roff - 1);
1661 if (modrm == 0x25)
1662 {
1663 /* Convert to "jmp foo nop". */
1664 modrm = 0xe9;
1665 nop = NOP_OPCODE;
1666 nop_offset = irel->r_offset + 3;
1667 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1668 irel->r_offset -= 1;
1669 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1670 }
1671 else
1672 {
1673 struct elf_x86_link_hash_entry *eh
1674 = (struct elf_x86_link_hash_entry *) h;
1675
1676 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1677 is a nop prefix. */
1678 modrm = 0xe8;
1679 /* To support TLS optimization, always use addr32 prefix for
1680 "call *__tls_get_addr@GOTPCREL(%rip)". */
1681 if (eh && eh->tls_get_addr)
1682 {
1683 nop = 0x67;
1684 nop_offset = irel->r_offset - 2;
1685 }
1686 else
1687 {
1688 nop = htab->params->call_nop_byte;
1689 if (htab->params->call_nop_as_suffix)
1690 {
1691 nop_offset = irel->r_offset + 3;
1692 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1693 irel->r_offset -= 1;
1694 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1695 }
1696 else
1697 nop_offset = irel->r_offset - 2;
1698 }
1699 }
1700 bfd_put_8 (abfd, nop, contents + nop_offset);
1701 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1702 r_type = R_X86_64_PC32;
1703 }
1704 else
1705 {
1706 unsigned int rex;
1707 unsigned int rex_mask = REX_R;
1708
1709 if (r_type == R_X86_64_REX_GOTPCRELX)
1710 rex = bfd_get_8 (abfd, contents + roff - 3);
1711 else
1712 rex = 0;
1713
1714 if (opcode == 0x8b)
1715 {
1716 if (to_reloc_pc32)
1717 {
1718 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1719 "lea foo(%rip), %reg". */
1720 opcode = 0x8d;
1721 r_type = R_X86_64_PC32;
1722 }
1723 else
1724 {
1725 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1726 "mov $foo, %reg". */
1727 opcode = 0xc7;
1728 modrm = bfd_get_8 (abfd, contents + roff - 1);
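	      /* The destination register is in the ModRM reg field
		 (bits 5:3) of the load; re-encode it in the r/m field
		 with mod == 3 so the C7 /0 immediate form targets the
		 register directly.  */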
1729 modrm = 0xc0 | (modrm & 0x38) >> 3;
1730 if ((rex & REX_W) != 0
1731 && ABI_64_P (link_info->output_bfd))
1732 {
1733 /* Keep the REX_W bit in REX byte for LP64. */
1734 r_type = R_X86_64_32S;
1735 goto rewrite_modrm_rex;
1736 }
1737 else
1738 {
1739 /* If the REX_W bit in REX byte isn't needed,
1740 use R_X86_64_32 and clear the W bit to avoid
1741 sign-extend imm32 to imm64. */
1742 r_type = R_X86_64_32;
1743 /* Clear the W bit in REX byte. */
1744 rex_mask |= REX_W;
1745 goto rewrite_modrm_rex;
1746 }
1747 }
1748 }
1749 else
1750 {
1751 /* R_X86_64_PC32 isn't supported. */
1752 if (to_reloc_pc32)
1753 return TRUE;
1754
1755 modrm = bfd_get_8 (abfd, contents + roff - 1);
1756 if (opcode == 0x85)
1757 {
1758 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1759 "test $foo, %reg". */
1760 modrm = 0xc0 | (modrm & 0x38) >> 3;
1761 opcode = 0xf7;
1762 }
1763 else
1764 {
1765 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1766 "binop $foo, %reg". */
1767 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1768 opcode = 0x81;
1769 }
1770
1771 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1772 overflow when sign-extending imm32 to imm64. */
1773 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1774
1775 rewrite_modrm_rex:
1776 bfd_put_8 (abfd, modrm, contents + roff - 1);
1777
1778 if (rex)
1779 {
1780 /* Move the R bit to the B bit in REX byte. */
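	      /* The register operand has moved from the ModRM reg field
		 to the r/m field, so its extension bit moves from REX.R
		 to REX.B.  */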
1781 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1782 bfd_put_8 (abfd, rex, contents + roff - 3);
1783 }
1784
1785 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1786 irel->r_addend = 0;
1787 }
1788
1789 bfd_put_8 (abfd, opcode, contents + roff - 2);
1790 }
1791
1792 *r_type_p = r_type;
1793 irel->r_info = htab->r_info (r_symndx,
1794 r_type | R_X86_64_converted_reloc_bit);
1795
1796 *converted = TRUE;
1797
1798 return TRUE;
1799 }
1800
1801 /* Look through the relocs for a section during the first phase, and
1802 calculate needed space in the global offset table, procedure
1803 linkage table, and dynamic reloc sections. */
1804
1805 static bfd_boolean
1806 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1807 asection *sec,
1808 const Elf_Internal_Rela *relocs)
1809 {
1810 struct elf_x86_link_hash_table *htab;
1811 Elf_Internal_Shdr *symtab_hdr;
1812 struct elf_link_hash_entry **sym_hashes;
1813 const Elf_Internal_Rela *rel;
1814 const Elf_Internal_Rela *rel_end;
1815 asection *sreloc;
1816 bfd_byte *contents;
1817 bfd_boolean converted;
1818
1819 if (bfd_link_relocatable (info))
1820 return TRUE;
1821
1822 /* Don't do anything special with non-loaded, non-alloced sections.
1823 In particular, any relocs in such sections should not affect GOT
1824 and PLT reference counting (ie. we don't allow them to create GOT
1825 or PLT entries), there's no possibility or desire to optimize TLS
1826 relocs, and there's not much point in propagating relocs to shared
1827 libs that the dynamic linker won't relocate. */
1828 if ((sec->flags & SEC_ALLOC) == 0)
1829 return TRUE;
1830
1831 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1832 if (htab == NULL)
1833 {
1834 sec->check_relocs_failed = 1;
1835 return FALSE;
1836 }
1837
1838 BFD_ASSERT (is_x86_elf (abfd, htab));
1839
1840 /* Get the section contents. */
1841 if (elf_section_data (sec)->this_hdr.contents != NULL)
1842 contents = elf_section_data (sec)->this_hdr.contents;
1843 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1844 {
1845 sec->check_relocs_failed = 1;
1846 return FALSE;
1847 }
1848
1849 symtab_hdr = &elf_symtab_hdr (abfd);
1850 sym_hashes = elf_sym_hashes (abfd);
1851
1852 converted = FALSE;
1853
1854 sreloc = NULL;
1855
1856 rel_end = relocs + sec->reloc_count;
1857 for (rel = relocs; rel < rel_end; rel++)
1858 {
1859 unsigned int r_type;
1860 unsigned int r_symndx;
1861 struct elf_link_hash_entry *h;
1862 struct elf_x86_link_hash_entry *eh;
1863 Elf_Internal_Sym *isym;
1864 const char *name;
1865 bfd_boolean size_reloc;
1866 bfd_boolean converted_reloc;
1867
1868 r_symndx = htab->r_sym (rel->r_info);
1869 r_type = ELF32_R_TYPE (rel->r_info);
1870
1871 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1872 {
1873 /* xgettext:c-format */
1874 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1875 abfd, r_symndx);
1876 goto error_return;
1877 }
1878
1879 if (r_symndx < symtab_hdr->sh_info)
1880 {
1881 /* A local symbol. */
1882 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1883 abfd, r_symndx);
1884 if (isym == NULL)
1885 goto error_return;
1886
1887 /* Check relocation against local STT_GNU_IFUNC symbol. */
1888 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1889 {
1890 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1891 TRUE);
1892 if (h == NULL)
1893 goto error_return;
1894
1895 /* Fake a STT_GNU_IFUNC symbol. */
1896 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1897 isym, NULL);
1898 h->type = STT_GNU_IFUNC;
1899 h->def_regular = 1;
1900 h->ref_regular = 1;
1901 h->forced_local = 1;
1902 h->root.type = bfd_link_hash_defined;
1903 }
1904 else
1905 h = NULL;
1906 }
1907 else
1908 {
1909 isym = NULL;
1910 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1911 while (h->root.type == bfd_link_hash_indirect
1912 || h->root.type == bfd_link_hash_warning)
1913 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1914 }
1915
1916 /* Check invalid x32 relocations. */
1917 if (!ABI_64_P (abfd))
1918 switch (r_type)
1919 {
1920 default:
1921 break;
1922
1923 case R_X86_64_DTPOFF64:
1924 case R_X86_64_TPOFF64:
1925 case R_X86_64_PC64:
1926 case R_X86_64_GOTOFF64:
1927 case R_X86_64_GOT64:
1928 case R_X86_64_GOTPCREL64:
1929 case R_X86_64_GOTPC64:
1930 case R_X86_64_GOTPLT64:
1931 case R_X86_64_PLTOFF64:
1932 {
1933 if (h)
1934 name = h->root.root.string;
1935 else
1936 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1937 NULL);
1938 _bfd_error_handler
1939 /* xgettext:c-format */
1940 (_("%pB: relocation %s against symbol `%s' isn't "
1941 "supported in x32 mode"), abfd,
1942 x86_64_elf_howto_table[r_type].name, name);
1943 bfd_set_error (bfd_error_bad_value);
1944 goto error_return;
1945 }
1946 break;
1947 }
1948
1949 if (h != NULL)
1950 {
1951 /* It is referenced by a non-shared object. */
1952 h->ref_regular = 1;
1953 }
1954
1955 converted_reloc = FALSE;
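      /* Try to convert GOTPCREL/GOTPCRELX loads into direct references.
	 STT_GNU_IFUNC symbols are skipped: their values are only known
	 at run time, so they must keep a GOT entry.  */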
1956 if ((r_type == R_X86_64_GOTPCREL
1957 || r_type == R_X86_64_GOTPCRELX
1958 || r_type == R_X86_64_REX_GOTPCRELX)
1959 && (h == NULL || h->type != STT_GNU_IFUNC))
1960 {
1961 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1962 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1963 irel, h, &converted_reloc,
1964 info))
1965 goto error_return;
1966
1967 if (converted_reloc)
1968 converted = TRUE;
1969 }
1970
1971 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1972 symtab_hdr, sym_hashes,
1973 &r_type, GOT_UNKNOWN,
1974 rel, rel_end, h, r_symndx, FALSE))
1975 goto error_return;
1976
1977 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1978 if (h == htab->elf.hgot)
1979 htab->got_referenced = TRUE;
1980
1981 eh = (struct elf_x86_link_hash_entry *) h;
1982 switch (r_type)
1983 {
1984 case R_X86_64_TLSLD:
1985 htab->tls_ld_or_ldm_got.refcount = 1;
1986 goto create_got;
1987
1988 case R_X86_64_TPOFF32:
1989 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1990 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1991 &x86_64_elf_howto_table[r_type]);
1992 if (eh != NULL)
1993 eh->zero_undefweak &= 0x2;
1994 break;
1995
1996 case R_X86_64_GOTTPOFF:
1997 if (!bfd_link_executable (info))
1998 info->flags |= DF_STATIC_TLS;
1999 /* Fall through */
2000
2001 case R_X86_64_GOT32:
2002 case R_X86_64_GOTPCREL:
2003 case R_X86_64_GOTPCRELX:
2004 case R_X86_64_REX_GOTPCRELX:
2005 case R_X86_64_TLSGD:
2006 case R_X86_64_GOT64:
2007 case R_X86_64_GOTPCREL64:
2008 case R_X86_64_GOTPLT64:
2009 case R_X86_64_GOTPC32_TLSDESC:
2010 case R_X86_64_TLSDESC_CALL:
2011 /* This symbol requires a global offset table entry. */
2012 {
2013 int tls_type, old_tls_type;
2014
2015 switch (r_type)
2016 {
2017 default: tls_type = GOT_NORMAL; break;
2018 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2019 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2020 case R_X86_64_GOTPC32_TLSDESC:
2021 case R_X86_64_TLSDESC_CALL:
2022 tls_type = GOT_TLS_GDESC; break;
2023 }
2024
2025 if (h != NULL)
2026 {
2027 h->got.refcount = 1;
2028 old_tls_type = eh->tls_type;
2029 }
2030 else
2031 {
2032 bfd_signed_vma *local_got_refcounts;
2033
2034 /* This is a global offset table entry for a local symbol. */
2035 local_got_refcounts = elf_local_got_refcounts (abfd);
2036 if (local_got_refcounts == NULL)
2037 {
2038 bfd_size_type size;
2039
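		/* Allocate one block holding three parallel per-local-symbol
		   arrays: GOT reference counts, TLSDESC GOT offsets
		   (bfd_vma) and GOT TLS types (char).  */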
2040 size = symtab_hdr->sh_info;
2041 size *= sizeof (bfd_signed_vma)
2042 + sizeof (bfd_vma) + sizeof (char);
2043 local_got_refcounts = ((bfd_signed_vma *)
2044 bfd_zalloc (abfd, size));
2045 if (local_got_refcounts == NULL)
2046 goto error_return;
2047 elf_local_got_refcounts (abfd) = local_got_refcounts;
2048 elf_x86_local_tlsdesc_gotent (abfd)
2049 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2050 elf_x86_local_got_tls_type (abfd)
2051 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2052 }
2053 local_got_refcounts[r_symndx] = 1;
2054 old_tls_type
2055 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2056 }
2057
2058 /* If a TLS symbol is accessed using IE at least once,
2059 	     there is no point in using the dynamic model for it.  */
2060 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2061 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2062 || tls_type != GOT_TLS_IE))
2063 {
2064 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2065 tls_type = old_tls_type;
2066 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2067 && GOT_TLS_GD_ANY_P (tls_type))
2068 tls_type |= old_tls_type;
2069 else
2070 {
2071 if (h)
2072 name = h->root.root.string;
2073 else
2074 name = bfd_elf_sym_name (abfd, symtab_hdr,
2075 isym, NULL);
2076 _bfd_error_handler
2077 /* xgettext:c-format */
2078 (_("%pB: '%s' accessed both as normal and"
2079 " thread local symbol"),
2080 abfd, name);
2081 bfd_set_error (bfd_error_bad_value);
2082 goto error_return;
2083 }
2084 }
2085
2086 if (old_tls_type != tls_type)
2087 {
2088 if (eh != NULL)
2089 eh->tls_type = tls_type;
2090 else
2091 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2092 }
2093 }
2094 /* Fall through */
2095
2096 case R_X86_64_GOTOFF64:
2097 case R_X86_64_GOTPC32:
2098 case R_X86_64_GOTPC64:
2099 create_got:
2100 if (eh != NULL)
2101 eh->zero_undefweak &= 0x2;
2102 break;
2103
2104 case R_X86_64_PLT32:
2105 case R_X86_64_PLT32_BND:
2106 /* This symbol requires a procedure linkage table entry. We
2107 actually build the entry in adjust_dynamic_symbol,
2108 because this might be a case of linking PIC code which is
2109 never referenced by a dynamic object, in which case we
2110 don't need to generate a procedure linkage table entry
2111 after all. */
2112
2113 /* If this is a local symbol, we resolve it directly without
2114 creating a procedure linkage table entry. */
2115 if (h == NULL)
2116 continue;
2117
2118 eh->zero_undefweak &= 0x2;
2119 h->needs_plt = 1;
2120 h->plt.refcount = 1;
2121 break;
2122
2123 case R_X86_64_PLTOFF64:
2124 /* This tries to form the 'address' of a function relative
2125 to GOT. For global symbols we need a PLT entry. */
2126 if (h != NULL)
2127 {
2128 h->needs_plt = 1;
2129 h->plt.refcount = 1;
2130 }
2131 goto create_got;
2132
2133 case R_X86_64_SIZE32:
2134 case R_X86_64_SIZE64:
2135 size_reloc = TRUE;
2136 goto do_size;
2137
2138 case R_X86_64_32:
2139 if (!ABI_64_P (abfd))
2140 goto pointer;
2141 /* Fall through. */
2142 case R_X86_64_8:
2143 case R_X86_64_16:
2144 case R_X86_64_32S:
2145 /* Check relocation overflow as these relocs may lead to
2146 run-time relocation overflow. Don't error out for
2147 sections we don't care about, such as debug sections or
2148 when relocation overflow check is disabled. */
2149 if (!htab->params->no_reloc_overflow_check
2150 && !converted_reloc
2151 && (bfd_link_pic (info)
2152 || (bfd_link_executable (info)
2153 && h != NULL
2154 && !h->def_regular
2155 && h->def_dynamic
2156 && (sec->flags & SEC_READONLY) == 0)))
2157 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2158 &x86_64_elf_howto_table[r_type]);
2159 /* Fall through. */
2160
2161 case R_X86_64_PC8:
2162 case R_X86_64_PC16:
2163 case R_X86_64_PC32:
2164 case R_X86_64_PC32_BND:
2165 case R_X86_64_PC64:
2166 case R_X86_64_64:
2167 pointer:
2168 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2169 eh->zero_undefweak |= 0x2;
2170 /* We are called after all symbols have been resolved. Only
2171 relocation against STT_GNU_IFUNC symbol must go through
2172 PLT. */
2173 if (h != NULL
2174 && (bfd_link_executable (info)
2175 || h->type == STT_GNU_IFUNC))
2176 {
2177 bfd_boolean func_pointer_ref = FALSE;
2178
2179 if (r_type == R_X86_64_PC32)
2180 {
2181 /* Since something like ".long foo - ." may be used
2182 		 as a pointer, make sure that the PLT is used if foo is
2183 a function defined in a shared library. */
2184 if ((sec->flags & SEC_CODE) == 0)
2185 {
2186 h->pointer_equality_needed = 1;
2187 if (bfd_link_pie (info)
2188 && h->type == STT_FUNC
2189 && !h->def_regular
2190 && h->def_dynamic)
2191 {
2192 h->needs_plt = 1;
2193 h->plt.refcount = 1;
2194 }
2195 }
2196 }
2197 else if (r_type != R_X86_64_PC32_BND
2198 && r_type != R_X86_64_PC64)
2199 {
2200 h->pointer_equality_needed = 1;
2201 /* At run-time, R_X86_64_64 can be resolved for both
2202 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2203 can only be resolved for x32. */
2204 if ((sec->flags & SEC_READONLY) == 0
2205 && (r_type == R_X86_64_64
2206 || (!ABI_64_P (abfd)
2207 && (r_type == R_X86_64_32
2208 || r_type == R_X86_64_32S))))
2209 func_pointer_ref = TRUE;
2210 }
2211
2212 if (!func_pointer_ref)
2213 {
2214 /* If this reloc is in a read-only section, we might
2215 need a copy reloc. We can't check reliably at this
2216 stage whether the section is read-only, as input
2217 sections have not yet been mapped to output sections.
2218 Tentatively set the flag for now, and correct in
2219 adjust_dynamic_symbol. */
2220 h->non_got_ref = 1;
2221
2222 /* We may need a .plt entry if the symbol is a function
2223 defined in a shared lib or is a function referenced
2224 from the code or read-only section. */
2225 if (!h->def_regular
2226 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2227 h->plt.refcount = 1;
2228 }
2229 }
2230
2231 size_reloc = FALSE;
2232 do_size:
2233 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2234 htab->pointer_r_type))
2235 {
2236 struct elf_dyn_relocs *p;
2237 struct elf_dyn_relocs **head;
2238
2239 /* We must copy these reloc types into the output file.
2240 Create a reloc section in dynobj and make room for
2241 this reloc. */
2242 if (sreloc == NULL)
2243 {
2244 sreloc = _bfd_elf_make_dynamic_reloc_section
2245 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2246 abfd, /*rela?*/ TRUE);
2247
2248 if (sreloc == NULL)
2249 goto error_return;
2250 }
2251
2252 /* If this is a global symbol, we count the number of
2253 relocations we need for this symbol. */
2254 if (h != NULL)
2255 head = &eh->dyn_relocs;
2256 else
2257 {
2258 /* Track dynamic relocs needed for local syms too.
2259 We really need local syms available to do this
2260 easily. Oh well. */
2261 asection *s;
2262 void **vpp;
2263
2264 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2265 abfd, r_symndx);
2266 if (isym == NULL)
2267 goto error_return;
2268
2269 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2270 if (s == NULL)
2271 s = sec;
2272
2273 /* Beware of type punned pointers vs strict aliasing
2274 rules. */
2275 vpp = &(elf_section_data (s)->local_dynrel);
2276 head = (struct elf_dyn_relocs **)vpp;
2277 }
2278
2279 p = *head;
2280 if (p == NULL || p->sec != sec)
2281 {
2282 bfd_size_type amt = sizeof *p;
2283
2284 p = ((struct elf_dyn_relocs *)
2285 bfd_alloc (htab->elf.dynobj, amt));
2286 if (p == NULL)
2287 goto error_return;
2288 p->next = *head;
2289 *head = p;
2290 p->sec = sec;
2291 p->count = 0;
2292 p->pc_count = 0;
2293 }
2294
2295 p->count += 1;
2296 /* Count size relocation as PC-relative relocation. */
2297 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2298 p->pc_count += 1;
2299 }
2300 break;
2301
2302 /* This relocation describes the C++ object vtable hierarchy.
2303 Reconstruct it for later use during GC. */
2304 case R_X86_64_GNU_VTINHERIT:
2305 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2306 goto error_return;
2307 break;
2308
2309 /* This relocation describes which C++ vtable entries are actually
2310 used. Record for later use during GC. */
2311 case R_X86_64_GNU_VTENTRY:
2312 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2313 goto error_return;
2314 break;
2315
2316 default:
2317 break;
2318 }
2319 }
2320
2321 if (elf_section_data (sec)->this_hdr.contents != contents)
2322 {
2323 if (!converted && !info->keep_memory)
2324 free (contents);
2325 else
2326 {
2327 /* Cache the section contents for elf_link_input_bfd if any
2328 load is converted or --no-keep-memory isn't used. */
2329 elf_section_data (sec)->this_hdr.contents = contents;
2330 }
2331 }
2332
2333 /* Cache relocations if any load is converted. */
2334 if (elf_section_data (sec)->relocs != relocs && converted)
2335 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2336
2337 return TRUE;
2338
2339 error_return:
2340 if (elf_section_data (sec)->this_hdr.contents != contents)
2341 free (contents);
2342 sec->check_relocs_failed = 1;
2343 return FALSE;
2344 }
2345
2346 /* Return the relocation value for @tpoff relocation
2347 if STT_TLS virtual address is ADDRESS. */
2348
2349 static bfd_vma
2350 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2351 {
2352 struct elf_link_hash_table *htab = elf_hash_table (info);
2353 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2354 bfd_vma static_tls_size;
2355
2356   /* If tls_sec is NULL, we should have signalled an error already.  */
2357 if (htab->tls_sec == NULL)
2358 return 0;
2359
2360 /* Consider special static TLS alignment requirements. */
2361 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
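  /* x86-64 uses TLS variant II: the static TLS block sits just below
     the thread pointer, so the @tpoff value returned here is negative.  */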
2362 return address - static_tls_size - htab->tls_sec->vma;
2363 }
2364
2365 /* Relocate an x86_64 ELF section. */
2366
2367 static bfd_boolean
2368 elf_x86_64_relocate_section (bfd *output_bfd,
2369 struct bfd_link_info *info,
2370 bfd *input_bfd,
2371 asection *input_section,
2372 bfd_byte *contents,
2373 Elf_Internal_Rela *relocs,
2374 Elf_Internal_Sym *local_syms,
2375 asection **local_sections)
2376 {
2377 struct elf_x86_link_hash_table *htab;
2378 Elf_Internal_Shdr *symtab_hdr;
2379 struct elf_link_hash_entry **sym_hashes;
2380 bfd_vma *local_got_offsets;
2381 bfd_vma *local_tlsdesc_gotents;
2382 Elf_Internal_Rela *rel;
2383 Elf_Internal_Rela *wrel;
2384 Elf_Internal_Rela *relend;
2385 unsigned int plt_entry_size;
2386
2387 /* Skip if check_relocs failed. */
2388 if (input_section->check_relocs_failed)
2389 return FALSE;
2390
2391 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2392 if (htab == NULL)
2393 return FALSE;
2394
2395 if (!is_x86_elf (input_bfd, htab))
2396 {
2397 bfd_set_error (bfd_error_wrong_format);
2398 return FALSE;
2399 }
2400
2401 plt_entry_size = htab->plt.plt_entry_size;
2402 symtab_hdr = &elf_symtab_hdr (input_bfd);
2403 sym_hashes = elf_sym_hashes (input_bfd);
2404 local_got_offsets = elf_local_got_offsets (input_bfd);
2405 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2406
2407 _bfd_x86_elf_set_tls_module_base (info);
2408
2409 rel = wrel = relocs;
2410 relend = relocs + input_section->reloc_count;
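  /* WREL trails REL so that relocations dropped below (for example
     those against discarded sections under ld -r) can be squeezed out
     of the array in place.  */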
2411 for (; rel < relend; wrel++, rel++)
2412 {
2413 unsigned int r_type, r_type_tls;
2414 reloc_howto_type *howto;
2415 unsigned long r_symndx;
2416 struct elf_link_hash_entry *h;
2417 struct elf_x86_link_hash_entry *eh;
2418 Elf_Internal_Sym *sym;
2419 asection *sec;
2420 bfd_vma off, offplt, plt_offset;
2421 bfd_vma relocation;
2422 bfd_boolean unresolved_reloc;
2423 bfd_reloc_status_type r;
2424 int tls_type;
2425 asection *base_got, *resolved_plt;
2426 bfd_vma st_size;
2427 bfd_boolean resolved_to_zero;
2428 bfd_boolean relative_reloc;
2429 bfd_boolean converted_reloc;
2430 bfd_boolean need_copy_reloc_in_pie;
2431 bfd_boolean no_copyreloc_p;
2432
2433 r_type = ELF32_R_TYPE (rel->r_info);
2434 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2435 || r_type == (int) R_X86_64_GNU_VTENTRY)
2436 {
2437 if (wrel != rel)
2438 *wrel = *rel;
2439 continue;
2440 }
2441
2442 r_symndx = htab->r_sym (rel->r_info);
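      /* elf_x86_64_convert_load_reloc tagged converted GOTPCREL loads
	 with R_X86_64_converted_reloc_bit in check_relocs; strip the
	 marker to recover the real relocation type.  */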
2443 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2444 if (converted_reloc)
2445 {
2446 r_type &= ~R_X86_64_converted_reloc_bit;
2447 rel->r_info = htab->r_info (r_symndx, r_type);
2448 }
2449
2450 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2451 if (howto == NULL)
2452 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2453
2454 h = NULL;
2455 sym = NULL;
2456 sec = NULL;
2457 unresolved_reloc = FALSE;
2458 if (r_symndx < symtab_hdr->sh_info)
2459 {
2460 sym = local_syms + r_symndx;
2461 sec = local_sections[r_symndx];
2462
2463 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2464 &sec, rel);
2465 st_size = sym->st_size;
2466
2467 /* Relocate against local STT_GNU_IFUNC symbol. */
2468 if (!bfd_link_relocatable (info)
2469 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2470 {
2471 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2472 rel, FALSE);
2473 if (h == NULL)
2474 abort ();
2475
2476 /* Set STT_GNU_IFUNC symbol value. */
2477 h->root.u.def.value = sym->st_value;
2478 h->root.u.def.section = sec;
2479 }
2480 }
2481 else
2482 {
2483 bfd_boolean warned ATTRIBUTE_UNUSED;
2484 bfd_boolean ignored ATTRIBUTE_UNUSED;
2485
2486 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2487 r_symndx, symtab_hdr, sym_hashes,
2488 h, sec, relocation,
2489 unresolved_reloc, warned, ignored);
2490 st_size = h->size;
2491 }
2492
2493 if (sec != NULL && discarded_section (sec))
2494 {
2495 _bfd_clear_contents (howto, input_bfd, input_section,
2496 contents, rel->r_offset);
2497 wrel->r_offset = rel->r_offset;
2498 wrel->r_info = 0;
2499 wrel->r_addend = 0;
2500
2501 /* For ld -r, remove relocations in debug sections against
2502 sections defined in discarded sections. Not done for
2503 	     eh_frame, whose editing code expects them to be present.  */
2504 if (bfd_link_relocatable (info)
2505 && (input_section->flags & SEC_DEBUGGING))
2506 wrel--;
2507
2508 continue;
2509 }
2510
2511 if (bfd_link_relocatable (info))
2512 {
2513 if (wrel != rel)
2514 *wrel = *rel;
2515 continue;
2516 }
2517
2518 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2519 {
2520 if (r_type == R_X86_64_64)
2521 {
2522 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2523 zero-extend it to 64bit if addend is zero. */
2524 r_type = R_X86_64_32;
2525 memset (contents + rel->r_offset + 4, 0, 4);
2526 }
2527 else if (r_type == R_X86_64_SIZE64)
2528 {
2529 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2530 zero-extend it to 64bit if addend is zero. */
2531 r_type = R_X86_64_SIZE32;
2532 memset (contents + rel->r_offset + 4, 0, 4);
2533 }
2534 }
2535
2536 eh = (struct elf_x86_link_hash_entry *) h;
2537
2538 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2539 it here if it is defined in a non-shared object. */
2540 if (h != NULL
2541 && h->type == STT_GNU_IFUNC
2542 && h->def_regular)
2543 {
2544 bfd_vma plt_index;
2545 const char *name;
2546
2547 if ((input_section->flags & SEC_ALLOC) == 0)
2548 {
2549 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2550 STT_GNU_IFUNC symbol as STT_FUNC. */
2551 if (elf_section_type (input_section) == SHT_NOTE)
2552 goto skip_ifunc;
2553 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2554 sections because such sections are not SEC_ALLOC and
2555 thus ld.so will not process them. */
2556 if ((input_section->flags & SEC_DEBUGGING) != 0)
2557 continue;
2558 abort ();
2559 }
2560
2561 switch (r_type)
2562 {
2563 default:
2564 break;
2565
2566 case R_X86_64_GOTPCREL:
2567 case R_X86_64_GOTPCRELX:
2568 case R_X86_64_REX_GOTPCRELX:
2569 case R_X86_64_GOTPCREL64:
2570 base_got = htab->elf.sgot;
2571 off = h->got.offset;
2572
2573 if (base_got == NULL)
2574 abort ();
2575
2576 if (off == (bfd_vma) -1)
2577 {
2578 /* We can't use h->got.offset here to save state, or
2579 even just remember the offset, as finish_dynamic_symbol
2580 would use that as offset into .got. */
2581
2582 if (h->plt.offset == (bfd_vma) -1)
2583 abort ();
2584
2585 if (htab->elf.splt != NULL)
2586 {
2587 plt_index = (h->plt.offset / plt_entry_size
2588 - htab->plt.has_plt0);
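		  /* The first three .got.plt entries are reserved (the
		     address of _DYNAMIC plus two slots filled in by the
		     dynamic linker), so PLT slot N owns .got.plt entry
		     N + 3.  */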
2589 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2590 base_got = htab->elf.sgotplt;
2591 }
2592 else
2593 {
2594 plt_index = h->plt.offset / plt_entry_size;
2595 off = plt_index * GOT_ENTRY_SIZE;
2596 base_got = htab->elf.igotplt;
2597 }
2598
2599 if (h->dynindx == -1
2600 || h->forced_local
2601 || info->symbolic)
2602 {
2603 		  /* This references the local definition.  We must
2604 initialize this entry in the global offset table.
2605 Since the offset must always be a multiple of 8,
2606 we use the least significant bit to record
2607 whether we have initialized it already.
2608
2609 When doing a dynamic link, we create a .rela.got
2610 relocation entry to initialize the value. This
2611 is done in the finish_dynamic_symbol routine. */
2612 if ((off & 1) != 0)
2613 off &= ~1;
2614 else
2615 {
2616 bfd_put_64 (output_bfd, relocation,
2617 base_got->contents + off);
2618 /* Note that this is harmless for the GOTPLT64
2619 case, as -1 | 1 still is -1. */
2620 h->got.offset |= 1;
2621 }
2622 }
2623 }
2624
2625 relocation = (base_got->output_section->vma
2626 + base_got->output_offset + off);
2627
2628 goto do_relocation;
2629 }
2630
2631 if (h->plt.offset == (bfd_vma) -1)
2632 {
2633 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2634 if (r_type == htab->pointer_r_type
2635 && (input_section->flags & SEC_CODE) == 0)
2636 goto do_ifunc_pointer;
2637 goto bad_ifunc_reloc;
2638 }
2639
2640 /* STT_GNU_IFUNC symbol must go through PLT. */
2641 if (htab->elf.splt != NULL)
2642 {
2643 if (htab->plt_second != NULL)
2644 {
2645 resolved_plt = htab->plt_second;
2646 plt_offset = eh->plt_second.offset;
2647 }
2648 else
2649 {
2650 resolved_plt = htab->elf.splt;
2651 plt_offset = h->plt.offset;
2652 }
2653 }
2654 else
2655 {
2656 resolved_plt = htab->elf.iplt;
2657 plt_offset = h->plt.offset;
2658 }
2659
2660 relocation = (resolved_plt->output_section->vma
2661 + resolved_plt->output_offset + plt_offset);
2662
2663 switch (r_type)
2664 {
2665 default:
2666 bad_ifunc_reloc:
2667 if (h->root.root.string)
2668 name = h->root.root.string;
2669 else
2670 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2671 NULL);
2672 _bfd_error_handler
2673 /* xgettext:c-format */
2674 (_("%pB: relocation %s against STT_GNU_IFUNC "
2675 "symbol `%s' isn't supported"), input_bfd,
2676 howto->name, name);
2677 bfd_set_error (bfd_error_bad_value);
2678 return FALSE;
2679
2680 case R_X86_64_32S:
2681 if (bfd_link_pic (info))
2682 abort ();
2683 goto do_relocation;
2684
2685 case R_X86_64_32:
2686 if (ABI_64_P (output_bfd))
2687 goto do_relocation;
2688 /* FALLTHROUGH */
2689 case R_X86_64_64:
2690 do_ifunc_pointer:
2691 if (rel->r_addend != 0)
2692 {
2693 if (h->root.root.string)
2694 name = h->root.root.string;
2695 else
2696 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2697 sym, NULL);
2698 _bfd_error_handler
2699 /* xgettext:c-format */
2700 (_("%pB: relocation %s against STT_GNU_IFUNC "
2701 "symbol `%s' has non-zero addend: %" PRId64),
2702 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2703 bfd_set_error (bfd_error_bad_value);
2704 return FALSE;
2705 }
2706
2707 	      /* Generate dynamic relocation only when there is a
2708 non-GOT reference in a shared object or there is no
2709 PLT. */
2710 if ((bfd_link_pic (info) && h->non_got_ref)
2711 || h->plt.offset == (bfd_vma) -1)
2712 {
2713 Elf_Internal_Rela outrel;
2714 asection *sreloc;
2715
2716 /* Need a dynamic relocation to get the real function
2717 address. */
2718 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2719 info,
2720 input_section,
2721 rel->r_offset);
2722 if (outrel.r_offset == (bfd_vma) -1
2723 || outrel.r_offset == (bfd_vma) -2)
2724 abort ();
2725
2726 outrel.r_offset += (input_section->output_section->vma
2727 + input_section->output_offset);
2728
2729 if (POINTER_LOCAL_IFUNC_P (info, h))
2730 {
2731 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2732 h->root.root.string,
2733 h->root.u.def.section->owner);
2734
2735 /* This symbol is resolved locally. */
2736 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2737 outrel.r_addend = (h->root.u.def.value
2738 + h->root.u.def.section->output_section->vma
2739 + h->root.u.def.section->output_offset);
2740 }
2741 else
2742 {
2743 outrel.r_info = htab->r_info (h->dynindx, r_type);
2744 outrel.r_addend = 0;
2745 }
2746
2747 /* Dynamic relocations are stored in
2748 1. .rela.ifunc section in PIC object.
2749 2. .rela.got section in dynamic executable.
2750 3. .rela.iplt section in static executable. */
2751 if (bfd_link_pic (info))
2752 sreloc = htab->elf.irelifunc;
2753 else if (htab->elf.splt != NULL)
2754 sreloc = htab->elf.srelgot;
2755 else
2756 sreloc = htab->elf.irelplt;
2757 elf_append_rela (output_bfd, sreloc, &outrel);
2758
2759 /* If this reloc is against an external symbol, we
2760 do not want to fiddle with the addend. Otherwise,
2761 we need to include the symbol value so that it
2762 becomes an addend for the dynamic reloc. For an
2763 internal symbol, we have updated addend. */
2764 continue;
2765 }
2766 /* FALLTHROUGH */
2767 case R_X86_64_PC32:
2768 case R_X86_64_PC32_BND:
2769 case R_X86_64_PC64:
2770 case R_X86_64_PLT32:
2771 case R_X86_64_PLT32_BND:
2772 goto do_relocation;
2773 }
2774 }
2775
2776 skip_ifunc:
2777 resolved_to_zero = (eh != NULL
2778 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2779
2780 /* When generating a shared object, the relocations handled here are
2781 copied into the output file to be resolved at run time. */
2782 switch (r_type)
2783 {
2784 case R_X86_64_GOT32:
2785 case R_X86_64_GOT64:
2786 /* Relocation is to the entry for this symbol in the global
2787 offset table. */
2788 case R_X86_64_GOTPCREL:
2789 case R_X86_64_GOTPCRELX:
2790 case R_X86_64_REX_GOTPCRELX:
2791 case R_X86_64_GOTPCREL64:
2792 /* Use global offset table entry as symbol value. */
2793 case R_X86_64_GOTPLT64:
2794 /* This is obsolete and treated the same as GOT64. */
2795 base_got = htab->elf.sgot;
2796
2797 if (htab->elf.sgot == NULL)
2798 abort ();
2799
2800 relative_reloc = FALSE;
2801 if (h != NULL)
2802 {
2803 off = h->got.offset;
2804 if (h->needs_plt
2805 && h->plt.offset != (bfd_vma)-1
2806 && off == (bfd_vma)-1)
2807 {
2808 /* We can't use h->got.offset here to save
2809 state, or even just remember the offset, as
2810 finish_dynamic_symbol would use that as offset into
2811 .got. */
2812 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2813 - htab->plt.has_plt0);
2814 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2815 base_got = htab->elf.sgotplt;
2816 }
2817
2818 if (RESOLVED_LOCALLY_P (info, h, htab))
2819 {
2820 /* We must initialize this entry in the global offset
2821 table. Since the offset must always be a multiple
2822 of 8, we use the least significant bit to record
2823 whether we have initialized it already.
2824
2825 When doing a dynamic link, we create a .rela.got
2826 relocation entry to initialize the value. This is
2827 done in the finish_dynamic_symbol routine. */
2828 if ((off & 1) != 0)
2829 off &= ~1;
2830 else
2831 {
2832 bfd_put_64 (output_bfd, relocation,
2833 base_got->contents + off);
2834 /* Note that this is harmless for the GOTPLT64 case,
2835 as -1 | 1 still is -1. */
2836 h->got.offset |= 1;
2837
2838 if (GENERATE_RELATIVE_RELOC_P (info, h))
2839 {
2840 /* If this symbol isn't dynamic in PIC,
2841 generate R_X86_64_RELATIVE here. */
2842 eh->no_finish_dynamic_symbol = 1;
2843 relative_reloc = TRUE;
2844 }
2845 }
2846 }
2847 else
2848 unresolved_reloc = FALSE;
2849 }
2850 else
2851 {
2852 if (local_got_offsets == NULL)
2853 abort ();
2854
2855 off = local_got_offsets[r_symndx];
2856
2857 /* The offset must always be a multiple of 8. We use
2858 the least significant bit to record whether we have
2859 already generated the necessary reloc. */
2860 if ((off & 1) != 0)
2861 off &= ~1;
2862 else
2863 {
2864 bfd_put_64 (output_bfd, relocation,
2865 base_got->contents + off);
2866 local_got_offsets[r_symndx] |= 1;
2867
2868 if (bfd_link_pic (info))
2869 relative_reloc = TRUE;
2870 }
2871 }
2872
2873 if (relative_reloc)
2874 {
2875 asection *s;
2876 Elf_Internal_Rela outrel;
2877
2878 /* We need to generate a R_X86_64_RELATIVE reloc
2879 for the dynamic linker. */
2880 s = htab->elf.srelgot;
2881 if (s == NULL)
2882 abort ();
2883
2884 outrel.r_offset = (base_got->output_section->vma
2885 + base_got->output_offset
2886 + off);
2887 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2888 outrel.r_addend = relocation;
2889 elf_append_rela (output_bfd, s, &outrel);
2890 }
2891
2892 if (off >= (bfd_vma) -2)
2893 abort ();
2894
2895 relocation = base_got->output_section->vma
2896 + base_got->output_offset + off;
2897 if (r_type != R_X86_64_GOTPCREL
2898 && r_type != R_X86_64_GOTPCRELX
2899 && r_type != R_X86_64_REX_GOTPCRELX
2900 && r_type != R_X86_64_GOTPCREL64)
2901 relocation -= htab->elf.sgotplt->output_section->vma
2902 - htab->elf.sgotplt->output_offset;
2903
2904 break;
2905
2906 case R_X86_64_GOTOFF64:
2907 /* Relocation is relative to the start of the global offset
2908 table. */
2909
2910 	  /* Make sure this isn't a protected function or data
2911 	     symbol when building a shared library, since it may not be local
2912 	     when used as a function address or with a copy relocation.  We
2913 	     also need to make sure the symbol is referenced locally.  */
2914 if (bfd_link_pic (info) && h)
2915 {
2916 if (!h->def_regular)
2917 {
2918 const char *v;
2919
2920 switch (ELF_ST_VISIBILITY (h->other))
2921 {
2922 case STV_HIDDEN:
2923 v = _("hidden symbol");
2924 break;
2925 case STV_INTERNAL:
2926 v = _("internal symbol");
2927 break;
2928 case STV_PROTECTED:
2929 v = _("protected symbol");
2930 break;
2931 default:
2932 v = _("symbol");
2933 break;
2934 }
2935
2936 _bfd_error_handler
2937 /* xgettext:c-format */
2938 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2939 " `%s' can not be used when making a shared object"),
2940 input_bfd, v, h->root.root.string);
2941 bfd_set_error (bfd_error_bad_value);
2942 return FALSE;
2943 }
2944 else if (!bfd_link_executable (info)
2945 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2946 && (h->type == STT_FUNC
2947 || h->type == STT_OBJECT)
2948 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2949 {
2950 _bfd_error_handler
2951 /* xgettext:c-format */
2952 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2953 " `%s' can not be used when making a shared object"),
2954 input_bfd,
2955 h->type == STT_FUNC ? "function" : "data",
2956 h->root.root.string);
2957 bfd_set_error (bfd_error_bad_value);
2958 return FALSE;
2959 }
2960 }
2961
2962 /* Note that sgot is not involved in this
2963 calculation. We always want the start of .got.plt. If we
2964 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2965 permitted by the ABI, we might have to change this
2966 calculation. */
2967 relocation -= htab->elf.sgotplt->output_section->vma
2968 + htab->elf.sgotplt->output_offset;
2969 break;
2970
2971 case R_X86_64_GOTPC32:
2972 case R_X86_64_GOTPC64:
2973 /* Use global offset table as symbol value. */
2974 relocation = htab->elf.sgotplt->output_section->vma
2975 + htab->elf.sgotplt->output_offset;
2976 unresolved_reloc = FALSE;
2977 break;
2978
2979 case R_X86_64_PLTOFF64:
2980 /* Relocation is PLT entry relative to GOT. For local
2981 symbols it's the symbol itself relative to GOT. */
2982 if (h != NULL
2983 /* See PLT32 handling. */
2984 && (h->plt.offset != (bfd_vma) -1
2985 || eh->plt_got.offset != (bfd_vma) -1)
2986 && htab->elf.splt != NULL)
2987 {
2988 if (eh->plt_got.offset != (bfd_vma) -1)
2989 {
2990 /* Use the GOT PLT. */
2991 resolved_plt = htab->plt_got;
2992 plt_offset = eh->plt_got.offset;
2993 }
2994 else if (htab->plt_second != NULL)
2995 {
2996 resolved_plt = htab->plt_second;
2997 plt_offset = eh->plt_second.offset;
2998 }
2999 else
3000 {
3001 resolved_plt = htab->elf.splt;
3002 plt_offset = h->plt.offset;
3003 }
3004
3005 relocation = (resolved_plt->output_section->vma
3006 + resolved_plt->output_offset
3007 + plt_offset);
3008 unresolved_reloc = FALSE;
3009 }
3010
3011 relocation -= htab->elf.sgotplt->output_section->vma
3012 + htab->elf.sgotplt->output_offset;
3013 break;
3014
3015 case R_X86_64_PLT32:
3016 case R_X86_64_PLT32_BND:
3017 /* Relocation is to the entry for this symbol in the
3018 procedure linkage table. */
3019
3020 /* Resolve a PLT32 reloc against a local symbol directly,
3021 without using the procedure linkage table. */
3022 if (h == NULL)
3023 break;
3024
3025 if ((h->plt.offset == (bfd_vma) -1
3026 && eh->plt_got.offset == (bfd_vma) -1)
3027 || htab->elf.splt == NULL)
3028 {
3029 /* We didn't make a PLT entry for this symbol. This
3030 happens when statically linking PIC code, or when
3031 using -Bsymbolic. */
3032 break;
3033 }
3034
3035 use_plt:
3036 if (h->plt.offset != (bfd_vma) -1)
3037 {
3038 if (htab->plt_second != NULL)
3039 {
3040 resolved_plt = htab->plt_second;
3041 plt_offset = eh->plt_second.offset;
3042 }
3043 else
3044 {
3045 resolved_plt = htab->elf.splt;
3046 plt_offset = h->plt.offset;
3047 }
3048 }
3049 else
3050 {
3051 /* Use the GOT PLT. */
3052 resolved_plt = htab->plt_got;
3053 plt_offset = eh->plt_got.offset;
3054 }
3055
3056 relocation = (resolved_plt->output_section->vma
3057 + resolved_plt->output_offset
3058 + plt_offset);
3059 unresolved_reloc = FALSE;
3060 break;
3061
3062 case R_X86_64_SIZE32:
3063 case R_X86_64_SIZE64:
3064 /* Set to symbol size. */
3065 relocation = st_size;
3066 goto direct;
3067
3068 case R_X86_64_PC8:
3069 case R_X86_64_PC16:
3070 case R_X86_64_PC32:
3071 case R_X86_64_PC32_BND:
3072 	  /* Don't complain about -fPIC if the symbol is undefined when
3073 	     building an executable, unless it is an unresolved weak symbol,
3074 	     references a dynamic definition in a PIE, or -z nocopyreloc
3075 	     is used.  */
3076 no_copyreloc_p
3077 = (info->nocopyreloc
3078 || (h != NULL
3079 && !h->root.linker_def
3080 && !h->root.ldscript_def
3081 && eh->def_protected
3082 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3083
3084 if ((input_section->flags & SEC_ALLOC) != 0
3085 && (input_section->flags & SEC_READONLY) != 0
3086 && h != NULL
3087 && ((bfd_link_executable (info)
3088 && ((h->root.type == bfd_link_hash_undefweak
3089 && (eh == NULL
3090 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3091 eh)))
3092 || (bfd_link_pie (info)
3093 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3094 && h->def_dynamic)
3095 || (no_copyreloc_p
3096 && h->def_dynamic
3097 && !(h->root.u.def.section->flags & SEC_CODE))))
3098 || bfd_link_dll (info)))
3099 {
3100 bfd_boolean fail = FALSE;
3101 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3102 {
3103 /* Symbol is referenced locally. Make sure it is
3104 defined locally. */
3105 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3106 }
3107 else if (bfd_link_pie (info))
3108 {
3109 /* We can only use PC-relative relocations in PIE
3110 from non-code sections. */
3111 if (h->type == STT_FUNC
3112 && (sec->flags & SEC_CODE) != 0)
3113 fail = TRUE;
3114 }
3115 else if (no_copyreloc_p || bfd_link_dll (info))
3116 {
3117 		  /* The symbol doesn't need a copy reloc and isn't
3118 		     referenced locally.  Don't allow PC-relative
3119 		     relocations against default-visibility and protected
3120 		     symbols since the address of a protected function
3121 		     and the location of protected data may not be in
3122 		     the shared object.  */
3123 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3124 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3125 }
3126
3127 if (fail)
3128 return elf_x86_64_need_pic (info, input_bfd, input_section,
3129 h, NULL, NULL, howto);
3130 }
3131 	  /* Since the x86-64 PLT is PC-relative, a PLT entry can be used
3132 	     as the function address in a PIE.  */
3133 else if (h != NULL
3134 && (input_section->flags & SEC_CODE) == 0
3135 && bfd_link_pie (info)
3136 && h->type == STT_FUNC
3137 && !h->def_regular
3138 && h->def_dynamic)
3139 goto use_plt;
3140 /* Fall through. */
3141
3142 case R_X86_64_8:
3143 case R_X86_64_16:
3144 case R_X86_64_32:
3145 case R_X86_64_PC64:
3146 case R_X86_64_64:
3147 /* FIXME: The ABI says the linker should make sure the value is
3148 	     the same when it's zero-extended to 64 bits.  */
3149
3150 direct:
3151 if ((input_section->flags & SEC_ALLOC) == 0)
3152 break;
3153
3154 need_copy_reloc_in_pie = (bfd_link_pie (info)
3155 && h != NULL
3156 && (h->needs_copy
3157 || eh->needs_copy
3158 || (h->root.type
3159 == bfd_link_hash_undefined))
3160 && (X86_PCREL_TYPE_P (r_type)
3161 || X86_SIZE_TYPE_P (r_type)));
3162
3163 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3164 need_copy_reloc_in_pie,
3165 resolved_to_zero, FALSE))
3166 {
3167 Elf_Internal_Rela outrel;
3168 bfd_boolean skip, relocate;
3169 asection *sreloc;
3170
3171 /* When generating a shared object, these relocations
3172 are copied into the output file to be resolved at run
3173 time. */
3174 skip = FALSE;
3175 relocate = FALSE;
3176
3177 outrel.r_offset =
3178 _bfd_elf_section_offset (output_bfd, info, input_section,
3179 rel->r_offset);
3180 if (outrel.r_offset == (bfd_vma) -1)
3181 skip = TRUE;
3182 else if (outrel.r_offset == (bfd_vma) -2)
3183 skip = TRUE, relocate = TRUE;
3184
3185 outrel.r_offset += (input_section->output_section->vma
3186 + input_section->output_offset);
3187
3188 if (skip)
3189 memset (&outrel, 0, sizeof outrel);
3190
3191 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3192 {
3193 outrel.r_info = htab->r_info (h->dynindx, r_type);
3194 outrel.r_addend = rel->r_addend;
3195 }
3196 else
3197 {
3198 /* This symbol is local, or marked to become local.
3199 When relocation overflow check is disabled, we
3200 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3201 if (r_type == htab->pointer_r_type
3202 || (r_type == R_X86_64_32
3203 && htab->params->no_reloc_overflow_check))
3204 {
3205 relocate = TRUE;
3206 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3207 outrel.r_addend = relocation + rel->r_addend;
3208 }
3209 else if (r_type == R_X86_64_64
3210 && !ABI_64_P (output_bfd))
3211 {
3212 relocate = TRUE;
3213 outrel.r_info = htab->r_info (0,
3214 R_X86_64_RELATIVE64);
3215 outrel.r_addend = relocation + rel->r_addend;
3216 /* Check addend overflow. */
3217 if ((outrel.r_addend & 0x80000000)
3218 != (rel->r_addend & 0x80000000))
3219 {
3220 const char *name;
3221 int addend = rel->r_addend;
3222 if (h && h->root.root.string)
3223 name = h->root.root.string;
3224 else
3225 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3226 sym, NULL);
3227 _bfd_error_handler
3228 /* xgettext:c-format */
3229 (_("%pB: addend %s%#x in relocation %s against "
3230 "symbol `%s' at %#" PRIx64
3231 " in section `%pA' is out of range"),
3232 input_bfd, addend < 0 ? "-" : "", addend,
3233 howto->name, name, (uint64_t) rel->r_offset,
3234 input_section);
3235 bfd_set_error (bfd_error_bad_value);
3236 return FALSE;
3237 }
3238 }
3239 else
3240 {
3241 long sindx;
3242
3243 if (bfd_is_abs_section (sec))
3244 sindx = 0;
3245 else if (sec == NULL || sec->owner == NULL)
3246 {
3247 bfd_set_error (bfd_error_bad_value);
3248 return FALSE;
3249 }
3250 else
3251 {
3252 asection *osec;
3253
3254 /* We are turning this relocation into one
3255 against a section symbol. It would be
3256 proper to subtract the symbol's value,
3257 osec->vma, from the emitted reloc addend,
3258 but ld.so expects buggy relocs. */
3259 osec = sec->output_section;
3260 sindx = elf_section_data (osec)->dynindx;
3261 if (sindx == 0)
3262 {
3263 asection *oi = htab->elf.text_index_section;
3264 sindx = elf_section_data (oi)->dynindx;
3265 }
3266 BFD_ASSERT (sindx != 0);
3267 }
3268
3269 outrel.r_info = htab->r_info (sindx, r_type);
3270 outrel.r_addend = relocation + rel->r_addend;
3271 }
3272 }
3273
3274 sreloc = elf_section_data (input_section)->sreloc;
3275
3276 if (sreloc == NULL || sreloc->contents == NULL)
3277 {
3278 r = bfd_reloc_notsupported;
3279 goto check_relocation_error;
3280 }
3281
3282 elf_append_rela (output_bfd, sreloc, &outrel);
3283
3284 /* If this reloc is against an external symbol, we do
3285 not want to fiddle with the addend. Otherwise, we
3286 need to include the symbol value so that it becomes
3287 an addend for the dynamic reloc. */
3288 if (! relocate)
3289 continue;
3290 }
3291
3292 break;
3293
3294 case R_X86_64_TLSGD:
3295 case R_X86_64_GOTPC32_TLSDESC:
3296 case R_X86_64_TLSDESC_CALL:
3297 case R_X86_64_GOTTPOFF:
3298 tls_type = GOT_UNKNOWN;
3299 if (h == NULL && local_got_offsets)
3300 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3301 else if (h != NULL)
3302 tls_type = elf_x86_hash_entry (h)->tls_type;
3303
3304 r_type_tls = r_type;
3305 if (! elf_x86_64_tls_transition (info, input_bfd,
3306 input_section, contents,
3307 symtab_hdr, sym_hashes,
3308 &r_type_tls, tls_type, rel,
3309 relend, h, r_symndx, TRUE))
3310 return FALSE;
3311
3312 if (r_type_tls == R_X86_64_TPOFF32)
3313 {
3314 bfd_vma roff = rel->r_offset;
3315
3316 BFD_ASSERT (! unresolved_reloc);
3317
3318 if (r_type == R_X86_64_TLSGD)
3319 {
3320 /* GD->LE transition. For 64bit, change
3321 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3322 .word 0x6666; rex64; call __tls_get_addr@PLT
3323 or
3324 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3325 .byte 0x66; rex64
3326 call *__tls_get_addr@GOTPCREL(%rip)
3327 which may be converted to
3328 addr32 call __tls_get_addr
3329 into:
3330 movq %fs:0, %rax
3331 leaq foo@tpoff(%rax), %rax
3332 For 32bit, change
3333 leaq foo@tlsgd(%rip), %rdi
3334 .word 0x6666; rex64; call __tls_get_addr@PLT
3335 or
3336 leaq foo@tlsgd(%rip), %rdi
3337 .byte 0x66; rex64
3338 call *__tls_get_addr@GOTPCREL(%rip)
3339 which may be converted to
3340 addr32 call __tls_get_addr
3341 into:
3342 movl %fs:0, %eax
3343 leaq foo@tpoff(%rax), %rax
3344 For largepic, change:
3345 leaq foo@tlsgd(%rip), %rdi
3346 movabsq $__tls_get_addr@pltoff, %rax
3347 addq %r15, %rax
3348 call *%rax
3349 into:
3350 movq %fs:0, %rax
3351 leaq foo@tpoff(%rax), %rax
3352 nopw 0x0(%rax,%rax,1) */
3353 int largepic = 0;
3354 if (ABI_64_P (output_bfd))
3355 {
3356 if (contents[roff + 5] == 0xb8)
3357 {
3358 if (roff < 3
3359 || (roff - 3 + 22) > input_section->size)
3360 {
3361 corrupt_input:
3362 info->callbacks->einfo
3363 (_("%F%P: corrupt input: %pB\n"),
3364 input_bfd);
3365 return FALSE;
3366 }
3367 memcpy (contents + roff - 3,
3368 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3369 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3370 largepic = 1;
3371 }
3372 else
3373 {
3374 if (roff < 4
3375 || (roff - 4 + 16) > input_section->size)
3376 goto corrupt_input;
3377 memcpy (contents + roff - 4,
3378 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3379 16);
3380 }
3381 }
3382 else
3383 {
3384 if (roff < 3
3385 || (roff - 3 + 15) > input_section->size)
3386 goto corrupt_input;
3387 memcpy (contents + roff - 3,
3388 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3389 15);
3390 }
3391 bfd_put_32 (output_bfd,
3392 elf_x86_64_tpoff (info, relocation),
3393 contents + roff + 8 + largepic);
3394 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3395 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3396 rel++;
3397 wrel++;
3398 continue;
3399 }
3400 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3401 {
3402 /* GDesc -> LE transition.
3403 It's originally something like:
3404 leaq x@tlsdesc(%rip), %rax
3405
3406 Change it to:
3407 movl $x@tpoff, %rax. */
3408
3409 unsigned int val, type;
3410
3411 if (roff < 3)
3412 goto corrupt_input;
3413 type = bfd_get_8 (input_bfd, contents + roff - 3);
3414 val = bfd_get_8 (input_bfd, contents + roff - 1);
3415 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3416 contents + roff - 3);
3417 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3418 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3419 contents + roff - 1);
3420 bfd_put_32 (output_bfd,
3421 elf_x86_64_tpoff (info, relocation),
3422 contents + roff);
3423 continue;
3424 }
3425 else if (r_type == R_X86_64_TLSDESC_CALL)
3426 {
3427 /* GDesc -> LE transition.
3428 It's originally:
3429 call *(%rax)
3430 Turn it into:
3431 xchg %ax,%ax. */
3432 bfd_put_8 (output_bfd, 0x66, contents + roff);
3433 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3434 continue;
3435 }
3436 else if (r_type == R_X86_64_GOTTPOFF)
3437 {
3438 /* IE->LE transition:
3439 For 64bit, originally it can be one of:
3440 movq foo@gottpoff(%rip), %reg
3441 addq foo@gottpoff(%rip), %reg
3442 We change it into:
3443 movq $foo, %reg
3444 leaq foo(%reg), %reg
3445 addq $foo, %reg.
3446 For 32bit, originally it can be one of:
3447 movq foo@gottpoff(%rip), %reg
3448 addl foo@gottpoff(%rip), %reg
3449 We change it into:
3450 movq $foo, %reg
3451 leal foo(%reg), %reg
3452 addl $foo, %reg. */
3453
3454 unsigned int val, type, reg;
3455
3456 if (roff >= 3)
3457 val = bfd_get_8 (input_bfd, contents + roff - 3);
3458 else
3459 {
3460 if (roff < 2)
3461 goto corrupt_input;
3462 val = 0;
3463 }
3464 type = bfd_get_8 (input_bfd, contents + roff - 2);
3465 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3466 reg >>= 3;
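		      /* With RIP-relative addressing mod is 0 and r/m is 5,
			 so the top two ModRM bits are zero and the shift
			 leaves just the destination register number.  */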
3467 if (type == 0x8b)
3468 {
3469 /* movq */
3470 if (val == 0x4c)
3471 {
3472 if (roff < 3)
3473 goto corrupt_input;
3474 bfd_put_8 (output_bfd, 0x49,
3475 contents + roff - 3);
3476 }
3477 else if (!ABI_64_P (output_bfd) && val == 0x44)
3478 {
3479 if (roff < 3)
3480 goto corrupt_input;
3481 bfd_put_8 (output_bfd, 0x41,
3482 contents + roff - 3);
3483 }
3484 bfd_put_8 (output_bfd, 0xc7,
3485 contents + roff - 2);
3486 bfd_put_8 (output_bfd, 0xc0 | reg,
3487 contents + roff - 1);
3488 }
3489 else if (reg == 4)
3490 {
3491 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3492 is special */
3493 if (val == 0x4c)
3494 {
3495 if (roff < 3)
3496 goto corrupt_input;
3497 bfd_put_8 (output_bfd, 0x49,
3498 contents + roff - 3);
3499 }
3500 else if (!ABI_64_P (output_bfd) && val == 0x44)
3501 {
3502 if (roff < 3)
3503 goto corrupt_input;
3504 bfd_put_8 (output_bfd, 0x41,
3505 contents + roff - 3);
3506 }
3507 bfd_put_8 (output_bfd, 0x81,
3508 contents + roff - 2);
3509 bfd_put_8 (output_bfd, 0xc0 | reg,
3510 contents + roff - 1);
3511 }
3512 else
3513 {
3514 /* addq/addl -> leaq/leal */
3515 if (val == 0x4c)
3516 {
3517 if (roff < 3)
3518 goto corrupt_input;
3519 bfd_put_8 (output_bfd, 0x4d,
3520 contents + roff - 3);
3521 }
3522 else if (!ABI_64_P (output_bfd) && val == 0x44)
3523 {
3524 if (roff < 3)
3525 goto corrupt_input;
3526 bfd_put_8 (output_bfd, 0x45,
3527 contents + roff - 3);
3528 }
3529 bfd_put_8 (output_bfd, 0x8d,
3530 contents + roff - 2);
3531 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3532 contents + roff - 1);
3533 }
3534 bfd_put_32 (output_bfd,
3535 elf_x86_64_tpoff (info, relocation),
3536 contents + roff);
3537 continue;
3538 }
3539 else
3540 BFD_ASSERT (FALSE);
3541 }
3542
3543 if (htab->elf.sgot == NULL)
3544 abort ();
3545
3546 if (h != NULL)
3547 {
3548 off = h->got.offset;
3549 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3550 }
3551 else
3552 {
3553 if (local_got_offsets == NULL)
3554 abort ();
3555
3556 off = local_got_offsets[r_symndx];
3557 offplt = local_tlsdesc_gotents[r_symndx];
3558 }
3559
3560 if ((off & 1) != 0)
3561 off &= ~1;
3562 else
3563 {
3564 Elf_Internal_Rela outrel;
3565 int dr_type, indx;
3566 asection *sreloc;
3567
3568 if (htab->elf.srelgot == NULL)
3569 abort ();
3570
3571 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3572
3573 if (GOT_TLS_GDESC_P (tls_type))
3574 {
3575 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3576 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3577 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3578 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3579 + htab->elf.sgotplt->output_offset
3580 + offplt
3581 + htab->sgotplt_jump_table_size);
3582 sreloc = htab->elf.srelplt;
3583 if (indx == 0)
3584 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3585 else
3586 outrel.r_addend = 0;
3587 elf_append_rela (output_bfd, sreloc, &outrel);
3588 }
3589
3590 sreloc = htab->elf.srelgot;
3591
3592 outrel.r_offset = (htab->elf.sgot->output_section->vma
3593 + htab->elf.sgot->output_offset + off);
3594
3595 if (GOT_TLS_GD_P (tls_type))
3596 dr_type = R_X86_64_DTPMOD64;
3597 else if (GOT_TLS_GDESC_P (tls_type))
3598 goto dr_done;
3599 else
3600 dr_type = R_X86_64_TPOFF64;
3601
3602 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3603 outrel.r_addend = 0;
3604 if ((dr_type == R_X86_64_TPOFF64
3605 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3606 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3607 outrel.r_info = htab->r_info (indx, dr_type);
3608
3609 elf_append_rela (output_bfd, sreloc, &outrel);
3610
3611 if (GOT_TLS_GD_P (tls_type))
3612 {
3613 if (indx == 0)
3614 {
3615 BFD_ASSERT (! unresolved_reloc);
3616 bfd_put_64 (output_bfd,
3617 relocation - _bfd_x86_elf_dtpoff_base (info),
3618 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3619 }
3620 else
3621 {
3622 bfd_put_64 (output_bfd, 0,
3623 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3624 outrel.r_info = htab->r_info (indx,
3625 R_X86_64_DTPOFF64);
3626 outrel.r_offset += GOT_ENTRY_SIZE;
3627 elf_append_rela (output_bfd, sreloc,
3628 &outrel);
3629 }
3630 }
3631
3632 dr_done:
3633 if (h != NULL)
3634 h->got.offset |= 1;
3635 else
3636 local_got_offsets[r_symndx] |= 1;
3637 }
3638
3639 if (off >= (bfd_vma) -2
3640 && ! GOT_TLS_GDESC_P (tls_type))
3641 abort ();
3642 if (r_type_tls == r_type)
3643 {
3644 if (r_type == R_X86_64_GOTPC32_TLSDESC
3645 || r_type == R_X86_64_TLSDESC_CALL)
3646 relocation = htab->elf.sgotplt->output_section->vma
3647 + htab->elf.sgotplt->output_offset
3648 + offplt + htab->sgotplt_jump_table_size;
3649 else
3650 relocation = htab->elf.sgot->output_section->vma
3651 + htab->elf.sgot->output_offset + off;
3652 unresolved_reloc = FALSE;
3653 }
3654 else
3655 {
3656 bfd_vma roff = rel->r_offset;
3657
3658 if (r_type == R_X86_64_TLSGD)
3659 {
3660 /* GD->IE transition. For 64bit, change
3661 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3662 .word 0x6666; rex64; call __tls_get_addr@PLT
3663 or
3664 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3665 .byte 0x66; rex64
3666 				 call *__tls_get_addr@GOTPCREL(%rip)
3667 which may be converted to
3668 addr32 call __tls_get_addr
3669 into:
3670 movq %fs:0, %rax
3671 addq foo@gottpoff(%rip), %rax
3672 For 32bit, change
3673 leaq foo@tlsgd(%rip), %rdi
3674 .word 0x6666; rex64; call __tls_get_addr@PLT
3675 or
3676 leaq foo@tlsgd(%rip), %rdi
3677 .byte 0x66; rex64;
3678 call *__tls_get_addr@GOTPCREL(%rip)
3679 which may be converted to
3680 addr32 call __tls_get_addr
3681 into:
3682 movl %fs:0, %eax
3683 addq foo@gottpoff(%rip), %rax
3684 For largepic, change:
3685 leaq foo@tlsgd(%rip), %rdi
3686 movabsq $__tls_get_addr@pltoff, %rax
3687 addq %r15, %rax
3688 call *%rax
3689 into:
3690 movq %fs:0, %rax
3691 			 addq foo@gottpoff(%rip), %rax
3692 nopw 0x0(%rax,%rax,1) */
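		      /* The replacement byte strings below encode exactly the
			 sequences above: 64 48 8b 04 25 <imm32> is
			 movq %fs:0x0, %rax (64 8b 04 25 <imm32> for the 32-bit
			 movl form), 48 03 05 <imm32> is addq 0x0(%rip), %rax
			 with the @gottpoff displacement patched in afterwards,
			 and 66 0f 1f 44 00 00 is the nopw padding used in the
			 largepic case.  */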
3693 int largepic = 0;
3694 if (ABI_64_P (output_bfd))
3695 {
3696 if (contents[roff + 5] == 0xb8)
3697 {
3698 if (roff < 3
3699 || (roff - 3 + 22) > input_section->size)
3700 goto corrupt_input;
3701 memcpy (contents + roff - 3,
3702 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3703 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3704 largepic = 1;
3705 }
3706 else
3707 {
3708 if (roff < 4
3709 || (roff - 4 + 16) > input_section->size)
3710 goto corrupt_input;
3711 memcpy (contents + roff - 4,
3712 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3713 16);
3714 }
3715 }
3716 else
3717 {
3718 if (roff < 3
3719 || (roff - 3 + 15) > input_section->size)
3720 goto corrupt_input;
3721 memcpy (contents + roff - 3,
3722 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3723 15);
3724 }
3725
3726 relocation = (htab->elf.sgot->output_section->vma
3727 + htab->elf.sgot->output_offset + off
3728 - roff
3729 - largepic
3730 - input_section->output_section->vma
3731 - input_section->output_offset
3732 - 12);
3733 bfd_put_32 (output_bfd, relocation,
3734 contents + roff + 8 + largepic);
3735 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3736 rel++;
3737 wrel++;
3738 continue;
3739 }
3740 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3741 {
3742 /* GDesc -> IE transition.
3743 It's originally something like:
3744 leaq x@tlsdesc(%rip), %rax
3745
3746 Change it to:
3747 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3748
3749 /* Now modify the instruction as appropriate. To
3750 turn a leaq into a movq in the form we use it, it
3751 suffices to change the second byte from 0x8d to
3752 0x8b. */
3753 if (roff < 2)
3754 goto corrupt_input;
3755 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3756
3757 bfd_put_32 (output_bfd,
3758 htab->elf.sgot->output_section->vma
3759 + htab->elf.sgot->output_offset + off
3760 - rel->r_offset
3761 - input_section->output_section->vma
3762 - input_section->output_offset
3763 - 4,
3764 contents + roff);
3765 continue;
3766 }
3767 else if (r_type == R_X86_64_TLSDESC_CALL)
3768 {
3769 /* GDesc -> IE transition.
3770 It's originally:
3771 call *(%rax)
3772
3773 Change it to:
3774 xchg %ax, %ax. */
3775
3776 bfd_put_8 (output_bfd, 0x66, contents + roff);
3777 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3778 continue;
3779 }
3780 else
3781 BFD_ASSERT (FALSE);
3782 }
3783 break;
3784
3785 case R_X86_64_TLSLD:
3786 if (! elf_x86_64_tls_transition (info, input_bfd,
3787 input_section, contents,
3788 symtab_hdr, sym_hashes,
3789 &r_type, GOT_UNKNOWN, rel,
3790 relend, h, r_symndx, TRUE))
3791 return FALSE;
3792
3793 if (r_type != R_X86_64_TLSLD)
3794 {
3795 /* LD->LE transition:
3796 leaq foo@tlsld(%rip), %rdi
3797 call __tls_get_addr@PLT
3798 For 64bit, we change it into:
3799 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3800 For 32bit, we change it into:
3801 nopl 0x0(%rax); movl %fs:0, %eax
3802 Or
3803 leaq foo@tlsld(%rip), %rdi;
3804 call *__tls_get_addr@GOTPCREL(%rip)
3805 which may be converted to
3806 addr32 call __tls_get_addr
3807 For 64bit, we change it into:
3808 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3809 For 32bit, we change it into:
3810 nopw 0x0(%rax); movl %fs:0, %eax
3811 For largepic, change:
3812 			 leaq foo@tlsld(%rip), %rdi
3813 movabsq $__tls_get_addr@pltoff, %rax
3814 addq %rbx, %rax
3815 call *%rax
3816 into
3817 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3818 			 movq %fs:0, %rax */
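	      /* In the replacement byte strings below, leading 0x66 bytes are
		 data16 prefixes, the 0f 1f forms are multi-byte nops
		 (optionally with a %cs prefix) used as padding,
		 64 48 8b 04 25 <imm32> is movq %fs:0x0, %rax and
		 64 8b 04 25 <imm32> is movl %fs:0x0, %eax.  */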
3819
3820 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3821 if (ABI_64_P (output_bfd))
3822 {
3823 if ((rel->r_offset + 5) >= input_section->size)
3824 goto corrupt_input;
3825 if (contents[rel->r_offset + 5] == 0xb8)
3826 {
3827 if (rel->r_offset < 3
3828 || (rel->r_offset - 3 + 22) > input_section->size)
3829 goto corrupt_input;
3830 memcpy (contents + rel->r_offset - 3,
3831 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3832 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3833 }
3834 else if (contents[rel->r_offset + 4] == 0xff
3835 || contents[rel->r_offset + 4] == 0x67)
3836 {
3837 if (rel->r_offset < 3
3838 || (rel->r_offset - 3 + 13) > input_section->size)
3839 goto corrupt_input;
3840 memcpy (contents + rel->r_offset - 3,
3841 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3842 13);
3843
3844 }
3845 else
3846 {
3847 if (rel->r_offset < 3
3848 || (rel->r_offset - 3 + 12) > input_section->size)
3849 goto corrupt_input;
3850 memcpy (contents + rel->r_offset - 3,
3851 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3852 }
3853 }
3854 else
3855 {
3856 if ((rel->r_offset + 4) >= input_section->size)
3857 goto corrupt_input;
3858 if (contents[rel->r_offset + 4] == 0xff)
3859 {
3860 if (rel->r_offset < 3
3861 || (rel->r_offset - 3 + 13) > input_section->size)
3862 goto corrupt_input;
3863 memcpy (contents + rel->r_offset - 3,
3864 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3865 13);
3866 }
3867 else
3868 {
3869 if (rel->r_offset < 3
3870 || (rel->r_offset - 3 + 12) > input_section->size)
3871 goto corrupt_input;
3872 memcpy (contents + rel->r_offset - 3,
3873 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3874 }
3875 }
3876 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3877 and R_X86_64_PLTOFF64. */
3878 rel++;
3879 wrel++;
3880 continue;
3881 }
3882
3883 if (htab->elf.sgot == NULL)
3884 abort ();
3885
3886 off = htab->tls_ld_or_ldm_got.offset;
3887 if (off & 1)
3888 off &= ~1;
3889 else
3890 {
3891 Elf_Internal_Rela outrel;
3892
3893 if (htab->elf.srelgot == NULL)
3894 abort ();
3895
3896 outrel.r_offset = (htab->elf.sgot->output_section->vma
3897 + htab->elf.sgot->output_offset + off);
3898
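	      /* A local-dynamic GOT entry spans two slots: the first receives
		 the module ID via the R_X86_64_DTPMOD64 dynamic relocation
		 emitted below, the second stays zero since symbol offsets are
		 applied separately through @dtpoff.  */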
3899 bfd_put_64 (output_bfd, 0,
3900 htab->elf.sgot->contents + off);
3901 bfd_put_64 (output_bfd, 0,
3902 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3903 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3904 outrel.r_addend = 0;
3905 elf_append_rela (output_bfd, htab->elf.srelgot,
3906 &outrel);
3907 htab->tls_ld_or_ldm_got.offset |= 1;
3908 }
3909 relocation = htab->elf.sgot->output_section->vma
3910 + htab->elf.sgot->output_offset + off;
3911 unresolved_reloc = FALSE;
3912 break;
3913
3914 case R_X86_64_DTPOFF32:
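	  /* In an executable the LD sequence has been converted to LE, so a
	     @dtpoff reference in code must be resolved against the thread
	     pointer; references from non-code sections (e.g. debug info)
	     still use the DTP base.  */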
3915 if (!bfd_link_executable (info)
3916 || (input_section->flags & SEC_CODE) == 0)
3917 relocation -= _bfd_x86_elf_dtpoff_base (info);
3918 else
3919 relocation = elf_x86_64_tpoff (info, relocation);
3920 break;
3921
3922 case R_X86_64_TPOFF32:
3923 case R_X86_64_TPOFF64:
3924 BFD_ASSERT (bfd_link_executable (info));
3925 relocation = elf_x86_64_tpoff (info, relocation);
3926 break;
3927
3928 case R_X86_64_DTPOFF64:
3929 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3930 relocation -= _bfd_x86_elf_dtpoff_base (info);
3931 break;
3932
3933 default:
3934 break;
3935 }
3936
3937 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3938 because such sections are not SEC_ALLOC and thus ld.so will
3939 not process them. */
3940 if (unresolved_reloc
3941 && !((input_section->flags & SEC_DEBUGGING) != 0
3942 && h->def_dynamic)
3943 && _bfd_elf_section_offset (output_bfd, info, input_section,
3944 rel->r_offset) != (bfd_vma) -1)
3945 {
3946 switch (r_type)
3947 {
3948 case R_X86_64_32S:
3949 sec = h->root.u.def.section;
3950 if ((info->nocopyreloc
3951 || (eh->def_protected
3952 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3953 && !(h->root.u.def.section->flags & SEC_CODE))
3954 return elf_x86_64_need_pic (info, input_bfd, input_section,
3955 h, NULL, NULL, howto);
3956 /* Fall through. */
3957
3958 default:
3959 _bfd_error_handler
3960 /* xgettext:c-format */
3961 (_("%pB(%pA+%#" PRIx64 "): "
3962 "unresolvable %s relocation against symbol `%s'"),
3963 input_bfd,
3964 input_section,
3965 (uint64_t) rel->r_offset,
3966 howto->name,
3967 h->root.root.string);
3968 return FALSE;
3969 }
3970 }
3971
3972 do_relocation:
3973 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3974 contents, rel->r_offset,
3975 relocation, rel->r_addend);
3976
3977 check_relocation_error:
3978 if (r != bfd_reloc_ok)
3979 {
3980 const char *name;
3981
3982 if (h != NULL)
3983 name = h->root.root.string;
3984 else
3985 {
3986 name = bfd_elf_string_from_elf_section (input_bfd,
3987 symtab_hdr->sh_link,
3988 sym->st_name);
3989 if (name == NULL)
3990 return FALSE;
3991 if (*name == '\0')
3992 name = bfd_section_name (sec);
3993 }
3994
3995 if (r == bfd_reloc_overflow)
3996 {
3997 if (converted_reloc)
3998 {
3999 info->callbacks->einfo
4000 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4001 return FALSE;
4002 }
4003 (*info->callbacks->reloc_overflow)
4004 (info, (h ? &h->root : NULL), name, howto->name,
4005 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4006 }
4007 else
4008 {
4009 _bfd_error_handler
4010 /* xgettext:c-format */
4011 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4012 input_bfd, input_section,
4013 (uint64_t) rel->r_offset, name, (int) r);
4014 return FALSE;
4015 }
4016 }
4017
4018 if (wrel != rel)
4019 *wrel = *rel;
4020 }
4021
4022 if (wrel != rel)
4023 {
4024 Elf_Internal_Shdr *rel_hdr;
4025 size_t deleted = rel - wrel;
4026
4027 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4028 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4029 if (rel_hdr->sh_size == 0)
4030 {
4031 /* It is too late to remove an empty reloc section. Leave
4032 one NONE reloc.
4033 ??? What is wrong with an empty section??? */
4034 rel_hdr->sh_size = rel_hdr->sh_entsize;
4035 deleted -= 1;
4036 }
4037 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4038 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4039 input_section->reloc_count -= deleted;
4040 }
4041
4042 return TRUE;
4043 }
4044
4045 /* Finish up dynamic symbol handling. We set the contents of various
4046 dynamic sections here. */
4047
4048 static bfd_boolean
4049 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4050 struct bfd_link_info *info,
4051 struct elf_link_hash_entry *h,
4052 Elf_Internal_Sym *sym)
4053 {
4054 struct elf_x86_link_hash_table *htab;
4055 bfd_boolean use_plt_second;
4056 struct elf_x86_link_hash_entry *eh;
4057 bfd_boolean local_undefweak;
4058
4059 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4060 if (htab == NULL)
4061 return FALSE;
4062
4063   /* Use the second PLT section only if there is a .plt section.  */
4064 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4065
4066 eh = (struct elf_x86_link_hash_entry *) h;
4067 if (eh->no_finish_dynamic_symbol)
4068 abort ();
4069
4070   /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4071      resolved undefined weak symbols in an executable so that their
4072      references have value 0 at run-time.  */
4073 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4074
4075 if (h->plt.offset != (bfd_vma) -1)
4076 {
4077 bfd_vma plt_index;
4078 bfd_vma got_offset, plt_offset;
4079 Elf_Internal_Rela rela;
4080 bfd_byte *loc;
4081 asection *plt, *gotplt, *relplt, *resolved_plt;
4082 const struct elf_backend_data *bed;
4083 bfd_vma plt_got_pcrel_offset;
4084
4085 /* When building a static executable, use .iplt, .igot.plt and
4086 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4087 if (htab->elf.splt != NULL)
4088 {
4089 plt = htab->elf.splt;
4090 gotplt = htab->elf.sgotplt;
4091 relplt = htab->elf.srelplt;
4092 }
4093 else
4094 {
4095 plt = htab->elf.iplt;
4096 gotplt = htab->elf.igotplt;
4097 relplt = htab->elf.irelplt;
4098 }
4099
4100 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4101
4102 /* Get the index in the procedure linkage table which
4103 corresponds to this symbol. This is the index of this symbol
4104 in all the symbols for which we are making plt entries. The
4105 first entry in the procedure linkage table is reserved.
4106
4107 Get the offset into the .got table of the entry that
4108 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4109 bytes. The first three are reserved for the dynamic linker.
4110
4111 For static executables, we don't reserve anything. */
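      /* For example, assuming the standard lazy PLT (has_plt0 == 1,
	 16-byte entries) and 8-byte GOT entries, the symbol at
	 plt.offset 0x20 is PLT index 1 and uses the .got.plt slot at
	 offset (1 + 3) * 8 == 32.  */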
4112
4113 if (plt == htab->elf.splt)
4114 {
4115 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4116 - htab->plt.has_plt0);
4117 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4118 }
4119 else
4120 {
4121 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4122 got_offset = got_offset * GOT_ENTRY_SIZE;
4123 }
4124
4125 /* Fill in the entry in the procedure linkage table. */
4126 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4127 htab->plt.plt_entry_size);
4128 if (use_plt_second)
4129 {
4130 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4131 htab->non_lazy_plt->plt_entry,
4132 htab->non_lazy_plt->plt_entry_size);
4133
4134 resolved_plt = htab->plt_second;
4135 plt_offset = eh->plt_second.offset;
4136 }
4137 else
4138 {
4139 resolved_plt = plt;
4140 plt_offset = h->plt.offset;
4141 }
4142
4143 /* Insert the relocation positions of the plt section. */
4144
4145       /* Store the PC-relative offset of the GOT entry in the instruction
4146 	 that refers to it, subtracting the size of that instruction.  */
4147 plt_got_pcrel_offset = (gotplt->output_section->vma
4148 + gotplt->output_offset
4149 + got_offset
4150 - resolved_plt->output_section->vma
4151 - resolved_plt->output_offset
4152 - plt_offset
4153 - htab->plt.plt_got_insn_size);
4154
4155 /* Check PC-relative offset overflow in PLT entry. */
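      /* The displacement must fit in a signed 32-bit field; adding
	 0x80000000 maps the valid range [-0x80000000, 0x7fffffff] onto
	 [0, 0xffffffff], so anything larger has overflowed.  */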
4156 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4157 /* xgettext:c-format */
4158 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4159 output_bfd, h->root.root.string);
4160
4161 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4162 (resolved_plt->contents + plt_offset
4163 + htab->plt.plt_got_offset));
4164
4165       /* Fill in the entry in the global offset table; initially this
4166 	 points to the second part of the PLT entry.  Leave the entry
4167 	 as zero for an undefined weak symbol in PIE; no PLT relocation
4168 	 is generated against an undefined weak symbol in PIE.  */
4169 if (!local_undefweak)
4170 {
4171 if (htab->plt.has_plt0)
4172 bfd_put_64 (output_bfd, (plt->output_section->vma
4173 + plt->output_offset
4174 + h->plt.offset
4175 + htab->lazy_plt->plt_lazy_offset),
4176 gotplt->contents + got_offset);
4177
4178 /* Fill in the entry in the .rela.plt section. */
4179 rela.r_offset = (gotplt->output_section->vma
4180 + gotplt->output_offset
4181 + got_offset);
4182 if (PLT_LOCAL_IFUNC_P (info, h))
4183 {
4184 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4185 h->root.root.string,
4186 h->root.u.def.section->owner);
4187
4188 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4189 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4190 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4191 rela.r_addend = (h->root.u.def.value
4192 + h->root.u.def.section->output_section->vma
4193 + h->root.u.def.section->output_offset);
4194 /* R_X86_64_IRELATIVE comes last. */
4195 plt_index = htab->next_irelative_index--;
4196 }
4197 else
4198 {
4199 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4200 rela.r_addend = 0;
4201 plt_index = htab->next_jump_slot_index++;
4202 }
4203
4204 	  /* Don't fill the second and third slots of the PLT entry for
4205 	     static executables or when there is no PLT0.  */
4206 if (plt == htab->elf.splt && htab->plt.has_plt0)
4207 {
4208 bfd_vma plt0_offset
4209 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4210
4211 /* Put relocation index. */
4212 bfd_put_32 (output_bfd, plt_index,
4213 (plt->contents + h->plt.offset
4214 + htab->lazy_plt->plt_reloc_offset));
4215
4216 /* Put offset for jmp .PLT0 and check for overflow. We don't
4217 check relocation index for overflow since branch displacement
4218 will overflow first. */
4219 if (plt0_offset > 0x80000000)
4220 /* xgettext:c-format */
4221 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4222 output_bfd, h->root.root.string);
4223 bfd_put_32 (output_bfd, - plt0_offset,
4224 (plt->contents + h->plt.offset
4225 + htab->lazy_plt->plt_plt_offset));
4226 }
4227
4228 bed = get_elf_backend_data (output_bfd);
4229 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4230 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4231 }
4232 }
4233 else if (eh->plt_got.offset != (bfd_vma) -1)
4234 {
4235 bfd_vma got_offset, plt_offset;
4236 asection *plt, *got;
4237 bfd_boolean got_after_plt;
4238 int32_t got_pcrel_offset;
4239
4240 /* Set the entry in the GOT procedure linkage table. */
4241 plt = htab->plt_got;
4242 got = htab->elf.sgot;
4243 got_offset = h->got.offset;
4244
4245 if (got_offset == (bfd_vma) -1
4246 || (h->type == STT_GNU_IFUNC && h->def_regular)
4247 || plt == NULL
4248 || got == NULL)
4249 abort ();
4250
4251       /* Use the non-lazy PLT entry template for the GOT PLT since they
4252 	 are identical.  */
4253 /* Fill in the entry in the GOT procedure linkage table. */
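      /* A non-lazy PLT entry is essentially a single indirect jump through
	 the GOT, jmpq *name@GOTPCREL(%rip), padded with nops to the entry
	 size; only the 32-bit GOT displacement patched in below at
	 plt_got_offset differs between entries.  */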
4254 plt_offset = eh->plt_got.offset;
4255 memcpy (plt->contents + plt_offset,
4256 htab->non_lazy_plt->plt_entry,
4257 htab->non_lazy_plt->plt_entry_size);
4258
4259       /* Store the PC-relative offset of the GOT entry in the instruction
4260 	 that refers to it, subtracting the size of that instruction.  */
4261 got_pcrel_offset = (got->output_section->vma
4262 + got->output_offset
4263 + got_offset
4264 - plt->output_section->vma
4265 - plt->output_offset
4266 - plt_offset
4267 - htab->non_lazy_plt->plt_got_insn_size);
4268
4269 /* Check PC-relative offset overflow in GOT PLT entry. */
4270 got_after_plt = got->output_section->vma > plt->output_section->vma;
4271 if ((got_after_plt && got_pcrel_offset < 0)
4272 || (!got_after_plt && got_pcrel_offset > 0))
4273 /* xgettext:c-format */
4274 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4275 output_bfd, h->root.root.string);
4276
4277 bfd_put_32 (output_bfd, got_pcrel_offset,
4278 (plt->contents + plt_offset
4279 + htab->non_lazy_plt->plt_got_offset));
4280 }
4281
4282 if (!local_undefweak
4283 && !h->def_regular
4284 && (h->plt.offset != (bfd_vma) -1
4285 || eh->plt_got.offset != (bfd_vma) -1))
4286 {
4287 /* Mark the symbol as undefined, rather than as defined in
4288 the .plt section. Leave the value if there were any
4289 relocations where pointer equality matters (this is a clue
4290 for the dynamic linker, to make function pointer
4291 comparisons work between an application and shared
4292 library), otherwise set it to zero. If a function is only
4293 called from a binary, there is no need to slow down
4294 shared libraries because of that. */
4295 sym->st_shndx = SHN_UNDEF;
4296 if (!h->pointer_equality_needed)
4297 sym->st_value = 0;
4298 }
4299
4300 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4301
4302 /* Don't generate dynamic GOT relocation against undefined weak
4303 symbol in executable. */
4304 if (h->got.offset != (bfd_vma) -1
4305 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4306 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4307 && !local_undefweak)
4308 {
4309 Elf_Internal_Rela rela;
4310 asection *relgot = htab->elf.srelgot;
4311
4312 /* This symbol has an entry in the global offset table. Set it
4313 up. */
4314 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4315 abort ();
4316
4317 rela.r_offset = (htab->elf.sgot->output_section->vma
4318 + htab->elf.sgot->output_offset
4319 + (h->got.offset &~ (bfd_vma) 1));
4320
4321 /* If this is a static link, or it is a -Bsymbolic link and the
4322 symbol is defined locally or was forced to be local because
4323 of a version file, we just want to emit a RELATIVE reloc.
4324 The entry in the global offset table will already have been
4325 initialized in the relocate_section function. */
4326 if (h->def_regular
4327 && h->type == STT_GNU_IFUNC)
4328 {
4329 if (h->plt.offset == (bfd_vma) -1)
4330 {
4331 /* STT_GNU_IFUNC is referenced without PLT. */
4332 if (htab->elf.splt == NULL)
4333 {
4334 		  /* Use the .rel[a].iplt section to store .got relocations
4335 		     in a static executable.  */
4336 relgot = htab->elf.irelplt;
4337 }
4338 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4339 {
4340 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4341 h->root.root.string,
4342 h->root.u.def.section->owner);
4343
4344 rela.r_info = htab->r_info (0,
4345 R_X86_64_IRELATIVE);
4346 rela.r_addend = (h->root.u.def.value
4347 + h->root.u.def.section->output_section->vma
4348 + h->root.u.def.section->output_offset);
4349 }
4350 else
4351 goto do_glob_dat;
4352 }
4353 else if (bfd_link_pic (info))
4354 {
4355 /* Generate R_X86_64_GLOB_DAT. */
4356 goto do_glob_dat;
4357 }
4358 else
4359 {
4360 asection *plt;
4361 bfd_vma plt_offset;
4362
4363 if (!h->pointer_equality_needed)
4364 abort ();
4365
4366 	      /* For a non-shared object we can't use .got.plt, which
4367 		 contains the real function address, if we need pointer
4368 		 equality.  We load the GOT entry with the PLT entry.  */
4369 if (htab->plt_second != NULL)
4370 {
4371 plt = htab->plt_second;
4372 plt_offset = eh->plt_second.offset;
4373 }
4374 else
4375 {
4376 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4377 plt_offset = h->plt.offset;
4378 }
4379 bfd_put_64 (output_bfd, (plt->output_section->vma
4380 + plt->output_offset
4381 + plt_offset),
4382 htab->elf.sgot->contents + h->got.offset);
4383 return TRUE;
4384 }
4385 }
4386 else if (bfd_link_pic (info)
4387 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4388 {
4389 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4390 return FALSE;
4391 BFD_ASSERT((h->got.offset & 1) != 0);
4392 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4393 rela.r_addend = (h->root.u.def.value
4394 + h->root.u.def.section->output_section->vma
4395 + h->root.u.def.section->output_offset);
4396 }
4397 else
4398 {
4399 BFD_ASSERT((h->got.offset & 1) == 0);
4400 do_glob_dat:
4401 bfd_put_64 (output_bfd, (bfd_vma) 0,
4402 htab->elf.sgot->contents + h->got.offset);
4403 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4404 rela.r_addend = 0;
4405 }
4406
4407 elf_append_rela (output_bfd, relgot, &rela);
4408 }
4409
4410 if (h->needs_copy)
4411 {
4412 Elf_Internal_Rela rela;
4413 asection *s;
4414
4415 /* This symbol needs a copy reloc. Set it up. */
4416 VERIFY_COPY_RELOC (h, htab)
4417
4418 rela.r_offset = (h->root.u.def.value
4419 + h->root.u.def.section->output_section->vma
4420 + h->root.u.def.section->output_offset);
4421 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4422 rela.r_addend = 0;
4423 if (h->root.u.def.section == htab->elf.sdynrelro)
4424 s = htab->elf.sreldynrelro;
4425 else
4426 s = htab->elf.srelbss;
4427 elf_append_rela (output_bfd, s, &rela);
4428 }
4429
4430 return TRUE;
4431 }
4432
4433 /* Finish up local dynamic symbol handling. We set the contents of
4434 various dynamic sections here. */
4435
4436 static bfd_boolean
4437 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4438 {
4439 struct elf_link_hash_entry *h
4440 = (struct elf_link_hash_entry *) *slot;
4441 struct bfd_link_info *info
4442 = (struct bfd_link_info *) inf;
4443
4444 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4445 info, h, NULL);
4446 }
4447
4448 /* Finish up undefined weak symbol handling in PIE.  Fill in its PLT entry
4449    here since an undefined weak symbol may not be dynamic, in which case
4450    elf_x86_64_finish_dynamic_symbol may not be called for it.  */
4451
4452 static bfd_boolean
4453 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4454 void *inf)
4455 {
4456 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4457 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4458
4459 if (h->root.type != bfd_link_hash_undefweak
4460 || h->dynindx != -1)
4461 return TRUE;
4462
4463 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4464 info, h, NULL);
4465 }
4466
4467 /* Used to decide how to sort relocs in an optimal manner for the
4468 dynamic linker, before writing them out. */
4469
4470 static enum elf_reloc_type_class
4471 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4472 const asection *rel_sec ATTRIBUTE_UNUSED,
4473 const Elf_Internal_Rela *rela)
4474 {
4475 bfd *abfd = info->output_bfd;
4476 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4477 struct elf_x86_link_hash_table *htab
4478 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4479
4480 if (htab->elf.dynsym != NULL
4481 && htab->elf.dynsym->contents != NULL)
4482 {
4483 /* Check relocation against STT_GNU_IFUNC symbol if there are
4484 dynamic symbols. */
4485 unsigned long r_symndx = htab->r_sym (rela->r_info);
4486 if (r_symndx != STN_UNDEF)
4487 {
4488 Elf_Internal_Sym sym;
4489 if (!bed->s->swap_symbol_in (abfd,
4490 (htab->elf.dynsym->contents
4491 + r_symndx * bed->s->sizeof_sym),
4492 0, &sym))
4493 abort ();
4494
4495 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4496 return reloc_class_ifunc;
4497 }
4498 }
4499
4500 switch ((int) ELF32_R_TYPE (rela->r_info))
4501 {
4502 case R_X86_64_IRELATIVE:
4503 return reloc_class_ifunc;
4504 case R_X86_64_RELATIVE:
4505 case R_X86_64_RELATIVE64:
4506 return reloc_class_relative;
4507 case R_X86_64_JUMP_SLOT:
4508 return reloc_class_plt;
4509 case R_X86_64_COPY:
4510 return reloc_class_copy;
4511 default:
4512 return reloc_class_normal;
4513 }
4514 }
4515
4516 /* Finish up the dynamic sections. */
4517
4518 static bfd_boolean
4519 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4520 struct bfd_link_info *info)
4521 {
4522 struct elf_x86_link_hash_table *htab;
4523
4524 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4525 if (htab == NULL)
4526 return FALSE;
4527
4528 if (! htab->elf.dynamic_sections_created)
4529 return TRUE;
4530
4531 if (htab->elf.splt && htab->elf.splt->size > 0)
4532 {
4533 elf_section_data (htab->elf.splt->output_section)
4534 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4535
4536 if (htab->plt.has_plt0)
4537 {
4538 /* Fill in the special first entry in the procedure linkage
4539 table. */
4540 memcpy (htab->elf.splt->contents,
4541 htab->lazy_plt->plt0_entry,
4542 htab->lazy_plt->plt0_entry_size);
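	  /* For the standard lazy PLT, PLT0 is pushq GOT+8(%rip) followed by
	     jmpq *GOT+16(%rip) plus nop padding; the two 32-bit displacements
	     patched in below are the only layout-dependent parts.  */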
4543 	  /* Add offset for pushq GOT+8(%rip); since the instruction
4544 	     uses 6 bytes, subtract this value.  */
4545 bfd_put_32 (output_bfd,
4546 (htab->elf.sgotplt->output_section->vma
4547 + htab->elf.sgotplt->output_offset
4548 + 8
4549 - htab->elf.splt->output_section->vma
4550 - htab->elf.splt->output_offset
4551 - 6),
4552 (htab->elf.splt->contents
4553 + htab->lazy_plt->plt0_got1_offset));
4554 /* Add offset for the PC-relative instruction accessing
4555 GOT+16, subtracting the offset to the end of that
4556 instruction. */
4557 bfd_put_32 (output_bfd,
4558 (htab->elf.sgotplt->output_section->vma
4559 + htab->elf.sgotplt->output_offset
4560 + 16
4561 - htab->elf.splt->output_section->vma
4562 - htab->elf.splt->output_offset
4563 - htab->lazy_plt->plt0_got2_insn_end),
4564 (htab->elf.splt->contents
4565 + htab->lazy_plt->plt0_got2_offset));
4566 }
4567
4568 if (htab->tlsdesc_plt)
4569 {
4570 bfd_put_64 (output_bfd, (bfd_vma) 0,
4571 htab->elf.sgot->contents + htab->tlsdesc_got);
4572
4573 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4574 htab->lazy_plt->plt_tlsdesc_entry,
4575 htab->lazy_plt->plt_tlsdesc_entry_size);
4576
4577 	  /* Add offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4578 	     bytes and the instruction uses 6 bytes, subtract these
4579 	     values.  */
4580 bfd_put_32 (output_bfd,
4581 (htab->elf.sgotplt->output_section->vma
4582 + htab->elf.sgotplt->output_offset
4583 + 8
4584 - htab->elf.splt->output_section->vma
4585 - htab->elf.splt->output_offset
4586 - htab->tlsdesc_plt
4587 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4588 (htab->elf.splt->contents
4589 + htab->tlsdesc_plt
4590 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4591 /* Add offset for indirect branch via GOT+TDG, where TDG
4592 stands for htab->tlsdesc_got, subtracting the offset
4593 to the end of that instruction. */
4594 bfd_put_32 (output_bfd,
4595 (htab->elf.sgot->output_section->vma
4596 + htab->elf.sgot->output_offset
4597 + htab->tlsdesc_got
4598 - htab->elf.splt->output_section->vma
4599 - htab->elf.splt->output_offset
4600 - htab->tlsdesc_plt
4601 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4602 (htab->elf.splt->contents
4603 + htab->tlsdesc_plt
4604 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4605 }
4606 }
4607
4608 /* Fill PLT entries for undefined weak symbols in PIE. */
4609 if (bfd_link_pie (info))
4610 bfd_hash_traverse (&info->hash->table,
4611 elf_x86_64_pie_finish_undefweak_symbol,
4612 info);
4613
4614 return TRUE;
4615 }
4616
4617 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4618 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4619 It has to be done before elf_link_sort_relocs is called so that
4620 dynamic relocations are properly sorted. */
4621
4622 static bfd_boolean
4623 elf_x86_64_output_arch_local_syms
4624 (bfd *output_bfd ATTRIBUTE_UNUSED,
4625 struct bfd_link_info *info,
4626 void *flaginfo ATTRIBUTE_UNUSED,
4627 int (*func) (void *, const char *,
4628 Elf_Internal_Sym *,
4629 asection *,
4630 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4631 {
4632 struct elf_x86_link_hash_table *htab
4633 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4634 if (htab == NULL)
4635 return FALSE;
4636
4637 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4638 htab_traverse (htab->loc_hash_table,
4639 elf_x86_64_finish_local_dynamic_symbol,
4640 info);
4641
4642 return TRUE;
4643 }
4644
4645 /* Forward declaration. */
4646 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4647
4648 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4649 dynamic relocations. */
4650
4651 static long
4652 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4653 long symcount ATTRIBUTE_UNUSED,
4654 asymbol **syms ATTRIBUTE_UNUSED,
4655 long dynsymcount,
4656 asymbol **dynsyms,
4657 asymbol **ret)
4658 {
4659 long count, i, n;
4660 int j;
4661 bfd_byte *plt_contents;
4662 long relsize;
4663 const struct elf_x86_lazy_plt_layout *lazy_plt;
4664 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4665 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4666 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4667 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4668 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4669 asection *plt;
4670 enum elf_x86_plt_type plt_type;
4671 struct elf_x86_plt plts[] =
4672 {
4673 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4674 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4675 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4676 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4677 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4678 };
4679
4680 *ret = NULL;
4681
4682 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4683 return 0;
4684
4685 if (dynsymcount <= 0)
4686 return 0;
4687
4688 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4689 if (relsize <= 0)
4690 return -1;
4691
4692 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4693 {
4694 lazy_plt = &elf_x86_64_lazy_plt;
4695 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4696 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4697 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4698 if (ABI_64_P (abfd))
4699 {
4700 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4701 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4702 }
4703 else
4704 {
4705 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4706 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4707 }
4708 }
4709 else
4710 {
4711 lazy_plt = &elf_x86_64_nacl_plt;
4712 non_lazy_plt = NULL;
4713 lazy_bnd_plt = NULL;
4714 non_lazy_bnd_plt = NULL;
4715 lazy_ibt_plt = NULL;
4716 non_lazy_ibt_plt = NULL;
4717 }
4718
4719 count = 0;
4720 for (j = 0; plts[j].name != NULL; j++)
4721 {
4722 plt = bfd_get_section_by_name (abfd, plts[j].name);
4723 if (plt == NULL || plt->size == 0)
4724 continue;
4725
4726 /* Get the PLT section contents. */
4727 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4728 if (plt_contents == NULL)
4729 break;
4730 if (!bfd_get_section_contents (abfd, (asection *) plt,
4731 plt_contents, 0, plt->size))
4732 {
4733 free (plt_contents);
4734 break;
4735 }
4736
4737 /* Check what kind of PLT it is. */
4738 plt_type = plt_unknown;
4739 if (plts[j].type == plt_unknown
4740 && (plt->size >= (lazy_plt->plt_entry_size
4741 + lazy_plt->plt_entry_size)))
4742 {
4743 /* Match lazy PLT first. Need to check the first two
4744 instructions. */
4745 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4746 lazy_plt->plt0_got1_offset) == 0)
4747 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4748 2) == 0))
4749 plt_type = plt_lazy;
4750 else if (lazy_bnd_plt != NULL
4751 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4752 lazy_bnd_plt->plt0_got1_offset) == 0)
4753 && (memcmp (plt_contents + 6,
4754 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4755 {
4756 plt_type = plt_lazy | plt_second;
4757 	      /* The first entry in the lazy IBT PLT is the same as the
4758 		 lazy BND PLT.  */
4759 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4760 lazy_ibt_plt->plt_entry,
4761 lazy_ibt_plt->plt_got_offset) == 0))
4762 lazy_plt = lazy_ibt_plt;
4763 else
4764 lazy_plt = lazy_bnd_plt;
4765 }
4766 }
4767
4768 if (non_lazy_plt != NULL
4769 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4770 && plt->size >= non_lazy_plt->plt_entry_size)
4771 {
4772 /* Match non-lazy PLT. */
4773 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4774 non_lazy_plt->plt_got_offset) == 0)
4775 plt_type = plt_non_lazy;
4776 }
4777
4778 if (plt_type == plt_unknown || plt_type == plt_second)
4779 {
4780 if (non_lazy_bnd_plt != NULL
4781 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4782 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4783 non_lazy_bnd_plt->plt_got_offset) == 0))
4784 {
4785 /* Match BND PLT. */
4786 plt_type = plt_second;
4787 non_lazy_plt = non_lazy_bnd_plt;
4788 }
4789 else if (non_lazy_ibt_plt != NULL
4790 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4791 && (memcmp (plt_contents,
4792 non_lazy_ibt_plt->plt_entry,
4793 non_lazy_ibt_plt->plt_got_offset) == 0))
4794 {
4795 /* Match IBT PLT. */
4796 plt_type = plt_second;
4797 non_lazy_plt = non_lazy_ibt_plt;
4798 }
4799 }
4800
4801 if (plt_type == plt_unknown)
4802 {
4803 free (plt_contents);
4804 continue;
4805 }
4806
4807 plts[j].sec = plt;
4808 plts[j].type = plt_type;
4809
4810 if ((plt_type & plt_lazy))
4811 {
4812 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4813 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4814 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4815 /* Skip PLT0 in lazy PLT. */
4816 i = 1;
4817 }
4818 else
4819 {
4820 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4821 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4822 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4823 i = 0;
4824 }
4825
4826 /* Skip lazy PLT when the second PLT is used. */
4827 if (plt_type == (plt_lazy | plt_second))
4828 plts[j].count = 0;
4829 else
4830 {
4831 n = plt->size / plts[j].plt_entry_size;
4832 plts[j].count = n;
4833 count += n - i;
4834 }
4835
4836 plts[j].contents = plt_contents;
4837 }
4838
4839 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4840 (bfd_vma) 0, plts, dynsyms,
4841 ret);
4842 }
4843
4844 /* Handle an x86-64 specific section when reading an object file. This
4845 is called when elfcode.h finds a section with an unknown type. */
4846
4847 static bfd_boolean
4848 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4849 const char *name, int shindex)
4850 {
4851 if (hdr->sh_type != SHT_X86_64_UNWIND)
4852 return FALSE;
4853
4854 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4855 return FALSE;
4856
4857 return TRUE;
4858 }
4859
4860 /* Hook called by the linker routine which adds symbols from an object
4861 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4862 of .bss. */
4863
4864 static bfd_boolean
4865 elf_x86_64_add_symbol_hook (bfd *abfd,
4866 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4867 Elf_Internal_Sym *sym,
4868 const char **namep ATTRIBUTE_UNUSED,
4869 flagword *flagsp ATTRIBUTE_UNUSED,
4870 asection **secp,
4871 bfd_vma *valp)
4872 {
4873 asection *lcomm;
4874
4875 switch (sym->st_shndx)
4876 {
4877 case SHN_X86_64_LCOMMON:
4878 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4879 if (lcomm == NULL)
4880 {
4881 lcomm = bfd_make_section_with_flags (abfd,
4882 "LARGE_COMMON",
4883 (SEC_ALLOC
4884 | SEC_IS_COMMON
4885 | SEC_LINKER_CREATED));
4886 if (lcomm == NULL)
4887 return FALSE;
4888 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4889 }
4890 *secp = lcomm;
4891 *valp = sym->st_size;
4892 return TRUE;
4893 }
4894
4895 return TRUE;
4896 }
4897
4898
4899 /* Given a BFD section, try to locate the corresponding ELF section
4900 index. */
4901
4902 static bfd_boolean
4903 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4904 asection *sec, int *index_return)
4905 {
4906 if (sec == &_bfd_elf_large_com_section)
4907 {
4908 *index_return = SHN_X86_64_LCOMMON;
4909 return TRUE;
4910 }
4911 return FALSE;
4912 }
4913
4914 /* Process a symbol. */
4915
4916 static void
4917 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4918 asymbol *asym)
4919 {
4920 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4921
4922 switch (elfsym->internal_elf_sym.st_shndx)
4923 {
4924 case SHN_X86_64_LCOMMON:
4925 asym->section = &_bfd_elf_large_com_section;
4926 asym->value = elfsym->internal_elf_sym.st_size;
4927 /* Common symbol doesn't set BSF_GLOBAL. */
4928 asym->flags &= ~BSF_GLOBAL;
4929 break;
4930 }
4931 }
4932
4933 static bfd_boolean
4934 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4935 {
4936 return (sym->st_shndx == SHN_COMMON
4937 || sym->st_shndx == SHN_X86_64_LCOMMON);
4938 }
4939
4940 static unsigned int
4941 elf_x86_64_common_section_index (asection *sec)
4942 {
4943 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4944 return SHN_COMMON;
4945 else
4946 return SHN_X86_64_LCOMMON;
4947 }
4948
4949 static asection *
4950 elf_x86_64_common_section (asection *sec)
4951 {
4952 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4953 return bfd_com_section_ptr;
4954 else
4955 return &_bfd_elf_large_com_section;
4956 }
4957
4958 static bfd_boolean
4959 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4960 const Elf_Internal_Sym *sym,
4961 asection **psec,
4962 bfd_boolean newdef,
4963 bfd_boolean olddef,
4964 bfd *oldbfd,
4965 const asection *oldsec)
4966 {
4967 /* A normal common symbol and a large common symbol result in a
4968 normal common symbol. We turn the large common symbol into a
4969 normal one. */
4970 if (!olddef
4971 && h->root.type == bfd_link_hash_common
4972 && !newdef
4973 && bfd_is_com_section (*psec)
4974 && oldsec != *psec)
4975 {
4976 if (sym->st_shndx == SHN_COMMON
4977 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4978 {
4979 h->root.u.c.p->section
4980 = bfd_make_section_old_way (oldbfd, "COMMON");
4981 h->root.u.c.p->section->flags = SEC_ALLOC;
4982 }
4983 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4984 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4985 *psec = bfd_com_section_ptr;
4986 }
4987
4988 return TRUE;
4989 }
4990
4991 static int
4992 elf_x86_64_additional_program_headers (bfd *abfd,
4993 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4994 {
4995 asection *s;
4996 int count = 0;
4997
4998 /* Check to see if we need a large readonly segment. */
4999 s = bfd_get_section_by_name (abfd, ".lrodata");
5000 if (s && (s->flags & SEC_LOAD))
5001 count++;
5002
5003   /* Check to see if we need a large data segment.  Since the .lbss
5004      section is placed right after the .bss section, there should be no
5005      need for a large data segment just because of .lbss.  */
5006 s = bfd_get_section_by_name (abfd, ".ldata");
5007 if (s && (s->flags & SEC_LOAD))
5008 count++;
5009
5010 return count;
5011 }
5012
5013 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5014
5015 static bfd_boolean
5016 elf_x86_64_relocs_compatible (const bfd_target *input,
5017 const bfd_target *output)
5018 {
5019 return ((xvec_get_elf_backend_data (input)->s->elfclass
5020 == xvec_get_elf_backend_data (output)->s->elfclass)
5021 && _bfd_elf_relocs_compatible (input, output));
5022 }
5023
5024 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5025 with GNU properties if found. Otherwise, return NULL. */
5026
5027 static bfd *
5028 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5029 {
5030 struct elf_x86_init_table init_table;
5031
5032 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5033 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5034 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5035 != (int) R_X86_64_GNU_VTINHERIT)
5036 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5037 != (int) R_X86_64_GNU_VTENTRY))
5038 abort ();
5039
5040 /* This is unused for x86-64. */
5041 init_table.plt0_pad_byte = 0x90;
5042
5043 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5044 {
5045 const struct elf_backend_data *bed
5046 = get_elf_backend_data (info->output_bfd);
5047 struct elf_x86_link_hash_table *htab
5048 = elf_x86_hash_table (info, bed->target_id);
5049 if (!htab)
5050 abort ();
5051 if (htab->params->bndplt)
5052 {
5053 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5054 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5055 }
5056 else
5057 {
5058 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5059 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5060 }
5061
5062 if (ABI_64_P (info->output_bfd))
5063 {
5064 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5065 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5066 }
5067 else
5068 {
5069 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5070 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5071 }
5072 }
5073 else
5074 {
5075 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5076 init_table.non_lazy_plt = NULL;
5077 init_table.lazy_ibt_plt = NULL;
5078 init_table.non_lazy_ibt_plt = NULL;
5079 }
5080
5081 if (ABI_64_P (info->output_bfd))
5082 {
5083 init_table.r_info = elf64_r_info;
5084 init_table.r_sym = elf64_r_sym;
5085 }
5086 else
5087 {
5088 init_table.r_info = elf32_r_info;
5089 init_table.r_sym = elf32_r_sym;
5090 }
5091
5092 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5093 }
5094
5095 static const struct bfd_elf_special_section
5096 elf_x86_64_special_sections[]=
5097 {
5098 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5099 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5100 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5101 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5102 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5103 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5104 { NULL, 0, 0, 0, 0 }
5105 };
5106
5107 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5108 #define TARGET_LITTLE_NAME "elf64-x86-64"
5109 #define ELF_ARCH bfd_arch_i386
5110 #define ELF_TARGET_ID X86_64_ELF_DATA
5111 #define ELF_MACHINE_CODE EM_X86_64
5112 #if DEFAULT_LD_Z_SEPARATE_CODE
5113 # define ELF_MAXPAGESIZE 0x1000
5114 #else
5115 # define ELF_MAXPAGESIZE 0x200000
5116 #endif
5117 #define ELF_MINPAGESIZE 0x1000
5118 #define ELF_COMMONPAGESIZE 0x1000
5119
5120 #define elf_backend_can_gc_sections 1
5121 #define elf_backend_can_refcount 1
5122 #define elf_backend_want_got_plt 1
5123 #define elf_backend_plt_readonly 1
5124 #define elf_backend_want_plt_sym 0
5125 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5126 #define elf_backend_rela_normal 1
5127 #define elf_backend_plt_alignment 4
5128 #define elf_backend_extern_protected_data 1
5129 #define elf_backend_caches_rawsize 1
5130 #define elf_backend_dtrel_excludes_plt 1
5131 #define elf_backend_want_dynrelro 1
5132
5133 #define elf_info_to_howto elf_x86_64_info_to_howto
5134
5135 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5136 #define bfd_elf64_bfd_reloc_name_lookup \
5137 elf_x86_64_reloc_name_lookup
5138
5139 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5140 #define elf_backend_check_relocs elf_x86_64_check_relocs
5141 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5142 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5143 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5144 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5145 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5146 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5147 #ifdef CORE_HEADER
5148 #define elf_backend_write_core_note elf_x86_64_write_core_note
5149 #endif
5150 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5151 #define elf_backend_relocate_section elf_x86_64_relocate_section
5152 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5153 #define elf_backend_object_p elf64_x86_64_elf_object_p
5154 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5155
5156 #define elf_backend_section_from_shdr \
5157 elf_x86_64_section_from_shdr
5158
5159 #define elf_backend_section_from_bfd_section \
5160 elf_x86_64_elf_section_from_bfd_section
5161 #define elf_backend_add_symbol_hook \
5162 elf_x86_64_add_symbol_hook
5163 #define elf_backend_symbol_processing \
5164 elf_x86_64_symbol_processing
5165 #define elf_backend_common_section_index \
5166 elf_x86_64_common_section_index
5167 #define elf_backend_common_section \
5168 elf_x86_64_common_section
5169 #define elf_backend_common_definition \
5170 elf_x86_64_common_definition
5171 #define elf_backend_merge_symbol \
5172 elf_x86_64_merge_symbol
5173 #define elf_backend_special_sections \
5174 elf_x86_64_special_sections
5175 #define elf_backend_additional_program_headers \
5176 elf_x86_64_additional_program_headers
5177 #define elf_backend_setup_gnu_properties \
5178 elf_x86_64_link_setup_gnu_properties
5179 #define elf_backend_hide_symbol \
5180 _bfd_x86_elf_hide_symbol
5181
5182 #undef elf64_bed
5183 #define elf64_bed elf64_x86_64_bed
5184
5185 #include "elf64-target.h"
5186
5187 /* CloudABI support. */
5188
5189 #undef TARGET_LITTLE_SYM
5190 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5191 #undef TARGET_LITTLE_NAME
5192 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5193
5194 #undef ELF_OSABI
5195 #define ELF_OSABI ELFOSABI_CLOUDABI
5196
5197 #undef elf64_bed
5198 #define elf64_bed elf64_x86_64_cloudabi_bed
5199
5200 #include "elf64-target.h"
5201
5202 /* FreeBSD support. */
5203
5204 #undef TARGET_LITTLE_SYM
5205 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5206 #undef TARGET_LITTLE_NAME
5207 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5208
5209 #undef ELF_OSABI
5210 #define ELF_OSABI ELFOSABI_FREEBSD
5211
5212 #undef elf64_bed
5213 #define elf64_bed elf64_x86_64_fbsd_bed
5214
5215 #include "elf64-target.h"
5216
5217 /* Solaris 2 support. */
5218
5219 #undef TARGET_LITTLE_SYM
5220 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5221 #undef TARGET_LITTLE_NAME
5222 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5223
5224 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5225 {
5226 is_solaris /* os */
5227 };
5228
5229 #undef elf_backend_arch_data
5230 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5231
5232 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5233 objects won't be recognized. */
5234 #undef ELF_OSABI
5235
5236 #undef elf64_bed
5237 #define elf64_bed elf64_x86_64_sol2_bed
5238
5239 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5240 boundary. */
5241 #undef elf_backend_static_tls_alignment
5242 #define elf_backend_static_tls_alignment 16
5243
5244 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5245
5246 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5247 File, p.63. */
5248 #undef elf_backend_want_plt_sym
5249 #define elf_backend_want_plt_sym 1
5250
5251 #undef elf_backend_strtab_flags
5252 #define elf_backend_strtab_flags SHF_STRINGS
5253
5254 static bfd_boolean
5255 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5256 bfd *obfd ATTRIBUTE_UNUSED,
5257 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5258 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5259 {
5260 /* PR 19938: FIXME: Need to add code for setting the sh_info
5261 and sh_link fields of Solaris specific section types. */
5262 return FALSE;
5263 }
5264
5265 #undef elf_backend_copy_special_section_fields
5266 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5267
5268 #include "elf64-target.h"
5269
5270 /* Native Client support. */
5271
5272 static bfd_boolean
5273 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5274 {
5275 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5276 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5277 return TRUE;
5278 }
5279
5280 #undef TARGET_LITTLE_SYM
5281 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5282 #undef TARGET_LITTLE_NAME
5283 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5284 #undef elf64_bed
5285 #define elf64_bed elf64_x86_64_nacl_bed
5286
5287 #undef ELF_MAXPAGESIZE
5288 #undef ELF_MINPAGESIZE
5289 #undef ELF_COMMONPAGESIZE
5290 #define ELF_MAXPAGESIZE 0x10000
5291 #define ELF_MINPAGESIZE 0x10000
5292 #define ELF_COMMONPAGESIZE 0x10000
5293
5294 /* Restore defaults. */
5295 #undef ELF_OSABI
5296 #undef elf_backend_static_tls_alignment
5297 #undef elf_backend_want_plt_sym
5298 #define elf_backend_want_plt_sym 0
5299 #undef elf_backend_strtab_flags
5300 #undef elf_backend_copy_special_section_fields
5301
5302 /* NaCl uses substantially different PLT entries for the same effects. */
5303
5304 #undef elf_backend_plt_alignment
5305 #define elf_backend_plt_alignment 5
5306 #define NACL_PLT_ENTRY_SIZE 64
5307 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
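/* 0xe0 is -32 as a sign-extended 8-bit immediate, so "and $-32, %r11d"
   clears the low five bits (and, being a 32-bit operation, the upper 32
   bits of %r11), forcing the indirect branch target onto a 32-byte
   bundle boundary.  */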
5308
5309 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5310 {
5311 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5312 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5313 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5314 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5315 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5316
5317 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5318 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5319
5320 /* 32 bytes of nop to pad out to the standard size. */
5321 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5322 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5323 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5324 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5325 0x66, /* excess data16 prefix */
5326 0x90 /* nop */
5327 };
5328
5329 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5330 {
5331 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5332 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5333 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5334 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5335
5336 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5337 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5338 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5339
5340 /* Lazy GOT entries point here (32-byte aligned). */
5341 0x68, /* pushq immediate */
5342 0, 0, 0, 0, /* replaced with index into relocation table. */
5343 0xe9, /* jmp relative */
5344 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5345
5346 /* 22 bytes of nop to pad out to the standard size. */
5347 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5348 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5349 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5350 };
5351
5352 /* .eh_frame covering the .plt section. */
5353
5354 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5355 {
5356 #if (PLT_CIE_LENGTH != 20 \
5357 || PLT_FDE_LENGTH != 36 \
5358 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5359 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5360 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5361 #endif
5362 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5363 0, 0, 0, 0, /* CIE ID */
5364 1, /* CIE version */
5365 'z', 'R', 0, /* Augmentation string */
5366 1, /* Code alignment factor */
5367 0x78, /* Data alignment factor */
5368 16, /* Return address column */
5369 1, /* Augmentation size */
5370 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5371 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5372 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5373 DW_CFA_nop, DW_CFA_nop,
5374
5375 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5376 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5377 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5378 0, 0, 0, 0, /* .plt size goes here */
5379 0, /* Augmentation size */
5380 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5381 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5382 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5383 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5384 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5385 13, /* Block length */
5386 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5387 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5388 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5389 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5390 DW_CFA_nop, DW_CFA_nop
5391 };
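/* The CFA expression above computes %rsp + 8 + (((%rip & 63) >= 37) << 3):
   within a 64-byte PLT entry an extra 8 bytes are on the stack once the lazy
   stub's pushq (which ends at entry offset 37) has executed.  */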
5392
5393 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5394 {
5395 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5396 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5397 elf_x86_64_nacl_plt_entry, /* plt_entry */
5398 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5399 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5400 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5401 2, /* plt_tlsdesc_got1_offset */
5402 9, /* plt_tlsdesc_got2_offset */
5403 6, /* plt_tlsdesc_got1_insn_end */
5404 13, /* plt_tlsdesc_got2_insn_end */
5405 2, /* plt0_got1_offset */
5406 9, /* plt0_got2_offset */
5407 13, /* plt0_got2_insn_end */
5408 3, /* plt_got_offset */
5409 33, /* plt_reloc_offset */
5410 38, /* plt_plt_offset */
5411 7, /* plt_got_insn_size */
5412 42, /* plt_plt_insn_end */
5413 32, /* plt_lazy_offset */
5414 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5415 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5416 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5417 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5418 };
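/* The offsets above index into the byte arrays: e.g. plt_got_offset 3 is the
   displacement of the mov's GOTPCREL operand, plt_lazy_offset 32 is the start
   of the lazy-binding bundle, and plt_reloc_offset 33 and plt_plt_offset 38
   are the pushq immediate and jmp displacement inside it.  */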
5419
5420 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5421 {
5422 is_nacl /* os */
5423 };
5424
5425 #undef elf_backend_arch_data
5426 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5427
5428 #undef elf_backend_object_p
5429 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5430 #undef elf_backend_modify_segment_map
5431 #define elf_backend_modify_segment_map nacl_modify_segment_map
5432 #undef elf_backend_modify_headers
5433 #define elf_backend_modify_headers nacl_modify_headers
5434 #undef elf_backend_final_write_processing
5435 #define elf_backend_final_write_processing nacl_final_write_processing
5436
5437 #include "elf64-target.h"
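/* Each #include of "elf64-target.h" or "elf32-target.h" in this file
   instantiates a complete BFD target vector from whatever TARGET_*, ELF_*
   and elf_backend_* macros are in effect at that point, which is why the
   macros are repeatedly #undef'd and redefined between inclusions.  */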
5438
5439 /* Native Client x32 support. */
5440
5441 static bfd_boolean
5442 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5443 {
5444 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5445 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5446 return TRUE;
5447 }
5448
5449 #undef TARGET_LITTLE_SYM
5450 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5451 #undef TARGET_LITTLE_NAME
5452 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5453 #undef elf32_bed
5454 #define elf32_bed elf32_x86_64_nacl_bed
5455
5456 #define bfd_elf32_bfd_reloc_type_lookup \
5457 elf_x86_64_reloc_type_lookup
5458 #define bfd_elf32_bfd_reloc_name_lookup \
5459 elf_x86_64_reloc_name_lookup
5460 #define bfd_elf32_get_synthetic_symtab \
5461 elf_x86_64_get_synthetic_symtab
5462
5463 #undef elf_backend_object_p
5464 #define elf_backend_object_p \
5465 elf32_x86_64_nacl_elf_object_p
5466
5467 #undef elf_backend_bfd_from_remote_memory
5468 #define elf_backend_bfd_from_remote_memory \
5469 _bfd_elf32_bfd_from_remote_memory
5470
5471 #undef elf_backend_size_info
5472 #define elf_backend_size_info \
5473 _bfd_elf32_size_info
5474
5475 #undef elf32_bed
5476 #define elf32_bed elf32_x86_64_bed
5477
5478 #include "elf32-target.h"
5479
5480 /* Restore defaults. */
5481 #undef elf_backend_object_p
5482 #define elf_backend_object_p elf64_x86_64_elf_object_p
5483 #undef elf_backend_bfd_from_remote_memory
5484 #undef elf_backend_size_info
5485 #undef elf_backend_modify_segment_map
5486 #undef elf_backend_modify_headers
5487 #undef elf_backend_final_write_processing
5488
5489 /* Intel L1OM support. */
5490
5491 static bfd_boolean
5492 elf64_l1om_elf_object_p (bfd *abfd)
5493 {
5494 /* Set the right machine number for an L1OM elf64 file. */
5495 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5496 return TRUE;
5497 }
5498
5499 #undef TARGET_LITTLE_SYM
5500 #define TARGET_LITTLE_SYM l1om_elf64_vec
5501 #undef TARGET_LITTLE_NAME
5502 #define TARGET_LITTLE_NAME "elf64-l1om"
5503 #undef ELF_ARCH
5504 #define ELF_ARCH bfd_arch_l1om
5505
5506 #undef ELF_MACHINE_CODE
5507 #define ELF_MACHINE_CODE EM_L1OM
5508
5509 #undef ELF_OSABI
5510
5511 #undef elf64_bed
5512 #define elf64_bed elf64_l1om_bed
5513
5514 #undef elf_backend_object_p
5515 #define elf_backend_object_p elf64_l1om_elf_object_p
5516
5517 /* Restore defaults. */
5518 #undef ELF_MAXPAGESIZE
5519 #undef ELF_MINPAGESIZE
5520 #undef ELF_COMMONPAGESIZE
5521 #if DEFAULT_LD_Z_SEPARATE_CODE
5522 # define ELF_MAXPAGESIZE 0x1000
5523 #else
5524 # define ELF_MAXPAGESIZE 0x200000
5525 #endif
5526 #define ELF_MINPAGESIZE 0x1000
5527 #define ELF_COMMONPAGESIZE 0x1000
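/* When the linker is configured to pass -z separate-code by default, the
   smaller 4K maximum page size is restored; otherwise the traditional
   x86-64 2M maximum applies.  */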
5528 #undef elf_backend_plt_alignment
5529 #define elf_backend_plt_alignment 4
5530 #undef elf_backend_arch_data
5531 #define elf_backend_arch_data &elf_x86_64_arch_bed
5532
5533 #include "elf64-target.h"
5534
5535 /* FreeBSD L1OM support. */
5536
5537 #undef TARGET_LITTLE_SYM
5538 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5539 #undef TARGET_LITTLE_NAME
5540 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5541
5542 #undef ELF_OSABI
5543 #define ELF_OSABI ELFOSABI_FREEBSD
5544
5545 #undef elf64_bed
5546 #define elf64_bed elf64_l1om_fbsd_bed
5547
5548 #include "elf64-target.h"
5549
5550 /* Intel K1OM support. */
5551
5552 static bfd_boolean
5553 elf64_k1om_elf_object_p (bfd *abfd)
5554 {
5555 /* Set the right machine number for a K1OM elf64 file. */
5556 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5557 return TRUE;
5558 }
5559
5560 #undef TARGET_LITTLE_SYM
5561 #define TARGET_LITTLE_SYM k1om_elf64_vec
5562 #undef TARGET_LITTLE_NAME
5563 #define TARGET_LITTLE_NAME "elf64-k1om"
5564 #undef ELF_ARCH
5565 #define ELF_ARCH bfd_arch_k1om
5566
5567 #undef ELF_MACHINE_CODE
5568 #define ELF_MACHINE_CODE EM_K1OM
5569
5570 #undef ELF_OSABI
5571
5572 #undef elf64_bed
5573 #define elf64_bed elf64_k1om_bed
5574
5575 #undef elf_backend_object_p
5576 #define elf_backend_object_p elf64_k1om_elf_object_p
5577
5578 #undef elf_backend_static_tls_alignment
5579
5580 #undef elf_backend_want_plt_sym
5581 #define elf_backend_want_plt_sym 0
5582
5583 #include "elf64-target.h"
5584
5585 /* FreeBSD K1OM support. */
5586
5587 #undef TARGET_LITTLE_SYM
5588 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5589 #undef TARGET_LITTLE_NAME
5590 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5591
5592 #undef ELF_OSABI
5593 #define ELF_OSABI ELFOSABI_FREEBSD
5594
5595 #undef elf64_bed
5596 #define elf64_bed elf64_k1om_fbsd_bed
5597
5598 #include "elf64-target.h"
5599
5600 /* 32bit x86-64 support. */
5601
5602 #undef TARGET_LITTLE_SYM
5603 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5604 #undef TARGET_LITTLE_NAME
5605 #define TARGET_LITTLE_NAME "elf32-x86-64"
5606 #undef elf32_bed
5607
5608 #undef ELF_ARCH
5609 #define ELF_ARCH bfd_arch_i386
5610
5611 #undef ELF_MACHINE_CODE
5612 #define ELF_MACHINE_CODE EM_X86_64
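/* x32 objects keep EM_X86_64 as the machine code; they are distinguished
   from 64-bit x86-64 objects only by their ELFCLASS32 header.  */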
5613
5614 #undef ELF_OSABI
5615
5616 #undef elf_backend_object_p
5617 #define elf_backend_object_p \
5618 elf32_x86_64_elf_object_p
5619
5620 #undef elf_backend_bfd_from_remote_memory
5621 #define elf_backend_bfd_from_remote_memory \
5622 _bfd_elf32_bfd_from_remote_memory
5623
5624 #undef elf_backend_size_info
5625 #define elf_backend_size_info \
5626 _bfd_elf32_size_info
5627
5628 #include "elf32-target.h"
5629