bfd/elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2020 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
 38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
 39 the same manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
 40 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
 41 since they are the same. */
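/* Note: this works because every R_X86_64_* relocation number fits in
   the low 8 bits that ELF32_R_TYPE extracts, so the same macro decodes
   r_info correctly for both ELF32 (x32) and ELF64 objects. */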
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
 86 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 #define X86_PCREL_TYPE_P(TYPE) \
200 ( ((TYPE) == R_X86_64_PC8) \
201 || ((TYPE) == R_X86_64_PC16) \
202 || ((TYPE) == R_X86_64_PC32) \
203 || ((TYPE) == R_X86_64_PC32_BND) \
204 || ((TYPE) == R_X86_64_PC64))
205
206 #define X86_SIZE_TYPE_P(TYPE) \
207 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
258 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
259 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
260 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
261 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
262 };
263
264 static reloc_howto_type *
265 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
266 {
267 unsigned i;
268
269 if (r_type == (unsigned int) R_X86_64_32)
270 {
271 if (ABI_64_P (abfd))
272 i = r_type;
273 else
274 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
275 }
276 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
277 || r_type >= (unsigned int) R_X86_64_max)
278 {
279 if (r_type >= (unsigned int) R_X86_64_standard)
280 {
281 /* xgettext:c-format */
282 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
283 abfd, r_type);
284 bfd_set_error (bfd_error_bad_value);
285 return NULL;
286 }
287 i = r_type;
288 }
289 else
290 i = r_type - (unsigned int) R_X86_64_vt_offset;
291 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
292 return &x86_64_elf_howto_table[i];
293 }
294
295 /* Given a BFD reloc type, return a HOWTO structure. */
296 static reloc_howto_type *
297 elf_x86_64_reloc_type_lookup (bfd *abfd,
298 bfd_reloc_code_real_type code)
299 {
300 unsigned int i;
301
302 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
303 i++)
304 {
305 if (x86_64_reloc_map[i].bfd_reloc_val == code)
306 return elf_x86_64_rtype_to_howto (abfd,
307 x86_64_reloc_map[i].elf_reloc_val);
308 }
309 return NULL;
310 }
311
312 static reloc_howto_type *
313 elf_x86_64_reloc_name_lookup (bfd *abfd,
314 const char *r_name)
315 {
316 unsigned int i;
317
318 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
319 {
320 /* Get x32 R_X86_64_32. */
321 reloc_howto_type *reloc
322 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
323 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
324 return reloc;
325 }
326
327 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
328 if (x86_64_elf_howto_table[i].name != NULL
329 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
330 return &x86_64_elf_howto_table[i];
331
332 return NULL;
333 }
334
335 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
336
337 static bfd_boolean
338 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
339 Elf_Internal_Rela *dst)
340 {
341 unsigned r_type;
342
343 r_type = ELF32_R_TYPE (dst->r_info);
344 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
345 if (cache_ptr->howto == NULL)
346 return FALSE;
347 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
348 return TRUE;
349 }
350 \f
351 /* Support for core dump NOTE sections. */
352 static bfd_boolean
353 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
354 {
355 int offset;
356 size_t size;
357
358 switch (note->descsz)
359 {
360 default:
361 return FALSE;
362
 363 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
364 /* pr_cursig */
365 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
366
367 /* pr_pid */
368 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
369
370 /* pr_reg */
371 offset = 72;
372 size = 216;
373
374 break;
375
 376 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
377 /* pr_cursig */
378 elf_tdata (abfd)->core->signal
379 = bfd_get_16 (abfd, note->descdata + 12);
380
381 /* pr_pid */
382 elf_tdata (abfd)->core->lwpid
383 = bfd_get_32 (abfd, note->descdata + 32);
384
385 /* pr_reg */
386 offset = 112;
387 size = 216;
388
389 break;
390 }
391
392 /* Make a ".reg/999" section. */
393 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
394 size, note->descpos + offset);
395 }
396
397 static bfd_boolean
398 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
399 {
400 switch (note->descsz)
401 {
402 default:
403 return FALSE;
404
405 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
406 elf_tdata (abfd)->core->pid
407 = bfd_get_32 (abfd, note->descdata + 12);
408 elf_tdata (abfd)->core->program
409 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
410 elf_tdata (abfd)->core->command
411 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
412 break;
413
414 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
415 elf_tdata (abfd)->core->pid
416 = bfd_get_32 (abfd, note->descdata + 24);
417 elf_tdata (abfd)->core->program
418 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
419 elf_tdata (abfd)->core->command
420 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
421 }
422
 423 /* Note that for some reason, a spurious space is tacked onto the
 424 end of the args in some implementations (at least one, anyway),
 425 so strip it off if it exists. */
426
427 {
428 char *command = elf_tdata (abfd)->core->command;
429 int n = strlen (command);
430
431 if (0 < n && command[n - 1] == ' ')
432 command[n - 1] = '\0';
433 }
434
435 return TRUE;
436 }
437
438 #ifdef CORE_HEADER
439 # if GCC_VERSION >= 8000
440 # pragma GCC diagnostic push
441 # pragma GCC diagnostic ignored "-Wstringop-truncation"
442 # endif
443 static char *
444 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
445 int note_type, ...)
446 {
447 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
448 va_list ap;
449 const char *fname, *psargs;
450 long pid;
451 int cursig;
452 const void *gregs;
453
454 switch (note_type)
455 {
456 default:
457 return NULL;
458
459 case NT_PRPSINFO:
460 va_start (ap, note_type);
461 fname = va_arg (ap, const char *);
462 psargs = va_arg (ap, const char *);
463 va_end (ap);
464
465 if (bed->s->elfclass == ELFCLASS32)
466 {
467 prpsinfo32_t data;
468 memset (&data, 0, sizeof (data));
469 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
470 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
471 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
472 &data, sizeof (data));
473 }
474 else
475 {
476 prpsinfo64_t data;
477 memset (&data, 0, sizeof (data));
478 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
479 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
480 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
481 &data, sizeof (data));
482 }
483 /* NOTREACHED */
484
485 case NT_PRSTATUS:
486 va_start (ap, note_type);
487 pid = va_arg (ap, long);
488 cursig = va_arg (ap, int);
489 gregs = va_arg (ap, const void *);
490 va_end (ap);
491
492 if (bed->s->elfclass == ELFCLASS32)
493 {
494 if (bed->elf_machine_code == EM_X86_64)
495 {
496 prstatusx32_t prstat;
497 memset (&prstat, 0, sizeof (prstat));
498 prstat.pr_pid = pid;
499 prstat.pr_cursig = cursig;
500 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
501 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
502 &prstat, sizeof (prstat));
503 }
504 else
505 {
506 prstatus32_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 else
516 {
517 prstatus64_t prstat;
518 memset (&prstat, 0, sizeof (prstat));
519 prstat.pr_pid = pid;
520 prstat.pr_cursig = cursig;
521 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
522 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
523 &prstat, sizeof (prstat));
524 }
525 }
526 /* NOTREACHED */
527 }
528 # if GCC_VERSION >= 8000
529 # pragma GCC diagnostic pop
530 # endif
531 #endif
532 \f
533 /* Functions for the x86-64 ELF linker. */
534
535 /* The size in bytes of an entry in the global offset table. */
536
537 #define GOT_ENTRY_SIZE 8
538
539 /* The size in bytes of an entry in the lazy procedure linkage table. */
540
541 #define LAZY_PLT_ENTRY_SIZE 16
542
543 /* The size in bytes of an entry in the non-lazy procedure linkage
544 table. */
545
546 #define NON_LAZY_PLT_ENTRY_SIZE 8
547
548 /* The first entry in a lazy procedure linkage table looks like this.
549 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
550 works. */
551
552 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
553 {
554 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
555 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
556 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
557 };
558
559 /* Subsequent entries in a lazy procedure linkage table look like this. */
560
561 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
562 {
563 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
564 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
565 0x68, /* pushq immediate */
566 0, 0, 0, 0, /* replaced with index into relocation table. */
567 0xe9, /* jmp relative */
568 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
569 };
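/* With lazy binding, GOT slot N initially holds the address of the
   "pushq" in PLT entry N: the first call through the entry therefore
   pushes the relocation index and falls through to PLT0, which pushes
   GOT+8 (the link map) and jumps via GOT+16 to the dynamic linker.
   Once resolved, the GOT slot points at the real target and later
   calls take the leading "jmpq" directly. */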
570
 571 /* The first entry in a lazy procedure linkage table with BND prefix
 572 looks like this. */
573
574 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
575 {
576 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
577 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
578 0x0f, 0x1f, 0 /* nopl (%rax) */
579 };
580
 581 /* Subsequent entries for branches with BND prefix in a lazy procedure
582 linkage table look like this. */
583
584 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
585 {
586 0x68, 0, 0, 0, 0, /* pushq immediate */
587 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
588 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
589 };
590
 591 /* The first entry in the IBT-enabled lazy procedure linkage table is
 592 the same as the lazy PLT with BND prefix so that bound registers are
 593 preserved when control is passed to the dynamic linker. Subsequent
 594 entries for an IBT-enabled lazy procedure linkage table look like
 595 this. */
596
597 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
598 {
599 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
600 0x68, 0, 0, 0, 0, /* pushq immediate */
601 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
602 0x90 /* nop */
603 };
604
605 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
606 is the same as the normal lazy PLT. Subsequent entries for an
607 x32 IBT-enabled lazy procedure linkage table look like this. */
608
609 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
610 {
611 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
612 0x68, 0, 0, 0, 0, /* pushq immediate */
613 0xe9, 0, 0, 0, 0, /* jmpq relative */
614 0x66, 0x90 /* xchg %ax,%ax */
615 };
616
 617 /* Entries in the non-lazy procedure linkage table look like this. */
618
619 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
620 {
621 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
622 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
623 0x66, 0x90 /* xchg %ax,%ax */
624 };
625
 626 /* Entries for branches with BND prefix in the non-lazy procedure
627 linkage table look like this. */
628
629 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
630 {
631 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
632 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
633 0x90 /* nop */
634 };
635
 636 /* Entries for IBT-enabled branches in the non-lazy procedure
637 linkage table look like this. They have the same size as the lazy
638 PLT entry. */
639
640 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
641 {
642 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
643 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
644 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
645 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
646 };
647
 648 /* Entries for IBT-enabled branches in the x32 non-lazy procedure
649 linkage table look like this. They have the same size as the lazy
650 PLT entry. */
651
652 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
653 {
654 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
655 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
656 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
657 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
658 };
659
660 /* The TLSDESC entry in a lazy procedure linkage table. */
661 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
662 {
663 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
664 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
665 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
666 };
667
668 /* .eh_frame covering the lazy .plt section. */
669
670 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
671 {
672 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
673 0, 0, 0, 0, /* CIE ID */
674 1, /* CIE version */
675 'z', 'R', 0, /* Augmentation string */
676 1, /* Code alignment factor */
677 0x78, /* Data alignment factor */
678 16, /* Return address column */
679 1, /* Augmentation size */
680 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
681 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
682 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
683 DW_CFA_nop, DW_CFA_nop,
684
685 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
686 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
687 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
688 0, 0, 0, 0, /* .plt size goes here */
689 0, /* Augmentation size */
690 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
691 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
692 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
693 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
694 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
695 11, /* Block length */
696 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
697 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
698 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
699 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
700 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
701 };
702
703 /* .eh_frame covering the lazy BND .plt section. */
704
705 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
706 {
707 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
708 0, 0, 0, 0, /* CIE ID */
709 1, /* CIE version */
710 'z', 'R', 0, /* Augmentation string */
711 1, /* Code alignment factor */
712 0x78, /* Data alignment factor */
713 16, /* Return address column */
714 1, /* Augmentation size */
715 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
716 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
717 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
718 DW_CFA_nop, DW_CFA_nop,
719
720 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
721 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
722 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
723 0, 0, 0, 0, /* .plt size goes here */
724 0, /* Augmentation size */
725 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
726 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
727 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
728 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
729 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
730 11, /* Block length */
731 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
732 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
733 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
734 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
735 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
736 };
737
738 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
739
740 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
741 {
742 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
743 0, 0, 0, 0, /* CIE ID */
744 1, /* CIE version */
745 'z', 'R', 0, /* Augmentation string */
746 1, /* Code alignment factor */
747 0x78, /* Data alignment factor */
748 16, /* Return address column */
749 1, /* Augmentation size */
750 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
751 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
752 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
753 DW_CFA_nop, DW_CFA_nop,
754
755 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
756 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
757 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
758 0, 0, 0, 0, /* .plt size goes here */
759 0, /* Augmentation size */
760 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
761 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
762 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
763 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
764 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
765 11, /* Block length */
766 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
767 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
768 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
769 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
770 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
771 };
772
773 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
774
775 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
776 {
777 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
778 0, 0, 0, 0, /* CIE ID */
779 1, /* CIE version */
780 'z', 'R', 0, /* Augmentation string */
781 1, /* Code alignment factor */
782 0x78, /* Data alignment factor */
783 16, /* Return address column */
784 1, /* Augmentation size */
785 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
786 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
787 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
788 DW_CFA_nop, DW_CFA_nop,
789
790 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
791 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
792 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
793 0, 0, 0, 0, /* .plt size goes here */
794 0, /* Augmentation size */
795 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
796 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
797 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
798 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
799 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
800 11, /* Block length */
801 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
802 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
803 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
804 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
805 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
806 };
807
808 /* .eh_frame covering the non-lazy .plt section. */
809
810 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
811 {
812 #define PLT_GOT_FDE_LENGTH 20
813 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
814 0, 0, 0, 0, /* CIE ID */
815 1, /* CIE version */
816 'z', 'R', 0, /* Augmentation string */
817 1, /* Code alignment factor */
818 0x78, /* Data alignment factor */
819 16, /* Return address column */
820 1, /* Augmentation size */
821 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
822 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
823 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
824 DW_CFA_nop, DW_CFA_nop,
825
826 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
827 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
828 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
829 0, 0, 0, 0, /* non-lazy .plt size goes here */
830 0, /* Augmentation size */
831 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
832 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
833 };
834
835 /* These are the standard parameters. */
836 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
837 {
838 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
839 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
840 elf_x86_64_lazy_plt_entry, /* plt_entry */
841 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
842 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
843 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
844 6, /* plt_tlsdesc_got1_offset */
845 12, /* plt_tlsdesc_got2_offset */
846 10, /* plt_tlsdesc_got1_insn_end */
847 16, /* plt_tlsdesc_got2_insn_end */
848 2, /* plt0_got1_offset */
849 8, /* plt0_got2_offset */
850 12, /* plt0_got2_insn_end */
851 2, /* plt_got_offset */
852 7, /* plt_reloc_offset */
853 12, /* plt_plt_offset */
854 6, /* plt_got_insn_size */
855 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
856 6, /* plt_lazy_offset */
857 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
858 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
859 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
860 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
861 };
862
863 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
864 {
865 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
866 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
867 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
868 2, /* plt_got_offset */
869 6, /* plt_got_insn_size */
870 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
871 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
872 };
873
874 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
875 {
876 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
877 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
878 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
879 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
880 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
881 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
882 6, /* plt_tlsdesc_got1_offset */
883 12, /* plt_tlsdesc_got2_offset */
884 10, /* plt_tlsdesc_got1_insn_end */
885 16, /* plt_tlsdesc_got2_insn_end */
886 2, /* plt0_got1_offset */
887 1+8, /* plt0_got2_offset */
888 1+12, /* plt0_got2_insn_end */
889 1+2, /* plt_got_offset */
890 1, /* plt_reloc_offset */
891 7, /* plt_plt_offset */
892 1+6, /* plt_got_insn_size */
893 11, /* plt_plt_insn_end */
894 0, /* plt_lazy_offset */
895 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
896 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
897 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
898 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
899 };
900
901 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
902 {
903 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
904 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
905 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
906 1+2, /* plt_got_offset */
907 1+6, /* plt_got_insn_size */
908 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
909 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
910 };
911
912 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
913 {
914 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
915 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
916 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
917 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
918 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
919 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
920 6, /* plt_tlsdesc_got1_offset */
921 12, /* plt_tlsdesc_got2_offset */
922 10, /* plt_tlsdesc_got1_insn_end */
923 16, /* plt_tlsdesc_got2_insn_end */
924 2, /* plt0_got1_offset */
925 1+8, /* plt0_got2_offset */
926 1+12, /* plt0_got2_insn_end */
927 4+1+2, /* plt_got_offset */
928 4+1, /* plt_reloc_offset */
929 4+1+6, /* plt_plt_offset */
930 4+1+6, /* plt_got_insn_size */
931 4+1+5+5, /* plt_plt_insn_end */
932 0, /* plt_lazy_offset */
933 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
934 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
935 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
936 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
937 };
938
939 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
940 {
941 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
942 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
943 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
944 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
945 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
946 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
947 6, /* plt_tlsdesc_got1_offset */
948 12, /* plt_tlsdesc_got2_offset */
949 10, /* plt_tlsdesc_got1_insn_end */
950 16, /* plt_tlsdesc_got2_insn_end */
951 2, /* plt0_got1_offset */
952 8, /* plt0_got2_offset */
953 12, /* plt0_got2_insn_end */
954 4+2, /* plt_got_offset */
955 4+1, /* plt_reloc_offset */
956 4+6, /* plt_plt_offset */
957 4+6, /* plt_got_insn_size */
958 4+5+5, /* plt_plt_insn_end */
959 0, /* plt_lazy_offset */
960 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
961 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
962 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
963 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
964 };
965
966 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
967 {
968 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
969 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
970 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
971 4+1+2, /* plt_got_offset */
972 4+1+6, /* plt_got_insn_size */
973 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
974 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
975 };
976
977 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
978 {
979 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
980 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
981 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
982 4+2, /* plt_got_offset */
983 4+6, /* plt_got_insn_size */
984 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
985 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
986 };
987
988 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
989 {
990 is_normal /* os */
991 };
992
993 #define elf_backend_arch_data &elf_x86_64_arch_bed
994
995 static bfd_boolean
996 elf64_x86_64_elf_object_p (bfd *abfd)
997 {
998 /* Set the right machine number for an x86-64 elf64 file. */
999 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1000 return TRUE;
1001 }
1002
1003 static bfd_boolean
1004 elf32_x86_64_elf_object_p (bfd *abfd)
1005 {
1006 /* Set the right machine number for an x86-64 elf32 file. */
1007 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1008 return TRUE;
1009 }
1010
 1011 /* Return TRUE if the TLS access code sequence supports transition
1012 from R_TYPE. */
1013
1014 static bfd_boolean
1015 elf_x86_64_check_tls_transition (bfd *abfd,
1016 struct bfd_link_info *info,
1017 asection *sec,
1018 bfd_byte *contents,
1019 Elf_Internal_Shdr *symtab_hdr,
1020 struct elf_link_hash_entry **sym_hashes,
1021 unsigned int r_type,
1022 const Elf_Internal_Rela *rel,
1023 const Elf_Internal_Rela *relend)
1024 {
1025 unsigned int val;
1026 unsigned long r_symndx;
1027 bfd_boolean largepic = FALSE;
1028 struct elf_link_hash_entry *h;
1029 bfd_vma offset;
1030 struct elf_x86_link_hash_table *htab;
1031 bfd_byte *call;
1032 bfd_boolean indirect_call;
1033
1034 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1035 offset = rel->r_offset;
1036 switch (r_type)
1037 {
1038 case R_X86_64_TLSGD:
1039 case R_X86_64_TLSLD:
1040 if ((rel + 1) >= relend)
1041 return FALSE;
1042
1043 if (r_type == R_X86_64_TLSGD)
1044 {
1045 /* Check transition from GD access model. For 64bit, only
1046 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1047 .word 0x6666; rex64; call __tls_get_addr@PLT
1048 or
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .byte 0x66; rex64
1051 call *__tls_get_addr@GOTPCREL(%rip)
1052 which may be converted to
1053 addr32 call __tls_get_addr
 1054 can transition to a different access model. For 32bit, only
1055 leaq foo@tlsgd(%rip), %rdi
1056 .word 0x6666; rex64; call __tls_get_addr@PLT
1057 or
1058 leaq foo@tlsgd(%rip), %rdi
1059 .byte 0x66; rex64
1060 call *__tls_get_addr@GOTPCREL(%rip)
1061 which may be converted to
1062 addr32 call __tls_get_addr
 1063 can transition to a different access model. For largepic,
1064 we also support:
1065 leaq foo@tlsgd(%rip), %rdi
1066 movabsq $__tls_get_addr@pltoff, %rax
 1067 addq %r15, %rax
1068 call *%rax
1069 or
1070 leaq foo@tlsgd(%rip), %rdi
1071 movabsq $__tls_get_addr@pltoff, %rax
 1072 addq %rbx, %rax
1073 call *%rax */
1074
1075 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1076
1077 if ((offset + 12) > sec->size)
1078 return FALSE;
1079
1080 call = contents + offset + 4;
1081 if (call[0] != 0x66
1082 || !((call[1] == 0x48
1083 && call[2] == 0xff
1084 && call[3] == 0x15)
1085 || (call[1] == 0x48
1086 && call[2] == 0x67
1087 && call[3] == 0xe8)
1088 || (call[1] == 0x66
1089 && call[2] == 0x48
1090 && call[3] == 0xe8)))
1091 {
1092 if (!ABI_64_P (abfd)
1093 || (offset + 19) > sec->size
1094 || offset < 3
1095 || memcmp (call - 7, leaq + 1, 3) != 0
1096 || memcmp (call, "\x48\xb8", 2) != 0
1097 || call[11] != 0x01
1098 || call[13] != 0xff
1099 || call[14] != 0xd0
1100 || !((call[10] == 0x48 && call[12] == 0xd8)
1101 || (call[10] == 0x4c && call[12] == 0xf8)))
1102 return FALSE;
1103 largepic = TRUE;
1104 }
1105 else if (ABI_64_P (abfd))
1106 {
1107 if (offset < 4
1108 || memcmp (contents + offset - 4, leaq, 4) != 0)
1109 return FALSE;
1110 }
1111 else
1112 {
1113 if (offset < 3
1114 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1115 return FALSE;
1116 }
1117 indirect_call = call[2] == 0xff;
1118 }
1119 else
1120 {
1121 /* Check transition from LD access model. Only
1122 leaq foo@tlsld(%rip), %rdi;
1123 call __tls_get_addr@PLT
1124 or
1125 leaq foo@tlsld(%rip), %rdi;
1126 call *__tls_get_addr@GOTPCREL(%rip)
1127 which may be converted to
1128 addr32 call __tls_get_addr
 1129 can transition to a different access model. For largepic
1130 we also support:
1131 leaq foo@tlsld(%rip), %rdi
1132 movabsq $__tls_get_addr@pltoff, %rax
 1133 addq %r15, %rax
1134 call *%rax
1135 or
1136 leaq foo@tlsld(%rip), %rdi
1137 movabsq $__tls_get_addr@pltoff, %rax
 1138 addq %rbx, %rax
1139 call *%rax */
1140
1141 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1142
1143 if (offset < 3 || (offset + 9) > sec->size)
1144 return FALSE;
1145
1146 if (memcmp (contents + offset - 3, lea, 3) != 0)
1147 return FALSE;
1148
1149 call = contents + offset + 4;
1150 if (!(call[0] == 0xe8
1151 || (call[0] == 0xff && call[1] == 0x15)
1152 || (call[0] == 0x67 && call[1] == 0xe8)))
1153 {
1154 if (!ABI_64_P (abfd)
1155 || (offset + 19) > sec->size
1156 || memcmp (call, "\x48\xb8", 2) != 0
1157 || call[11] != 0x01
1158 || call[13] != 0xff
1159 || call[14] != 0xd0
1160 || !((call[10] == 0x48 && call[12] == 0xd8)
1161 || (call[10] == 0x4c && call[12] == 0xf8)))
1162 return FALSE;
1163 largepic = TRUE;
1164 }
1165 indirect_call = call[0] == 0xff;
1166 }
1167
1168 r_symndx = htab->r_sym (rel[1].r_info);
1169 if (r_symndx < symtab_hdr->sh_info)
1170 return FALSE;
1171
1172 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1173 if (h == NULL
1174 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1175 return FALSE;
1176 else
1177 {
1178 r_type = (ELF32_R_TYPE (rel[1].r_info)
1179 & ~R_X86_64_converted_reloc_bit);
1180 if (largepic)
1181 return r_type == R_X86_64_PLTOFF64;
1182 else if (indirect_call)
1183 return r_type == R_X86_64_GOTPCRELX;
1184 else
1185 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1186 }
1187
1188 case R_X86_64_GOTTPOFF:
1189 /* Check transition from IE access model:
1190 mov foo@gottpoff(%rip), %reg
1191 add foo@gottpoff(%rip), %reg
1192 */
1193
1194 /* Check REX prefix first. */
1195 if (offset >= 3 && (offset + 4) <= sec->size)
1196 {
1197 val = bfd_get_8 (abfd, contents + offset - 3);
1198 if (val != 0x48 && val != 0x4c)
1199 {
1200 /* X32 may have 0x44 REX prefix or no REX prefix. */
1201 if (ABI_64_P (abfd))
1202 return FALSE;
1203 }
1204 }
1205 else
1206 {
1207 /* X32 may not have any REX prefix. */
1208 if (ABI_64_P (abfd))
1209 return FALSE;
1210 if (offset < 2 || (offset + 3) > sec->size)
1211 return FALSE;
1212 }
1213
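/* 0x8b and 0x03 are the "mov" and "add" opcodes of the IE sequences
   above, and (ModRM & 0xc7) == 5 selects mod=00, r/m=101, i.e.
   RIP-relative addressing with a 32-bit displacement. */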
1214 val = bfd_get_8 (abfd, contents + offset - 2);
1215 if (val != 0x8b && val != 0x03)
1216 return FALSE;
1217
1218 val = bfd_get_8 (abfd, contents + offset - 1);
1219 return (val & 0xc7) == 5;
1220
1221 case R_X86_64_GOTPC32_TLSDESC:
1222 /* Check transition from GDesc access model:
1223 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1224 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1225
1226 Make sure it's a leaq adding rip to a 32-bit offset
1227 into any register, although it's probably almost always
1228 going to be rax. */
1229
1230 if (offset < 3 || (offset + 4) > sec->size)
1231 return FALSE;
1232
1233 val = bfd_get_8 (abfd, contents + offset - 3);
1234 val &= 0xfb;
1235 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1236 return FALSE;
1237
1238 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1239 return FALSE;
1240
1241 val = bfd_get_8 (abfd, contents + offset - 1);
1242 return (val & 0xc7) == 0x05;
1243
1244 case R_X86_64_TLSDESC_CALL:
1245 /* Check transition from GDesc access model:
1246 call *x@tlsdesc(%rax) <--- LP64 mode.
1247 call *x@tlsdesc(%eax) <--- X32 mode.
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 unsigned int prefix;
1252 call = contents + offset;
1253 prefix = 0;
1254 if (!ABI_64_P (abfd))
1255 {
1256 /* Check for call *x@tlsdesc(%eax). */
1257 if (call[0] == 0x67)
1258 {
1259 prefix = 1;
1260 if (offset + 3 > sec->size)
1261 return FALSE;
1262 }
1263 }
1264 /* Make sure that it's a call *x@tlsdesc(%rax). */
1265 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1266 }
1267
1268 return FALSE;
1269
1270 default:
1271 abort ();
1272 }
1273 }
1274
1275 /* Return TRUE if the TLS access transition is OK or no transition
1276 will be performed. Update R_TYPE if there is a transition. */
1277
1278 static bfd_boolean
1279 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1280 asection *sec, bfd_byte *contents,
1281 Elf_Internal_Shdr *symtab_hdr,
1282 struct elf_link_hash_entry **sym_hashes,
1283 unsigned int *r_type, int tls_type,
1284 const Elf_Internal_Rela *rel,
1285 const Elf_Internal_Rela *relend,
1286 struct elf_link_hash_entry *h,
1287 unsigned long r_symndx,
1288 bfd_boolean from_relocate_section)
1289 {
1290 unsigned int from_type = *r_type;
1291 unsigned int to_type = from_type;
1292 bfd_boolean check = TRUE;
1293
1294 /* Skip TLS transition for functions. */
1295 if (h != NULL
1296 && (h->type == STT_FUNC
1297 || h->type == STT_GNU_IFUNC))
1298 return TRUE;
1299
1300 switch (from_type)
1301 {
1302 case R_X86_64_TLSGD:
1303 case R_X86_64_GOTPC32_TLSDESC:
1304 case R_X86_64_TLSDESC_CALL:
1305 case R_X86_64_GOTTPOFF:
1306 if (bfd_link_executable (info))
1307 {
1308 if (h == NULL)
1309 to_type = R_X86_64_TPOFF32;
1310 else
1311 to_type = R_X86_64_GOTTPOFF;
1312 }
1313
1314 /* When we are called from elf_x86_64_relocate_section, there may
1315 be additional transitions based on TLS_TYPE. */
1316 if (from_relocate_section)
1317 {
1318 unsigned int new_to_type = to_type;
1319
1320 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1321 new_to_type = R_X86_64_TPOFF32;
1322
1323 if (to_type == R_X86_64_TLSGD
1324 || to_type == R_X86_64_GOTPC32_TLSDESC
1325 || to_type == R_X86_64_TLSDESC_CALL)
1326 {
1327 if (tls_type == GOT_TLS_IE)
1328 new_to_type = R_X86_64_GOTTPOFF;
1329 }
1330
1331 /* We checked the transition before when we were called from
1332 elf_x86_64_check_relocs. We only want to check the new
1333 transition which hasn't been checked before. */
1334 check = new_to_type != to_type && from_type == to_type;
1335 to_type = new_to_type;
1336 }
1337
1338 break;
1339
1340 case R_X86_64_TLSLD:
1341 if (bfd_link_executable (info))
1342 to_type = R_X86_64_TPOFF32;
1343 break;
1344
1345 default:
1346 return TRUE;
1347 }
1348
1349 /* Return TRUE if there is no transition. */
1350 if (from_type == to_type)
1351 return TRUE;
1352
1353 /* Check if the transition can be performed. */
1354 if (check
1355 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1356 symtab_hdr, sym_hashes,
1357 from_type, rel, relend))
1358 {
1359 reloc_howto_type *from, *to;
1360 const char *name;
1361
1362 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1363 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1364
1365 if (from == NULL || to == NULL)
1366 return FALSE;
1367
1368 if (h)
1369 name = h->root.root.string;
1370 else
1371 {
1372 struct elf_x86_link_hash_table *htab;
1373
1374 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1375 if (htab == NULL)
1376 name = "*unknown*";
1377 else
1378 {
1379 Elf_Internal_Sym *isym;
1380
1381 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1382 abfd, r_symndx);
1383 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1384 }
1385 }
1386
1387 _bfd_error_handler
1388 /* xgettext:c-format */
1389 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1390 " in section `%pA' failed"),
1391 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1392 bfd_set_error (bfd_error_bad_value);
1393 return FALSE;
1394 }
1395
1396 *r_type = to_type;
1397 return TRUE;
1398 }
1399
1400 /* Rename some of the generic section flags to better document how they
1401 are used here. */
1402 #define check_relocs_failed sec_flg0
1403
1404 static bfd_boolean
1405 elf_x86_64_need_pic (struct bfd_link_info *info,
1406 bfd *input_bfd, asection *sec,
1407 struct elf_link_hash_entry *h,
1408 Elf_Internal_Shdr *symtab_hdr,
1409 Elf_Internal_Sym *isym,
1410 reloc_howto_type *howto)
1411 {
1412 const char *v = "";
1413 const char *und = "";
1414 const char *pic = "";
1415 const char *object;
1416
1417 const char *name;
1418 if (h)
1419 {
1420 name = h->root.root.string;
1421 switch (ELF_ST_VISIBILITY (h->other))
1422 {
1423 case STV_HIDDEN:
1424 v = _("hidden symbol ");
1425 break;
1426 case STV_INTERNAL:
1427 v = _("internal symbol ");
1428 break;
1429 case STV_PROTECTED:
1430 v = _("protected symbol ");
1431 break;
1432 default:
1433 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1434 v = _("protected symbol ");
1435 else
1436 v = _("symbol ");
1437 pic = NULL;
1438 break;
1439 }
1440
1441 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1442 und = _("undefined ");
1443 }
1444 else
1445 {
1446 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1447 pic = NULL;
1448 }
1449
1450 if (bfd_link_dll (info))
1451 {
1452 object = _("a shared object");
1453 if (!pic)
1454 pic = _("; recompile with -fPIC");
1455 }
1456 else
1457 {
1458 if (bfd_link_pie (info))
1459 object = _("a PIE object");
1460 else
1461 object = _("a PDE object");
1462 if (!pic)
1463 pic = _("; recompile with -fPIE");
1464 }
1465
1466 /* xgettext:c-format */
1467 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1468 "not be used when making %s%s"),
1469 input_bfd, howto->name, und, v, name,
1470 object, pic);
1471 bfd_set_error (bfd_error_bad_value);
1472 sec->check_relocs_failed = 1;
1473 return FALSE;
1474 }
1475
1476 /* With the local symbol, foo, we convert
1477 mov foo@GOTPCREL(%rip), %reg
1478 to
1479 lea foo(%rip), %reg
1480 and convert
1481 call/jmp *foo@GOTPCREL(%rip)
1482 to
1483 nop call foo/jmp foo nop
1484 When PIC is false, convert
1485 test %reg, foo@GOTPCREL(%rip)
1486 to
1487 test $foo, %reg
1488 and convert
1489 binop foo@GOTPCREL(%rip), %reg
1490 to
1491 binop $foo, %reg
1492 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1493 instructions. */
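/* For example, "mov foo@GOTPCREL(%rip), %rax" is encoded as
   48 8b 05 <disp32> with the GOTPCREL relocation applied to the
   displacement; rewriting the opcode byte 0x8b to 0x8d turns it into
   "lea foo(%rip), %rax" and the relocation becomes R_X86_64_PC32, so
   no GOT slot is needed for the symbol. */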
1494
1495 static bfd_boolean
1496 elf_x86_64_convert_load_reloc (bfd *abfd,
1497 bfd_byte *contents,
1498 unsigned int *r_type_p,
1499 Elf_Internal_Rela *irel,
1500 struct elf_link_hash_entry *h,
1501 bfd_boolean *converted,
1502 struct bfd_link_info *link_info)
1503 {
1504 struct elf_x86_link_hash_table *htab;
1505 bfd_boolean is_pic;
1506 bfd_boolean no_overflow;
1507 bfd_boolean relocx;
1508 bfd_boolean to_reloc_pc32;
1509 bfd_boolean abs_symbol;
1510 bfd_boolean local_ref;
1511 asection *tsec;
1512 bfd_signed_vma raddend;
1513 unsigned int opcode;
1514 unsigned int modrm;
1515 unsigned int r_type = *r_type_p;
1516 unsigned int r_symndx;
1517 bfd_vma roff = irel->r_offset;
1518 bfd_vma abs_relocation;
1519
1520 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1521 return TRUE;
1522
1523 raddend = irel->r_addend;
1524 /* Addend for 32-bit PC-relative relocation must be -4. */
1525 if (raddend != -4)
1526 return TRUE;
1527
1528 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1529 is_pic = bfd_link_pic (link_info);
1530
1531 relocx = (r_type == R_X86_64_GOTPCRELX
1532 || r_type == R_X86_64_REX_GOTPCRELX);
1533
1534 /* TRUE if --no-relax is used. */
1535 no_overflow = link_info->disable_target_specific_optimizations > 1;
1536
1537 r_symndx = htab->r_sym (irel->r_info);
1538
1539 opcode = bfd_get_8 (abfd, contents + roff - 2);
1540
 1541 /* Convert mov to lea even for plain R_X86_64_GOTPCREL; that conversion has long been supported. */
1542 if (opcode != 0x8b)
1543 {
1544 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1545 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1546 test, xor instructions. */
1547 if (!relocx)
1548 return TRUE;
1549 }
1550
1551 /* We convert only to R_X86_64_PC32:
1552 1. Branch.
1553 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1554 3. no_overflow is true.
1555 4. PIC.
1556 */
1557 to_reloc_pc32 = (opcode == 0xff
1558 || !relocx
1559 || no_overflow
1560 || is_pic);
1561
1562 abs_symbol = FALSE;
1563 abs_relocation = 0;
1564
1565 /* Get the symbol referred to by the reloc. */
1566 if (h == NULL)
1567 {
1568 Elf_Internal_Sym *isym
1569 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1570
1571 /* Skip relocation against undefined symbols. */
1572 if (isym->st_shndx == SHN_UNDEF)
1573 return TRUE;
1574
1575 local_ref = TRUE;
1576 if (isym->st_shndx == SHN_ABS)
1577 {
1578 tsec = bfd_abs_section_ptr;
1579 abs_symbol = TRUE;
1580 abs_relocation = isym->st_value;
1581 }
1582 else if (isym->st_shndx == SHN_COMMON)
1583 tsec = bfd_com_section_ptr;
1584 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1585 tsec = &_bfd_elf_large_com_section;
1586 else
1587 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1588 }
1589 else
1590 {
 1591 /* An undefined weak symbol is only bound locally in an executable
 1592 and its reference is resolved as 0 without relocation
 1593 overflow. We can only perform this optimization for
 1594 GOTPCRELX relocations since we need to modify the REX byte.
 1595 It is OK to convert mov with R_X86_64_GOTPCREL to
 1596 R_X86_64_PC32. */
1597 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1598
1599 abs_symbol = ABS_SYMBOL_P (h);
1600 abs_relocation = h->root.u.def.value;
1601
1602 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1603 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1604 if ((relocx || opcode == 0x8b)
1605 && (h->root.type == bfd_link_hash_undefweak
1606 && !eh->linker_def
1607 && local_ref))
1608 {
1609 if (opcode == 0xff)
1610 {
1611 /* Skip for branch instructions since R_X86_64_PC32
1612 may overflow. */
1613 if (no_overflow)
1614 return TRUE;
1615 }
1616 else if (relocx)
1617 {
1618 /* For non-branch instructions, we can convert to
1619 R_X86_64_32/R_X86_64_32S since we know if there
1620 is a REX byte. */
1621 to_reloc_pc32 = FALSE;
1622 }
1623
1624 /* Since we don't know the current PC when PIC is true,
1625 we can't convert to R_X86_64_PC32. */
1626 if (to_reloc_pc32 && is_pic)
1627 return TRUE;
1628
1629 goto convert;
1630 }
 1631 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1632 ld.so may use its link-time address. */
1633 else if (h->start_stop
1634 || eh->linker_def
1635 || ((h->def_regular
1636 || h->root.type == bfd_link_hash_defined
1637 || h->root.type == bfd_link_hash_defweak)
1638 && h != htab->elf.hdynamic
1639 && local_ref))
1640 {
1641 /* bfd_link_hash_new or bfd_link_hash_undefined is
1642 set by an assignment in a linker script in
1643 bfd_elf_record_link_assignment. start_stop is set
1644 on __start_SECNAME/__stop_SECNAME which mark section
1645 SECNAME. */
1646 if (h->start_stop
1647 || eh->linker_def
1648 || (h->def_regular
1649 && (h->root.type == bfd_link_hash_new
1650 || h->root.type == bfd_link_hash_undefined
1651 || ((h->root.type == bfd_link_hash_defined
1652 || h->root.type == bfd_link_hash_defweak)
1653 && h->root.u.def.section == bfd_und_section_ptr))))
1654 {
1655 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1656 if (no_overflow)
1657 return TRUE;
1658 goto convert;
1659 }
1660 tsec = h->root.u.def.section;
1661 }
1662 else
1663 return TRUE;
1664 }
1665
1666 /* Don't convert GOTPCREL relocation against large section. */
1667 if (elf_section_data (tsec) != NULL
1668 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1669 return TRUE;
1670
1671 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1672 if (no_overflow)
1673 return TRUE;
1674
1675 convert:
1676 if (opcode == 0xff)
1677 {
1678 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1679 unsigned int nop;
1680 unsigned int disp;
1681 bfd_vma nop_offset;
1682
1683 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1684 R_X86_64_PC32. */
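/* At the byte level this is roughly (a sketch; the rel32 is filled
   in via the converted relocation):
     ff 25 <disp32>     jmp  *foo@GOTPCREL(%rip)
   becomes
     e9 <rel32> 90      jmp  foo; nop
   and
     ff 15 <disp32>     call *foo@GOTPCREL(%rip)
   becomes either
     <nop> e8 <rel32>   e.g. addr32 (0x67) call foo
   or
     e8 <rel32> <nop>   call foo; nop
   depending on call_nop_as_suffix; every form stays 6 bytes long, so
   no other bytes move.  */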
1685 modrm = bfd_get_8 (abfd, contents + roff - 1);
1686 if (modrm == 0x25)
1687 {
1688 /* Convert to "jmp foo nop". */
1689 modrm = 0xe9;
1690 nop = NOP_OPCODE;
1691 nop_offset = irel->r_offset + 3;
1692 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1693 irel->r_offset -= 1;
1694 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1695 }
1696 else
1697 {
1698 struct elf_x86_link_hash_entry *eh
1699 = (struct elf_x86_link_hash_entry *) h;
1700
1701 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1702 is a nop prefix. */
1703 modrm = 0xe8;
1704 /* To support TLS optimization, always use addr32 prefix for
1705 "call *__tls_get_addr@GOTPCREL(%rip)". */
1706 if (eh && eh->tls_get_addr)
1707 {
1708 nop = 0x67;
1709 nop_offset = irel->r_offset - 2;
1710 }
1711 else
1712 {
1713 nop = htab->params->call_nop_byte;
1714 if (htab->params->call_nop_as_suffix)
1715 {
1716 nop_offset = irel->r_offset + 3;
1717 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1718 irel->r_offset -= 1;
1719 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1720 }
1721 else
1722 nop_offset = irel->r_offset - 2;
1723 }
1724 }
1725 bfd_put_8 (abfd, nop, contents + nop_offset);
1726 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1727 r_type = R_X86_64_PC32;
1728 }
1729 else
1730 {
1731 unsigned int rex;
1732 unsigned int rex_mask = REX_R;
1733
1734 if (r_type == R_X86_64_REX_GOTPCRELX)
1735 rex = bfd_get_8 (abfd, contents + roff - 3);
1736 else
1737 rex = 0;
1738
1739 if (opcode == 0x8b)
1740 {
1741 if (abs_symbol && local_ref)
1742 to_reloc_pc32 = FALSE;
1743
1744 if (to_reloc_pc32)
1745 {
1746 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1747 "lea foo(%rip), %reg". */
1748 opcode = 0x8d;
1749 r_type = R_X86_64_PC32;
1750 }
1751 else
1752 {
1753 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1754 "mov $foo, %reg". */
1755 opcode = 0xc7;
1756 modrm = bfd_get_8 (abfd, contents + roff - 1);
1757 modrm = 0xc0 | (modrm & 0x38) >> 3;
1758 if ((rex & REX_W) != 0
1759 && ABI_64_P (link_info->output_bfd))
1760 {
1761 /* Keep the REX_W bit in REX byte for LP64. */
1762 r_type = R_X86_64_32S;
1763 goto rewrite_modrm_rex;
1764 }
1765 else
1766 {
1767 /* If the REX_W bit in REX byte isn't needed,
1768 use R_X86_64_32 and clear the W bit to avoid
1769 sign-extend imm32 to imm64. */
1770 r_type = R_X86_64_32;
1771 /* Clear the W bit in REX byte. */
1772 rex_mask |= REX_W;
1773 goto rewrite_modrm_rex;
1774 }
1775 }
1776 }
1777 else
1778 {
1779 /* R_X86_64_PC32 isn't supported. */
1780 if (to_reloc_pc32)
1781 return TRUE;
1782
1783 modrm = bfd_get_8 (abfd, contents + roff - 1);
1784 if (opcode == 0x85)
1785 {
1786 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1787 "test $foo, %reg". */
1788 modrm = 0xc0 | (modrm & 0x38) >> 3;
1789 opcode = 0xf7;
1790 }
1791 else
1792 {
1793 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1794 "binop $foo, %reg". */
1795 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1796 opcode = 0x81;
1797 }
1798
1799 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1800 overflow when sign-extending imm32 to imm64. */
1801 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1802
1803 rewrite_modrm_rex:
1804 if (abs_relocation)
1805 {
1806 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
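/* (x + 0x80000000) > 0xffffffff is the usual unsigned test for
   "x does not fit in a signed 32-bit value", i.e. x lies outside
   [-0x80000000, 0x7fffffff] once sign-extended to 64 bits.  */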
1807 if (r_type == R_X86_64_32S)
1808 {
1809 if ((abs_relocation + 0x80000000) > 0xffffffff)
1810 return TRUE;
1811 }
1812 else
1813 {
1814 if (abs_relocation > 0xffffffff)
1815 return TRUE;
1816 }
1817 }
1818
1819 bfd_put_8 (abfd, modrm, contents + roff - 1);
1820
1821 if (rex)
1822 {
1823 /* Move the R bit to the B bit in REX byte. */
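/* REX is 0100WRXB: R extends the ModRM reg field and B extends r/m.
   Since the register operand was moved from reg to r/m above, its
   extension bit has to follow (REX_R >> 2 == REX_B); rex_mask may
   also include REX_W, which is cleared for the mov -> R_X86_64_32
   case above.  */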
1824 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1825 bfd_put_8 (abfd, rex, contents + roff - 3);
1826 }
1827
1828 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1829 irel->r_addend = 0;
1830 }
1831
1832 bfd_put_8 (abfd, opcode, contents + roff - 2);
1833 }
1834
1835 *r_type_p = r_type;
1836 irel->r_info = htab->r_info (r_symndx,
1837 r_type | R_X86_64_converted_reloc_bit);
1838
1839 *converted = TRUE;
1840
1841 return TRUE;
1842 }
1843
1844 /* Look through the relocs for a section during the first phase, and
1845 calculate needed space in the global offset table, procedure
1846 linkage table, and dynamic reloc sections. */
1847
1848 static bfd_boolean
1849 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1850 asection *sec,
1851 const Elf_Internal_Rela *relocs)
1852 {
1853 struct elf_x86_link_hash_table *htab;
1854 Elf_Internal_Shdr *symtab_hdr;
1855 struct elf_link_hash_entry **sym_hashes;
1856 const Elf_Internal_Rela *rel;
1857 const Elf_Internal_Rela *rel_end;
1858 asection *sreloc;
1859 bfd_byte *contents;
1860 bfd_boolean converted;
1861
1862 if (bfd_link_relocatable (info))
1863 return TRUE;
1864
1865 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1866 if (htab == NULL)
1867 {
1868 sec->check_relocs_failed = 1;
1869 return FALSE;
1870 }
1871
1872 BFD_ASSERT (is_x86_elf (abfd, htab));
1873
1874 /* Get the section contents. */
1875 if (elf_section_data (sec)->this_hdr.contents != NULL)
1876 contents = elf_section_data (sec)->this_hdr.contents;
1877 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1878 {
1879 sec->check_relocs_failed = 1;
1880 return FALSE;
1881 }
1882
1883 symtab_hdr = &elf_symtab_hdr (abfd);
1884 sym_hashes = elf_sym_hashes (abfd);
1885
1886 converted = FALSE;
1887
1888 sreloc = NULL;
1889
1890 rel_end = relocs + sec->reloc_count;
1891 for (rel = relocs; rel < rel_end; rel++)
1892 {
1893 unsigned int r_type;
1894 unsigned int r_symndx;
1895 struct elf_link_hash_entry *h;
1896 struct elf_x86_link_hash_entry *eh;
1897 Elf_Internal_Sym *isym;
1898 const char *name;
1899 bfd_boolean size_reloc;
1900 bfd_boolean converted_reloc;
1901 bfd_boolean no_dynreloc;
1902
1903 r_symndx = htab->r_sym (rel->r_info);
1904 r_type = ELF32_R_TYPE (rel->r_info);
1905
1906 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1907 {
1908 /* xgettext:c-format */
1909 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1910 abfd, r_symndx);
1911 goto error_return;
1912 }
1913
1914 if (r_symndx < symtab_hdr->sh_info)
1915 {
1916 /* A local symbol. */
1917 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1918 abfd, r_symndx);
1919 if (isym == NULL)
1920 goto error_return;
1921
1922 /* Check relocation against local STT_GNU_IFUNC symbol. */
1923 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1924 {
1925 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1926 TRUE);
1927 if (h == NULL)
1928 goto error_return;
1929
1930 /* Fake a STT_GNU_IFUNC symbol. */
1931 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1932 isym, NULL);
1933 h->type = STT_GNU_IFUNC;
1934 h->def_regular = 1;
1935 h->ref_regular = 1;
1936 h->forced_local = 1;
1937 h->root.type = bfd_link_hash_defined;
1938 }
1939 else
1940 h = NULL;
1941 }
1942 else
1943 {
1944 isym = NULL;
1945 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1946 while (h->root.type == bfd_link_hash_indirect
1947 || h->root.type == bfd_link_hash_warning)
1948 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1949 }
1950
1951 /* Check invalid x32 relocations. */
1952 if (!ABI_64_P (abfd))
1953 switch (r_type)
1954 {
1955 default:
1956 break;
1957
1958 case R_X86_64_DTPOFF64:
1959 case R_X86_64_TPOFF64:
1960 case R_X86_64_PC64:
1961 case R_X86_64_GOTOFF64:
1962 case R_X86_64_GOT64:
1963 case R_X86_64_GOTPCREL64:
1964 case R_X86_64_GOTPC64:
1965 case R_X86_64_GOTPLT64:
1966 case R_X86_64_PLTOFF64:
1967 {
1968 if (h)
1969 name = h->root.root.string;
1970 else
1971 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1972 NULL);
1973 _bfd_error_handler
1974 /* xgettext:c-format */
1975 (_("%pB: relocation %s against symbol `%s' isn't "
1976 "supported in x32 mode"), abfd,
1977 x86_64_elf_howto_table[r_type].name, name);
1978 bfd_set_error (bfd_error_bad_value);
1979 goto error_return;
1980 }
1981 break;
1982 }
1983
1984 if (h != NULL)
1985 {
1986 /* It is referenced by a non-shared object. */
1987 h->ref_regular = 1;
1988 }
1989
1990 converted_reloc = FALSE;
1991 if ((r_type == R_X86_64_GOTPCREL
1992 || r_type == R_X86_64_GOTPCRELX
1993 || r_type == R_X86_64_REX_GOTPCRELX)
1994 && (h == NULL || h->type != STT_GNU_IFUNC))
1995 {
1996 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1997 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1998 irel, h, &converted_reloc,
1999 info))
2000 goto error_return;
2001
2002 if (converted_reloc)
2003 converted = TRUE;
2004 }
2005
2006 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
2007 symtab_hdr, &no_dynreloc))
2008 return FALSE;
2009
2010 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2011 symtab_hdr, sym_hashes,
2012 &r_type, GOT_UNKNOWN,
2013 rel, rel_end, h, r_symndx, FALSE))
2014 goto error_return;
2015
2016 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2017 if (h == htab->elf.hgot)
2018 htab->got_referenced = TRUE;
2019
2020 eh = (struct elf_x86_link_hash_entry *) h;
2021 switch (r_type)
2022 {
2023 case R_X86_64_TLSLD:
2024 htab->tls_ld_or_ldm_got.refcount = 1;
2025 goto create_got;
2026
2027 case R_X86_64_TPOFF32:
2028 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2029 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2030 &x86_64_elf_howto_table[r_type]);
2031 if (eh != NULL)
2032 eh->zero_undefweak &= 0x2;
2033 break;
2034
2035 case R_X86_64_GOTTPOFF:
2036 if (!bfd_link_executable (info))
2037 info->flags |= DF_STATIC_TLS;
2038 /* Fall through */
2039
2040 case R_X86_64_GOT32:
2041 case R_X86_64_GOTPCREL:
2042 case R_X86_64_GOTPCRELX:
2043 case R_X86_64_REX_GOTPCRELX:
2044 case R_X86_64_TLSGD:
2045 case R_X86_64_GOT64:
2046 case R_X86_64_GOTPCREL64:
2047 case R_X86_64_GOTPLT64:
2048 case R_X86_64_GOTPC32_TLSDESC:
2049 case R_X86_64_TLSDESC_CALL:
2050 /* This symbol requires a global offset table entry. */
2051 {
2052 int tls_type, old_tls_type;
2053
2054 switch (r_type)
2055 {
2056 default:
2057 tls_type = GOT_NORMAL;
2058 if (h)
2059 {
2060 if (ABS_SYMBOL_P (h))
2061 tls_type = GOT_ABS;
2062 }
2063 else if (isym->st_shndx == SHN_ABS)
2064 tls_type = GOT_ABS;
2065 break;
2066 case R_X86_64_TLSGD:
2067 tls_type = GOT_TLS_GD;
2068 break;
2069 case R_X86_64_GOTTPOFF:
2070 tls_type = GOT_TLS_IE;
2071 break;
2072 case R_X86_64_GOTPC32_TLSDESC:
2073 case R_X86_64_TLSDESC_CALL:
2074 tls_type = GOT_TLS_GDESC;
2075 break;
2076 }
2077
2078 if (h != NULL)
2079 {
2080 h->got.refcount = 1;
2081 old_tls_type = eh->tls_type;
2082 }
2083 else
2084 {
2085 bfd_signed_vma *local_got_refcounts;
2086
2087 /* This is a global offset table entry for a local symbol. */
2088 local_got_refcounts = elf_local_got_refcounts (abfd);
2089 if (local_got_refcounts == NULL)
2090 {
2091 bfd_size_type size;
2092
2093 size = symtab_hdr->sh_info;
2094 size *= sizeof (bfd_signed_vma)
2095 + sizeof (bfd_vma) + sizeof (char);
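/* One allocation holds three parallel per-local-symbol arrays:
   sh_info GOT refcounts (bfd_signed_vma), then sh_info TLSDESC GOT
   offsets (bfd_vma), then sh_info GOT TLS types (char).  The
   pointer arithmetic below carves the block up accordingly.  */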
2096 local_got_refcounts = ((bfd_signed_vma *)
2097 bfd_zalloc (abfd, size));
2098 if (local_got_refcounts == NULL)
2099 goto error_return;
2100 elf_local_got_refcounts (abfd) = local_got_refcounts;
2101 elf_x86_local_tlsdesc_gotent (abfd)
2102 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2103 elf_x86_local_got_tls_type (abfd)
2104 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2105 }
2106 local_got_refcounts[r_symndx] = 1;
2107 old_tls_type
2108 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2109 }
2110
2111 /* If a TLS symbol is accessed using IE at least once,
2112 there is no point in using the dynamic model for it. */
2113 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2114 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2115 || tls_type != GOT_TLS_IE))
2116 {
2117 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2118 tls_type = old_tls_type;
2119 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2120 && GOT_TLS_GD_ANY_P (tls_type))
2121 tls_type |= old_tls_type;
2122 else
2123 {
2124 if (h)
2125 name = h->root.root.string;
2126 else
2127 name = bfd_elf_sym_name (abfd, symtab_hdr,
2128 isym, NULL);
2129 _bfd_error_handler
2130 /* xgettext:c-format */
2131 (_("%pB: '%s' accessed both as normal and"
2132 " thread local symbol"),
2133 abfd, name);
2134 bfd_set_error (bfd_error_bad_value);
2135 goto error_return;
2136 }
2137 }
2138
2139 if (old_tls_type != tls_type)
2140 {
2141 if (eh != NULL)
2142 eh->tls_type = tls_type;
2143 else
2144 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2145 }
2146 }
2147 /* Fall through */
2148
2149 case R_X86_64_GOTOFF64:
2150 case R_X86_64_GOTPC32:
2151 case R_X86_64_GOTPC64:
2152 create_got:
2153 if (eh != NULL)
2154 eh->zero_undefweak &= 0x2;
2155 break;
2156
2157 case R_X86_64_PLT32:
2158 case R_X86_64_PLT32_BND:
2159 /* This symbol requires a procedure linkage table entry. We
2160 actually build the entry in adjust_dynamic_symbol,
2161 because this might be a case of linking PIC code which is
2162 never referenced by a dynamic object, in which case we
2163 don't need to generate a procedure linkage table entry
2164 after all. */
2165
2166 /* If this is a local symbol, we resolve it directly without
2167 creating a procedure linkage table entry. */
2168 if (h == NULL)
2169 continue;
2170
2171 eh->zero_undefweak &= 0x2;
2172 h->needs_plt = 1;
2173 h->plt.refcount = 1;
2174 break;
2175
2176 case R_X86_64_PLTOFF64:
2177 /* This tries to form the 'address' of a function relative
2178 to GOT. For global symbols we need a PLT entry. */
2179 if (h != NULL)
2180 {
2181 h->needs_plt = 1;
2182 h->plt.refcount = 1;
2183 }
2184 goto create_got;
2185
2186 case R_X86_64_SIZE32:
2187 case R_X86_64_SIZE64:
2188 size_reloc = TRUE;
2189 goto do_size;
2190
2191 case R_X86_64_32:
2192 if (!ABI_64_P (abfd))
2193 goto pointer;
2194 /* Fall through. */
2195 case R_X86_64_8:
2196 case R_X86_64_16:
2197 case R_X86_64_32S:
2198 /* Check relocation overflow, as these relocs may lead to
2199 run-time relocation overflow. Don't error out for
2200 sections we don't care about, such as debug sections, or
2201 when the relocation overflow check is disabled. */
2202 if (!htab->params->no_reloc_overflow_check
2203 && !converted_reloc
2204 && (bfd_link_pic (info)
2205 || (bfd_link_executable (info)
2206 && h != NULL
2207 && !h->def_regular
2208 && h->def_dynamic
2209 && (sec->flags & SEC_READONLY) == 0)))
2210 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2211 &x86_64_elf_howto_table[r_type]);
2212 /* Fall through. */
2213
2214 case R_X86_64_PC8:
2215 case R_X86_64_PC16:
2216 case R_X86_64_PC32:
2217 case R_X86_64_PC32_BND:
2218 case R_X86_64_PC64:
2219 case R_X86_64_64:
2220 pointer:
2221 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2222 eh->zero_undefweak |= 0x2;
2223 /* We are called after all symbols have been resolved. Only
2224 relocations against STT_GNU_IFUNC symbols must go through
2225 the PLT. */
2226 if (h != NULL
2227 && (bfd_link_executable (info)
2228 || h->type == STT_GNU_IFUNC))
2229 {
2230 bfd_boolean func_pointer_ref = FALSE;
2231
2232 if (r_type == R_X86_64_PC32)
2233 {
2234 /* Since something like ".long foo - ." may be used
2235 as a pointer, make sure that the PLT is used if foo is
2236 a function defined in a shared library. */
2237 if ((sec->flags & SEC_CODE) == 0)
2238 {
2239 h->pointer_equality_needed = 1;
2240 if (bfd_link_pie (info)
2241 && h->type == STT_FUNC
2242 && !h->def_regular
2243 && h->def_dynamic)
2244 {
2245 h->needs_plt = 1;
2246 h->plt.refcount = 1;
2247 }
2248 }
2249 }
2250 else if (r_type != R_X86_64_PC32_BND
2251 && r_type != R_X86_64_PC64)
2252 {
2253 h->pointer_equality_needed = 1;
2254 /* At run-time, R_X86_64_64 can be resolved for both
2255 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2256 can only be resolved for x32. */
2257 if ((sec->flags & SEC_READONLY) == 0
2258 && (r_type == R_X86_64_64
2259 || (!ABI_64_P (abfd)
2260 && (r_type == R_X86_64_32
2261 || r_type == R_X86_64_32S))))
2262 func_pointer_ref = TRUE;
2263 }
2264
2265 if (!func_pointer_ref)
2266 {
2267 /* If this reloc is in a read-only section, we might
2268 need a copy reloc. We can't check reliably at this
2269 stage whether the section is read-only, as input
2270 sections have not yet been mapped to output sections.
2271 Tentatively set the flag for now, and correct in
2272 adjust_dynamic_symbol. */
2273 h->non_got_ref = 1;
2274
2275 /* We may need a .plt entry if the symbol is a function
2276 defined in a shared lib or is a function referenced
2277 from the code or read-only section. */
2278 if (!h->def_regular
2279 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2280 h->plt.refcount = 1;
2281 }
2282 }
2283
2284 size_reloc = FALSE;
2285 do_size:
2286 if (!no_dynreloc
2287 && NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2288 htab->pointer_r_type))
2289 {
2290 struct elf_dyn_relocs *p;
2291 struct elf_dyn_relocs **head;
2292
2293 /* We must copy these reloc types into the output file.
2294 Create a reloc section in dynobj and make room for
2295 this reloc. */
2296 if (sreloc == NULL)
2297 {
2298 sreloc = _bfd_elf_make_dynamic_reloc_section
2299 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2300 abfd, /*rela?*/ TRUE);
2301
2302 if (sreloc == NULL)
2303 goto error_return;
2304 }
2305
2306 /* If this is a global symbol, we count the number of
2307 relocations we need for this symbol. */
2308 if (h != NULL)
2309 head = &h->dyn_relocs;
2310 else
2311 {
2312 /* Track dynamic relocs needed for local syms too.
2313 We really need local syms available to do this
2314 easily. Oh well. */
2315 asection *s;
2316 void **vpp;
2317
2318 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2319 abfd, r_symndx);
2320 if (isym == NULL)
2321 goto error_return;
2322
2323 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2324 if (s == NULL)
2325 s = sec;
2326
2327 /* Beware of type punned pointers vs strict aliasing
2328 rules. */
2329 vpp = &(elf_section_data (s)->local_dynrel);
2330 head = (struct elf_dyn_relocs **)vpp;
2331 }
2332
2333 p = *head;
2334 if (p == NULL || p->sec != sec)
2335 {
2336 size_t amt = sizeof *p;
2337
2338 p = ((struct elf_dyn_relocs *)
2339 bfd_alloc (htab->elf.dynobj, amt));
2340 if (p == NULL)
2341 goto error_return;
2342 p->next = *head;
2343 *head = p;
2344 p->sec = sec;
2345 p->count = 0;
2346 p->pc_count = 0;
2347 }
2348
2349 p->count += 1;
2350 /* Count size relocation as PC-relative relocation. */
2351 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2352 p->pc_count += 1;
2353 }
2354 break;
2355
2356 /* This relocation describes the C++ object vtable hierarchy.
2357 Reconstruct it for later use during GC. */
2358 case R_X86_64_GNU_VTINHERIT:
2359 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2360 goto error_return;
2361 break;
2362
2363 /* This relocation describes which C++ vtable entries are actually
2364 used. Record for later use during GC. */
2365 case R_X86_64_GNU_VTENTRY:
2366 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2367 goto error_return;
2368 break;
2369
2370 default:
2371 break;
2372 }
2373 }
2374
2375 if (elf_section_data (sec)->this_hdr.contents != contents)
2376 {
2377 if (!converted && !info->keep_memory)
2378 free (contents);
2379 else
2380 {
2381 /* Cache the section contents for elf_link_input_bfd if any
2382 load is converted or --no-keep-memory isn't used. */
2383 elf_section_data (sec)->this_hdr.contents = contents;
2384 }
2385 }
2386
2387 /* Cache relocations if any load is converted. */
2388 if (elf_section_data (sec)->relocs != relocs && converted)
2389 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2390
2391 return TRUE;
2392
2393 error_return:
2394 if (elf_section_data (sec)->this_hdr.contents != contents)
2395 free (contents);
2396 sec->check_relocs_failed = 1;
2397 return FALSE;
2398 }
2399
2400 /* Return the relocation value for @tpoff relocation
2401 if STT_TLS virtual address is ADDRESS. */
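/* On x86-64 (TLS variant II) the static TLS block ends at the thread
   pointer (%fs:0), so the value returned here is normally negative:
   roughly ADDRESS - (tls_sec->vma + aligned static TLS size).  */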
2402
2403 static bfd_vma
2404 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2405 {
2406 struct elf_link_hash_table *htab = elf_hash_table (info);
2407 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2408 bfd_vma static_tls_size;
2409
2410 /* If tls_sec is NULL, we should have signalled an error already. */
2411 if (htab->tls_sec == NULL)
2412 return 0;
2413
2414 /* Consider special static TLS alignment requirements. */
2415 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2416 return address - static_tls_size - htab->tls_sec->vma;
2417 }
2418
2419 /* Relocate an x86_64 ELF section. */
2420
2421 static bfd_boolean
2422 elf_x86_64_relocate_section (bfd *output_bfd,
2423 struct bfd_link_info *info,
2424 bfd *input_bfd,
2425 asection *input_section,
2426 bfd_byte *contents,
2427 Elf_Internal_Rela *relocs,
2428 Elf_Internal_Sym *local_syms,
2429 asection **local_sections)
2430 {
2431 struct elf_x86_link_hash_table *htab;
2432 Elf_Internal_Shdr *symtab_hdr;
2433 struct elf_link_hash_entry **sym_hashes;
2434 bfd_vma *local_got_offsets;
2435 bfd_vma *local_tlsdesc_gotents;
2436 Elf_Internal_Rela *rel;
2437 Elf_Internal_Rela *wrel;
2438 Elf_Internal_Rela *relend;
2439 unsigned int plt_entry_size;
2440
2441 /* Skip if check_relocs failed. */
2442 if (input_section->check_relocs_failed)
2443 return FALSE;
2444
2445 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2446 if (htab == NULL)
2447 return FALSE;
2448
2449 if (!is_x86_elf (input_bfd, htab))
2450 {
2451 bfd_set_error (bfd_error_wrong_format);
2452 return FALSE;
2453 }
2454
2455 plt_entry_size = htab->plt.plt_entry_size;
2456 symtab_hdr = &elf_symtab_hdr (input_bfd);
2457 sym_hashes = elf_sym_hashes (input_bfd);
2458 local_got_offsets = elf_local_got_offsets (input_bfd);
2459 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2460
2461 _bfd_x86_elf_set_tls_module_base (info);
2462
2463 rel = wrel = relocs;
2464 relend = relocs + input_section->reloc_count;
2465 for (; rel < relend; wrel++, rel++)
2466 {
2467 unsigned int r_type, r_type_tls;
2468 reloc_howto_type *howto;
2469 unsigned long r_symndx;
2470 struct elf_link_hash_entry *h;
2471 struct elf_x86_link_hash_entry *eh;
2472 Elf_Internal_Sym *sym;
2473 asection *sec;
2474 bfd_vma off, offplt, plt_offset;
2475 bfd_vma relocation;
2476 bfd_boolean unresolved_reloc;
2477 bfd_reloc_status_type r;
2478 int tls_type;
2479 asection *base_got, *resolved_plt;
2480 bfd_vma st_size;
2481 bfd_boolean resolved_to_zero;
2482 bfd_boolean relative_reloc;
2483 bfd_boolean converted_reloc;
2484 bfd_boolean need_copy_reloc_in_pie;
2485 bfd_boolean no_copyreloc_p;
2486
2487 r_type = ELF32_R_TYPE (rel->r_info);
2488 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2489 || r_type == (int) R_X86_64_GNU_VTENTRY)
2490 {
2491 if (wrel != rel)
2492 *wrel = *rel;
2493 continue;
2494 }
2495
2496 r_symndx = htab->r_sym (rel->r_info);
2497 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2498 if (converted_reloc)
2499 {
2500 r_type &= ~R_X86_64_converted_reloc_bit;
2501 rel->r_info = htab->r_info (r_symndx, r_type);
2502 }
2503
2504 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2505 if (howto == NULL)
2506 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2507
2508 h = NULL;
2509 sym = NULL;
2510 sec = NULL;
2511 unresolved_reloc = FALSE;
2512 if (r_symndx < symtab_hdr->sh_info)
2513 {
2514 sym = local_syms + r_symndx;
2515 sec = local_sections[r_symndx];
2516
2517 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2518 &sec, rel);
2519 st_size = sym->st_size;
2520
2521 /* Relocate against local STT_GNU_IFUNC symbol. */
2522 if (!bfd_link_relocatable (info)
2523 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2524 {
2525 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2526 rel, FALSE);
2527 if (h == NULL)
2528 abort ();
2529
2530 /* Set STT_GNU_IFUNC symbol value. */
2531 h->root.u.def.value = sym->st_value;
2532 h->root.u.def.section = sec;
2533 }
2534 }
2535 else
2536 {
2537 bfd_boolean warned ATTRIBUTE_UNUSED;
2538 bfd_boolean ignored ATTRIBUTE_UNUSED;
2539
2540 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2541 r_symndx, symtab_hdr, sym_hashes,
2542 h, sec, relocation,
2543 unresolved_reloc, warned, ignored);
2544 st_size = h->size;
2545 }
2546
2547 if (sec != NULL && discarded_section (sec))
2548 {
2549 _bfd_clear_contents (howto, input_bfd, input_section,
2550 contents, rel->r_offset);
2551 wrel->r_offset = rel->r_offset;
2552 wrel->r_info = 0;
2553 wrel->r_addend = 0;
2554
2555 /* For ld -r, remove relocations in debug sections against
2556 sections defined in discarded sections. Not done for
2557 .eh_frame, whose editing code expects them to be present. */
2558 if (bfd_link_relocatable (info)
2559 && (input_section->flags & SEC_DEBUGGING))
2560 wrel--;
2561
2562 continue;
2563 }
2564
2565 if (bfd_link_relocatable (info))
2566 {
2567 if (wrel != rel)
2568 *wrel = *rel;
2569 continue;
2570 }
2571
2572 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2573 {
2574 if (r_type == R_X86_64_64)
2575 {
2576 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2577 zero-extend it to 64 bits if the addend is zero. */
2578 r_type = R_X86_64_32;
2579 memset (contents + rel->r_offset + 4, 0, 4);
2580 }
2581 else if (r_type == R_X86_64_SIZE64)
2582 {
2583 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2584 zero-extend it to 64 bits if the addend is zero. */
2585 r_type = R_X86_64_SIZE32;
2586 memset (contents + rel->r_offset + 4, 0, 4);
2587 }
2588 }
2589
2590 eh = (struct elf_x86_link_hash_entry *) h;
2591
2592 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
2593 it here if it is defined in a non-shared object. */
2594 if (h != NULL
2595 && h->type == STT_GNU_IFUNC
2596 && h->def_regular)
2597 {
2598 bfd_vma plt_index;
2599 const char *name;
2600
2601 if ((input_section->flags & SEC_ALLOC) == 0)
2602 {
2603 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2604 STT_GNU_IFUNC symbol as STT_FUNC. */
2605 if (elf_section_type (input_section) == SHT_NOTE)
2606 goto skip_ifunc;
2607 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2608 sections because such sections are not SEC_ALLOC and
2609 thus ld.so will not process them. */
2610 if ((input_section->flags & SEC_DEBUGGING) != 0)
2611 continue;
2612 abort ();
2613 }
2614
2615 switch (r_type)
2616 {
2617 default:
2618 break;
2619
2620 case R_X86_64_GOTPCREL:
2621 case R_X86_64_GOTPCRELX:
2622 case R_X86_64_REX_GOTPCRELX:
2623 case R_X86_64_GOTPCREL64:
2624 base_got = htab->elf.sgot;
2625 off = h->got.offset;
2626
2627 if (base_got == NULL)
2628 abort ();
2629
2630 if (off == (bfd_vma) -1)
2631 {
2632 /* We can't use h->got.offset here to save state, or
2633 even just remember the offset, as finish_dynamic_symbol
2634 would use that as offset into .got. */
2635
2636 if (h->plt.offset == (bfd_vma) -1)
2637 abort ();
2638
2639 if (htab->elf.splt != NULL)
2640 {
2641 plt_index = (h->plt.offset / plt_entry_size
2642 - htab->plt.has_plt0);
2643 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2644 base_got = htab->elf.sgotplt;
2645 }
2646 else
2647 {
2648 plt_index = h->plt.offset / plt_entry_size;
2649 off = plt_index * GOT_ENTRY_SIZE;
2650 base_got = htab->elf.igotplt;
2651 }
2652
2653 if (h->dynindx == -1
2654 || h->forced_local
2655 || info->symbolic)
2656 {
2657 /* This references the local definition. We must
2658 initialize this entry in the global offset table.
2659 Since the offset must always be a multiple of 8,
2660 we use the least significant bit to record
2661 whether we have initialized it already.
2662
2663 When doing a dynamic link, we create a .rela.got
2664 relocation entry to initialize the value. This
2665 is done in the finish_dynamic_symbol routine. */
2666 if ((off & 1) != 0)
2667 off &= ~1;
2668 else
2669 {
2670 bfd_put_64 (output_bfd, relocation,
2671 base_got->contents + off);
2672 /* Note that this is harmless for the GOTPLT64
2673 case, as -1 | 1 still is -1. */
2674 h->got.offset |= 1;
2675 }
2676 }
2677 }
2678
2679 relocation = (base_got->output_section->vma
2680 + base_got->output_offset + off);
2681
2682 goto do_relocation;
2683 }
2684
2685 if (h->plt.offset == (bfd_vma) -1)
2686 {
2687 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2688 if (r_type == htab->pointer_r_type
2689 && (input_section->flags & SEC_CODE) == 0)
2690 goto do_ifunc_pointer;
2691 goto bad_ifunc_reloc;
2692 }
2693
2694 /* STT_GNU_IFUNC symbol must go through PLT. */
2695 if (htab->elf.splt != NULL)
2696 {
2697 if (htab->plt_second != NULL)
2698 {
2699 resolved_plt = htab->plt_second;
2700 plt_offset = eh->plt_second.offset;
2701 }
2702 else
2703 {
2704 resolved_plt = htab->elf.splt;
2705 plt_offset = h->plt.offset;
2706 }
2707 }
2708 else
2709 {
2710 resolved_plt = htab->elf.iplt;
2711 plt_offset = h->plt.offset;
2712 }
2713
2714 relocation = (resolved_plt->output_section->vma
2715 + resolved_plt->output_offset + plt_offset);
2716
2717 switch (r_type)
2718 {
2719 default:
2720 bad_ifunc_reloc:
2721 if (h->root.root.string)
2722 name = h->root.root.string;
2723 else
2724 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2725 NULL);
2726 _bfd_error_handler
2727 /* xgettext:c-format */
2728 (_("%pB: relocation %s against STT_GNU_IFUNC "
2729 "symbol `%s' isn't supported"), input_bfd,
2730 howto->name, name);
2731 bfd_set_error (bfd_error_bad_value);
2732 return FALSE;
2733
2734 case R_X86_64_32S:
2735 if (bfd_link_pic (info))
2736 abort ();
2737 goto do_relocation;
2738
2739 case R_X86_64_32:
2740 if (ABI_64_P (output_bfd))
2741 goto do_relocation;
2742 /* FALLTHROUGH */
2743 case R_X86_64_64:
2744 do_ifunc_pointer:
2745 if (rel->r_addend != 0)
2746 {
2747 if (h->root.root.string)
2748 name = h->root.root.string;
2749 else
2750 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2751 sym, NULL);
2752 _bfd_error_handler
2753 /* xgettext:c-format */
2754 (_("%pB: relocation %s against STT_GNU_IFUNC "
2755 "symbol `%s' has non-zero addend: %" PRId64),
2756 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2757 bfd_set_error (bfd_error_bad_value);
2758 return FALSE;
2759 }
2760
2761 /* Generate a dynamic relocation only when there is a
2762 non-GOT reference in a shared object or there is no
2763 PLT. */
2764 if ((bfd_link_pic (info) && h->non_got_ref)
2765 || h->plt.offset == (bfd_vma) -1)
2766 {
2767 Elf_Internal_Rela outrel;
2768 asection *sreloc;
2769
2770 /* Need a dynamic relocation to get the real function
2771 address. */
2772 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2773 info,
2774 input_section,
2775 rel->r_offset);
2776 if (outrel.r_offset == (bfd_vma) -1
2777 || outrel.r_offset == (bfd_vma) -2)
2778 abort ();
2779
2780 outrel.r_offset += (input_section->output_section->vma
2781 + input_section->output_offset);
2782
2783 if (POINTER_LOCAL_IFUNC_P (info, h))
2784 {
2785 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2786 h->root.root.string,
2787 h->root.u.def.section->owner);
2788
2789 /* This symbol is resolved locally. */
2790 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2791 outrel.r_addend = (h->root.u.def.value
2792 + h->root.u.def.section->output_section->vma
2793 + h->root.u.def.section->output_offset);
2794 }
2795 else
2796 {
2797 outrel.r_info = htab->r_info (h->dynindx, r_type);
2798 outrel.r_addend = 0;
2799 }
2800
2801 /* Dynamic relocations are stored in
2802 1. .rela.ifunc section in PIC object.
2803 2. .rela.got section in dynamic executable.
2804 3. .rela.iplt section in static executable. */
2805 if (bfd_link_pic (info))
2806 sreloc = htab->elf.irelifunc;
2807 else if (htab->elf.splt != NULL)
2808 sreloc = htab->elf.srelgot;
2809 else
2810 sreloc = htab->elf.irelplt;
2811 elf_append_rela (output_bfd, sreloc, &outrel);
2812
2813 /* If this reloc is against an external symbol, we
2814 do not want to fiddle with the addend. Otherwise,
2815 we need to include the symbol value so that it
2816 becomes an addend for the dynamic reloc. For an
2817 internal symbol, we have already updated the addend. */
2818 continue;
2819 }
2820 /* FALLTHROUGH */
2821 case R_X86_64_PC32:
2822 case R_X86_64_PC32_BND:
2823 case R_X86_64_PC64:
2824 case R_X86_64_PLT32:
2825 case R_X86_64_PLT32_BND:
2826 goto do_relocation;
2827 }
2828 }
2829
2830 skip_ifunc:
2831 resolved_to_zero = (eh != NULL
2832 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2833
2834 /* When generating a shared object, the relocations handled here are
2835 copied into the output file to be resolved at run time. */
2836 switch (r_type)
2837 {
2838 case R_X86_64_GOT32:
2839 case R_X86_64_GOT64:
2840 /* Relocation is to the entry for this symbol in the global
2841 offset table. */
2842 case R_X86_64_GOTPCREL:
2843 case R_X86_64_GOTPCRELX:
2844 case R_X86_64_REX_GOTPCRELX:
2845 case R_X86_64_GOTPCREL64:
2846 /* Use global offset table entry as symbol value. */
2847 case R_X86_64_GOTPLT64:
2848 /* This is obsolete and treated the same as GOT64. */
2849 base_got = htab->elf.sgot;
2850
2851 if (htab->elf.sgot == NULL)
2852 abort ();
2853
2854 relative_reloc = FALSE;
2855 if (h != NULL)
2856 {
2857 off = h->got.offset;
2858 if (h->needs_plt
2859 && h->plt.offset != (bfd_vma)-1
2860 && off == (bfd_vma)-1)
2861 {
2862 /* We can't use h->got.offset here to save
2863 state, or even just remember the offset, as
2864 finish_dynamic_symbol would use that as offset into
2865 .got. */
2866 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2867 - htab->plt.has_plt0);
2868 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2869 base_got = htab->elf.sgotplt;
2870 }
2871
2872 if (RESOLVED_LOCALLY_P (info, h, htab))
2873 {
2874 /* We must initialize this entry in the global offset
2875 table. Since the offset must always be a multiple
2876 of 8, we use the least significant bit to record
2877 whether we have initialized it already.
2878
2879 When doing a dynamic link, we create a .rela.got
2880 relocation entry to initialize the value. This is
2881 done in the finish_dynamic_symbol routine. */
2882 if ((off & 1) != 0)
2883 off &= ~1;
2884 else
2885 {
2886 bfd_put_64 (output_bfd, relocation,
2887 base_got->contents + off);
2888 /* Note that this is harmless for the GOTPLT64 case,
2889 as -1 | 1 still is -1. */
2890 h->got.offset |= 1;
2891
2892 if (GENERATE_RELATIVE_RELOC_P (info, h))
2893 {
2894 /* If this symbol isn't dynamic in PIC,
2895 generate R_X86_64_RELATIVE here. */
2896 eh->no_finish_dynamic_symbol = 1;
2897 relative_reloc = TRUE;
2898 }
2899 }
2900 }
2901 else
2902 unresolved_reloc = FALSE;
2903 }
2904 else
2905 {
2906 if (local_got_offsets == NULL)
2907 abort ();
2908
2909 off = local_got_offsets[r_symndx];
2910
2911 /* The offset must always be a multiple of 8. We use
2912 the least significant bit to record whether we have
2913 already generated the necessary reloc. */
2914 if ((off & 1) != 0)
2915 off &= ~1;
2916 else
2917 {
2918 bfd_put_64 (output_bfd, relocation,
2919 base_got->contents + off);
2920 local_got_offsets[r_symndx] |= 1;
2921
2922 /* NB: GOTPCREL relocations against a local absolute
2923 symbol store the relocation value in the GOT slot
2924 without a relative relocation. */
2925 if (bfd_link_pic (info)
2926 && !(sym->st_shndx == SHN_ABS
2927 && (r_type == R_X86_64_GOTPCREL
2928 || r_type == R_X86_64_GOTPCRELX
2929 || r_type == R_X86_64_REX_GOTPCRELX)))
2930 relative_reloc = TRUE;
2931 }
2932 }
2933
2934 if (relative_reloc)
2935 {
2936 asection *s;
2937 Elf_Internal_Rela outrel;
2938
2939 /* We need to generate a R_X86_64_RELATIVE reloc
2940 for the dynamic linker. */
2941 s = htab->elf.srelgot;
2942 if (s == NULL)
2943 abort ();
2944
2945 outrel.r_offset = (base_got->output_section->vma
2946 + base_got->output_offset
2947 + off);
2948 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2949 outrel.r_addend = relocation;
2950 elf_append_rela (output_bfd, s, &outrel);
2951 }
2952
2953 if (off >= (bfd_vma) -2)
2954 abort ();
2955
2956 relocation = base_got->output_section->vma
2957 + base_got->output_offset + off;
2958 if (r_type != R_X86_64_GOTPCREL
2959 && r_type != R_X86_64_GOTPCRELX
2960 && r_type != R_X86_64_REX_GOTPCRELX
2961 && r_type != R_X86_64_GOTPCREL64)
2962 relocation -= htab->elf.sgotplt->output_section->vma
2963 - htab->elf.sgotplt->output_offset;
2964
2965 break;
2966
2967 case R_X86_64_GOTOFF64:
2968 /* Relocation is relative to the start of the global offset
2969 table. */
2970
2971 /* Check to make sure it isn't a protected function or data
2972 symbol for a shared library, since it may not be local when
2973 used as a function address or with copy relocation. We also
2974 need to make sure that the symbol is referenced locally. */
2975 if (bfd_link_pic (info) && h)
2976 {
2977 if (!h->def_regular)
2978 {
2979 const char *v;
2980
2981 switch (ELF_ST_VISIBILITY (h->other))
2982 {
2983 case STV_HIDDEN:
2984 v = _("hidden symbol");
2985 break;
2986 case STV_INTERNAL:
2987 v = _("internal symbol");
2988 break;
2989 case STV_PROTECTED:
2990 v = _("protected symbol");
2991 break;
2992 default:
2993 v = _("symbol");
2994 break;
2995 }
2996
2997 _bfd_error_handler
2998 /* xgettext:c-format */
2999 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3000 " `%s' can not be used when making a shared object"),
3001 input_bfd, v, h->root.root.string);
3002 bfd_set_error (bfd_error_bad_value);
3003 return FALSE;
3004 }
3005 else if (!bfd_link_executable (info)
3006 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3007 && (h->type == STT_FUNC
3008 || h->type == STT_OBJECT)
3009 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3010 {
3011 _bfd_error_handler
3012 /* xgettext:c-format */
3013 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3014 " `%s' can not be used when making a shared object"),
3015 input_bfd,
3016 h->type == STT_FUNC ? "function" : "data",
3017 h->root.root.string);
3018 bfd_set_error (bfd_error_bad_value);
3019 return FALSE;
3020 }
3021 }
3022
3023 /* Note that sgot is not involved in this
3024 calculation. We always want the start of .got.plt. If we
3025 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3026 permitted by the ABI, we might have to change this
3027 calculation. */
3028 relocation -= htab->elf.sgotplt->output_section->vma
3029 + htab->elf.sgotplt->output_offset;
3030 break;
3031
3032 case R_X86_64_GOTPC32:
3033 case R_X86_64_GOTPC64:
3034 /* Use global offset table as symbol value. */
3035 relocation = htab->elf.sgotplt->output_section->vma
3036 + htab->elf.sgotplt->output_offset;
3037 unresolved_reloc = FALSE;
3038 break;
3039
3040 case R_X86_64_PLTOFF64:
3041 /* Relocation is PLT entry relative to GOT. For local
3042 symbols it's the symbol itself relative to GOT. */
3043 if (h != NULL
3044 /* See PLT32 handling. */
3045 && (h->plt.offset != (bfd_vma) -1
3046 || eh->plt_got.offset != (bfd_vma) -1)
3047 && htab->elf.splt != NULL)
3048 {
3049 if (eh->plt_got.offset != (bfd_vma) -1)
3050 {
3051 /* Use the GOT PLT. */
3052 resolved_plt = htab->plt_got;
3053 plt_offset = eh->plt_got.offset;
3054 }
3055 else if (htab->plt_second != NULL)
3056 {
3057 resolved_plt = htab->plt_second;
3058 plt_offset = eh->plt_second.offset;
3059 }
3060 else
3061 {
3062 resolved_plt = htab->elf.splt;
3063 plt_offset = h->plt.offset;
3064 }
3065
3066 relocation = (resolved_plt->output_section->vma
3067 + resolved_plt->output_offset
3068 + plt_offset);
3069 unresolved_reloc = FALSE;
3070 }
3071
3072 relocation -= htab->elf.sgotplt->output_section->vma
3073 + htab->elf.sgotplt->output_offset;
3074 break;
3075
3076 case R_X86_64_PLT32:
3077 case R_X86_64_PLT32_BND:
3078 /* Relocation is to the entry for this symbol in the
3079 procedure linkage table. */
3080
3081 /* Resolve a PLT32 reloc against a local symbol directly,
3082 without using the procedure linkage table. */
3083 if (h == NULL)
3084 break;
3085
3086 if ((h->plt.offset == (bfd_vma) -1
3087 && eh->plt_got.offset == (bfd_vma) -1)
3088 || htab->elf.splt == NULL)
3089 {
3090 /* We didn't make a PLT entry for this symbol. This
3091 happens when statically linking PIC code, or when
3092 using -Bsymbolic. */
3093 break;
3094 }
3095
3096 use_plt:
3097 if (h->plt.offset != (bfd_vma) -1)
3098 {
3099 if (htab->plt_second != NULL)
3100 {
3101 resolved_plt = htab->plt_second;
3102 plt_offset = eh->plt_second.offset;
3103 }
3104 else
3105 {
3106 resolved_plt = htab->elf.splt;
3107 plt_offset = h->plt.offset;
3108 }
3109 }
3110 else
3111 {
3112 /* Use the GOT PLT. */
3113 resolved_plt = htab->plt_got;
3114 plt_offset = eh->plt_got.offset;
3115 }
3116
3117 relocation = (resolved_plt->output_section->vma
3118 + resolved_plt->output_offset
3119 + plt_offset);
3120 unresolved_reloc = FALSE;
3121 break;
3122
3123 case R_X86_64_SIZE32:
3124 case R_X86_64_SIZE64:
3125 /* Set to symbol size. */
3126 relocation = st_size;
3127 goto direct;
3128
3129 case R_X86_64_PC8:
3130 case R_X86_64_PC16:
3131 case R_X86_64_PC32:
3132 case R_X86_64_PC32_BND:
3133 /* Don't complain about -fPIC if the symbol is undefined when
3134 building an executable, unless it is an unresolved weak symbol,
3135 references a dynamic definition in a PIE, or -z nocopyreloc
3136 is used. */
3137 no_copyreloc_p
3138 = (info->nocopyreloc
3139 || (h != NULL
3140 && !h->root.linker_def
3141 && !h->root.ldscript_def
3142 && eh->def_protected
3143 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3144
3145 if ((input_section->flags & SEC_ALLOC) != 0
3146 && (input_section->flags & SEC_READONLY) != 0
3147 && h != NULL
3148 && ((bfd_link_executable (info)
3149 && ((h->root.type == bfd_link_hash_undefweak
3150 && (eh == NULL
3151 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3152 eh)))
3153 || (bfd_link_pie (info)
3154 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3155 && h->def_dynamic)
3156 || (no_copyreloc_p
3157 && h->def_dynamic
3158 && !(h->root.u.def.section->flags & SEC_CODE))))
3159 || bfd_link_dll (info)))
3160 {
3161 bfd_boolean fail = FALSE;
3162 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3163 {
3164 /* Symbol is referenced locally. Make sure it is
3165 defined locally. */
3166 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3167 }
3168 else if (bfd_link_pie (info))
3169 {
3170 /* We can only use PC-relative relocations in PIE
3171 from non-code sections. */
3172 if (h->type == STT_FUNC
3173 && (sec->flags & SEC_CODE) != 0)
3174 fail = TRUE;
3175 }
3176 else if (no_copyreloc_p || bfd_link_dll (info))
3177 {
3178 /* The symbol doesn't need a copy reloc and isn't
3179 referenced locally. Don't allow PC-relative
3180 relocations against default and protected
3181 symbols, since the address of a protected function
3182 and the location of protected data may not be in
3183 the shared object. */
3184 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3185 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3186 }
3187
3188 if (fail)
3189 return elf_x86_64_need_pic (info, input_bfd, input_section,
3190 h, NULL, NULL, howto);
3191 }
3192 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3193 as function address. */
3194 else if (h != NULL
3195 && (input_section->flags & SEC_CODE) == 0
3196 && bfd_link_pie (info)
3197 && h->type == STT_FUNC
3198 && !h->def_regular
3199 && h->def_dynamic)
3200 goto use_plt;
3201 /* Fall through. */
3202
3203 case R_X86_64_8:
3204 case R_X86_64_16:
3205 case R_X86_64_32:
3206 case R_X86_64_PC64:
3207 case R_X86_64_64:
3208 /* FIXME: The ABI says the linker should make sure the value is
3209 the same when it's zero-extended to 64 bits. */
3210
3211 direct:
3212 if ((input_section->flags & SEC_ALLOC) == 0)
3213 break;
3214
3215 need_copy_reloc_in_pie = (bfd_link_pie (info)
3216 && h != NULL
3217 && (h->needs_copy
3218 || eh->needs_copy
3219 || (h->root.type
3220 == bfd_link_hash_undefined))
3221 && (X86_PCREL_TYPE_P (r_type)
3222 || X86_SIZE_TYPE_P (r_type)));
3223
3224 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec,
3225 need_copy_reloc_in_pie,
3226 resolved_to_zero, FALSE))
3227 {
3228 Elf_Internal_Rela outrel;
3229 bfd_boolean skip, relocate;
3230 asection *sreloc;
3231
3232 /* When generating a shared object, these relocations
3233 are copied into the output file to be resolved at run
3234 time. */
3235 skip = FALSE;
3236 relocate = FALSE;
3237
3238 outrel.r_offset =
3239 _bfd_elf_section_offset (output_bfd, info, input_section,
3240 rel->r_offset);
3241 if (outrel.r_offset == (bfd_vma) -1)
3242 skip = TRUE;
3243 else if (outrel.r_offset == (bfd_vma) -2)
3244 skip = TRUE, relocate = TRUE;
3245
3246 outrel.r_offset += (input_section->output_section->vma
3247 + input_section->output_offset);
3248
3249 if (skip)
3250 memset (&outrel, 0, sizeof outrel);
3251
3252 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3253 {
3254 outrel.r_info = htab->r_info (h->dynindx, r_type);
3255 outrel.r_addend = rel->r_addend;
3256 }
3257 else
3258 {
3259 /* This symbol is local, or marked to become local.
3260 When the relocation overflow check is disabled, we
3261 convert R_X86_64_32 to a dynamic R_X86_64_RELATIVE. */
3262 if (r_type == htab->pointer_r_type
3263 || (r_type == R_X86_64_32
3264 && htab->params->no_reloc_overflow_check))
3265 {
3266 relocate = TRUE;
3267 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3268 outrel.r_addend = relocation + rel->r_addend;
3269 }
3270 else if (r_type == R_X86_64_64
3271 && !ABI_64_P (output_bfd))
3272 {
3273 relocate = TRUE;
3274 outrel.r_info = htab->r_info (0,
3275 R_X86_64_RELATIVE64);
3276 outrel.r_addend = relocation + rel->r_addend;
3277 /* Check addend overflow. */
3278 if ((outrel.r_addend & 0x80000000)
3279 != (rel->r_addend & 0x80000000))
3280 {
3281 const char *name;
3282 int addend = rel->r_addend;
3283 if (h && h->root.root.string)
3284 name = h->root.root.string;
3285 else
3286 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3287 sym, NULL);
3288 _bfd_error_handler
3289 /* xgettext:c-format */
3290 (_("%pB: addend %s%#x in relocation %s against "
3291 "symbol `%s' at %#" PRIx64
3292 " in section `%pA' is out of range"),
3293 input_bfd, addend < 0 ? "-" : "", addend,
3294 howto->name, name, (uint64_t) rel->r_offset,
3295 input_section);
3296 bfd_set_error (bfd_error_bad_value);
3297 return FALSE;
3298 }
3299 }
3300 else
3301 {
3302 long sindx;
3303
3304 if (bfd_is_abs_section (sec))
3305 sindx = 0;
3306 else if (sec == NULL || sec->owner == NULL)
3307 {
3308 bfd_set_error (bfd_error_bad_value);
3309 return FALSE;
3310 }
3311 else
3312 {
3313 asection *osec;
3314
3315 /* We are turning this relocation into one
3316 against a section symbol. It would be
3317 proper to subtract the symbol's value,
3318 osec->vma, from the emitted reloc addend,
3319 but ld.so expects buggy relocs. */
3320 osec = sec->output_section;
3321 sindx = elf_section_data (osec)->dynindx;
3322 if (sindx == 0)
3323 {
3324 asection *oi = htab->elf.text_index_section;
3325 sindx = elf_section_data (oi)->dynindx;
3326 }
3327 BFD_ASSERT (sindx != 0);
3328 }
3329
3330 outrel.r_info = htab->r_info (sindx, r_type);
3331 outrel.r_addend = relocation + rel->r_addend;
3332 }
3333 }
3334
3335 sreloc = elf_section_data (input_section)->sreloc;
3336
3337 if (sreloc == NULL || sreloc->contents == NULL)
3338 {
3339 r = bfd_reloc_notsupported;
3340 goto check_relocation_error;
3341 }
3342
3343 elf_append_rela (output_bfd, sreloc, &outrel);
3344
3345 /* If this reloc is against an external symbol, we do
3346 not want to fiddle with the addend. Otherwise, we
3347 need to include the symbol value so that it becomes
3348 an addend for the dynamic reloc. */
3349 if (! relocate)
3350 continue;
3351 }
3352
3353 break;
3354
3355 case R_X86_64_TLSGD:
3356 case R_X86_64_GOTPC32_TLSDESC:
3357 case R_X86_64_TLSDESC_CALL:
3358 case R_X86_64_GOTTPOFF:
3359 tls_type = GOT_UNKNOWN;
3360 if (h == NULL && local_got_offsets)
3361 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3362 else if (h != NULL)
3363 tls_type = elf_x86_hash_entry (h)->tls_type;
3364
3365 r_type_tls = r_type;
3366 if (! elf_x86_64_tls_transition (info, input_bfd,
3367 input_section, contents,
3368 symtab_hdr, sym_hashes,
3369 &r_type_tls, tls_type, rel,
3370 relend, h, r_symndx, TRUE))
3371 return FALSE;
3372
3373 if (r_type_tls == R_X86_64_TPOFF32)
3374 {
3375 bfd_vma roff = rel->r_offset;
3376
3377 BFD_ASSERT (! unresolved_reloc);
3378
3379 if (r_type == R_X86_64_TLSGD)
3380 {
3381 /* GD->LE transition. For 64bit, change
3382 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3383 .word 0x6666; rex64; call __tls_get_addr@PLT
3384 or
3385 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3386 .byte 0x66; rex64
3387 call *__tls_get_addr@GOTPCREL(%rip)
3388 which may be converted to
3389 addr32 call __tls_get_addr
3390 into:
3391 movq %fs:0, %rax
3392 leaq foo@tpoff(%rax), %rax
3393 For 32bit, change
3394 leaq foo@tlsgd(%rip), %rdi
3395 .word 0x6666; rex64; call __tls_get_addr@PLT
3396 or
3397 leaq foo@tlsgd(%rip), %rdi
3398 .byte 0x66; rex64
3399 call *__tls_get_addr@GOTPCREL(%rip)
3400 which may be converted to
3401 addr32 call __tls_get_addr
3402 into:
3403 movl %fs:0, %eax
3404 leaq foo@tpoff(%rax), %rax
3405 For largepic, change:
3406 leaq foo@tlsgd(%rip), %rdi
3407 movabsq $__tls_get_addr@pltoff, %rax
3408 addq %r15, %rax
3409 call *%rax
3410 into:
3411 movq %fs:0, %rax
3412 leaq foo@tpoff(%rax), %rax
3413 nopw 0x0(%rax,%rax,1) */
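/* The byte strings copied in below are, roughly, the encodings of
   the sequences above with the foo@tpoff immediate left as zeroes
   to be filled in by the bfd_put_32 further down, e.g. for 64-bit:
     64 48 8b 04 25 00 00 00 00   movq %fs:0, %rax
     48 8d 80 00 00 00 00         leaq 0(%rax), %rax
     66 0f 1f 44 00 00            nopw 0x0(%rax,%rax,1)  (largepic only)  */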
3414 int largepic = 0;
3415 if (ABI_64_P (output_bfd))
3416 {
3417 if (contents[roff + 5] == 0xb8)
3418 {
3419 if (roff < 3
3420 || (roff - 3 + 22) > input_section->size)
3421 {
3422 corrupt_input:
3423 info->callbacks->einfo
3424 (_("%F%P: corrupt input: %pB\n"),
3425 input_bfd);
3426 return FALSE;
3427 }
3428 memcpy (contents + roff - 3,
3429 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3430 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3431 largepic = 1;
3432 }
3433 else
3434 {
3435 if (roff < 4
3436 || (roff - 4 + 16) > input_section->size)
3437 goto corrupt_input;
3438 memcpy (contents + roff - 4,
3439 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3440 16);
3441 }
3442 }
3443 else
3444 {
3445 if (roff < 3
3446 || (roff - 3 + 15) > input_section->size)
3447 goto corrupt_input;
3448 memcpy (contents + roff - 3,
3449 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3450 15);
3451 }
3452 bfd_put_32 (output_bfd,
3453 elf_x86_64_tpoff (info, relocation),
3454 contents + roff + 8 + largepic);
3455 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3456 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3457 rel++;
3458 wrel++;
3459 continue;
3460 }
3461 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3462 {
3463 /* GDesc -> LE transition.
3464 It's originally something like:
3465 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3466 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3467
3468 Change it to:
3469 movq $x@tpoff, %rax <--- LP64 mode.
3470 rex movl $x@tpoff, %eax <--- X32 mode.
3471 */
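/* The bit shuffle below keeps the fixed 0x40 REX bits and REX.W from
   the original prefix and moves REX.R down to REX.B, since the
   destination register moves from the ModRM reg field into the r/m
   field of the new mov-immediate; the ModRM rewrite that follows
   mirrors the GOTPCRELX mov conversion earlier in this file.  */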
3472
3473 unsigned int val, type;
3474
3475 if (roff < 3)
3476 goto corrupt_input;
3477 type = bfd_get_8 (input_bfd, contents + roff - 3);
3478 val = bfd_get_8 (input_bfd, contents + roff - 1);
3479 bfd_put_8 (output_bfd,
3480 (type & 0x48) | ((type >> 2) & 1),
3481 contents + roff - 3);
3482 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3483 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3484 contents + roff - 1);
3485 bfd_put_32 (output_bfd,
3486 elf_x86_64_tpoff (info, relocation),
3487 contents + roff);
3488 continue;
3489 }
3490 else if (r_type == R_X86_64_TLSDESC_CALL)
3491 {
3492 /* GDesc -> LE transition.
3493 It's originally:
3494 call *(%rax) <--- LP64 mode.
3495 call *(%eax) <--- X32 mode.
3496 Turn it into:
3497 xchg %ax,%ax <-- LP64 mode.
3498 nopl (%rax) <-- X32 mode.
3499 */
3500 unsigned int prefix = 0;
3501 if (!ABI_64_P (input_bfd))
3502 {
3503 /* Check for call *x@tlsdesc(%eax). */
3504 if (contents[roff] == 0x67)
3505 prefix = 1;
3506 }
3507 if (prefix)
3508 {
3509 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3510 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3511 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3512 }
3513 else
3514 {
3515 bfd_put_8 (output_bfd, 0x66, contents + roff);
3516 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3517 }
3518 continue;
3519 }
3520 else if (r_type == R_X86_64_GOTTPOFF)
3521 {
3522 /* IE->LE transition:
3523 For 64bit, originally it can be one of:
3524 movq foo@gottpoff(%rip), %reg
3525 addq foo@gottpoff(%rip), %reg
3526 We change it into:
3527 movq $foo, %reg
3528 leaq foo(%reg), %reg
3529 addq $foo, %reg.
3530 For 32bit, originally it can be one of:
3531 movq foo@gottpoff(%rip), %reg
3532 addl foo@gottpoff(%rip), %reg
3533 We change it into:
3534 movq $foo, %reg
3535 leal foo(%reg), %reg
3536 addl $foo, %reg. */
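/* In the rewrites below the REX prefix (if present) has to follow
   the register operand as it moves out of the ModRM reg field: e.g.
   0x4c (REX.WR) becomes 0x49 (REX.WB) for the mov/add-immediate
   forms and 0x4d (REX.WRB) for the lea form, where the register is
   used as both base and destination; 0x44 is handled likewise for
   x32.  */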
3537
3538 unsigned int val, type, reg;
3539
3540 if (roff >= 3)
3541 val = bfd_get_8 (input_bfd, contents + roff - 3);
3542 else
3543 {
3544 if (roff < 2)
3545 goto corrupt_input;
3546 val = 0;
3547 }
3548 type = bfd_get_8 (input_bfd, contents + roff - 2);
3549 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3550 reg >>= 3;
3551 if (type == 0x8b)
3552 {
3553 /* movq */
3554 if (val == 0x4c)
3555 {
3556 if (roff < 3)
3557 goto corrupt_input;
3558 bfd_put_8 (output_bfd, 0x49,
3559 contents + roff - 3);
3560 }
3561 else if (!ABI_64_P (output_bfd) && val == 0x44)
3562 {
3563 if (roff < 3)
3564 goto corrupt_input;
3565 bfd_put_8 (output_bfd, 0x41,
3566 contents + roff - 3);
3567 }
3568 bfd_put_8 (output_bfd, 0xc7,
3569 contents + roff - 2);
3570 bfd_put_8 (output_bfd, 0xc0 | reg,
3571 contents + roff - 1);
3572 }
3573 else if (reg == 4)
3574 {
3575 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3576 is special */
3577 if (val == 0x4c)
3578 {
3579 if (roff < 3)
3580 goto corrupt_input;
3581 bfd_put_8 (output_bfd, 0x49,
3582 contents + roff - 3);
3583 }
3584 else if (!ABI_64_P (output_bfd) && val == 0x44)
3585 {
3586 if (roff < 3)
3587 goto corrupt_input;
3588 bfd_put_8 (output_bfd, 0x41,
3589 contents + roff - 3);
3590 }
3591 bfd_put_8 (output_bfd, 0x81,
3592 contents + roff - 2);
3593 bfd_put_8 (output_bfd, 0xc0 | reg,
3594 contents + roff - 1);
3595 }
3596 else
3597 {
3598 /* addq/addl -> leaq/leal */
3599 if (val == 0x4c)
3600 {
3601 if (roff < 3)
3602 goto corrupt_input;
3603 bfd_put_8 (output_bfd, 0x4d,
3604 contents + roff - 3);
3605 }
3606 else if (!ABI_64_P (output_bfd) && val == 0x44)
3607 {
3608 if (roff < 3)
3609 goto corrupt_input;
3610 bfd_put_8 (output_bfd, 0x45,
3611 contents + roff - 3);
3612 }
3613 bfd_put_8 (output_bfd, 0x8d,
3614 contents + roff - 2);
3615 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3616 contents + roff - 1);
3617 }
3618 bfd_put_32 (output_bfd,
3619 elf_x86_64_tpoff (info, relocation),
3620 contents + roff);
3621 continue;
3622 }
3623 else
3624 BFD_ASSERT (FALSE);
3625 }
3626
3627 if (htab->elf.sgot == NULL)
3628 abort ();
3629
3630 if (h != NULL)
3631 {
3632 off = h->got.offset;
3633 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3634 }
3635 else
3636 {
3637 if (local_got_offsets == NULL)
3638 abort ();
3639
3640 off = local_got_offsets[r_symndx];
3641 offplt = local_tlsdesc_gotents[r_symndx];
3642 }
3643
3644 if ((off & 1) != 0)
3645 off &= ~1;
3646 else
3647 {
3648 Elf_Internal_Rela outrel;
3649 int dr_type, indx;
3650 asection *sreloc;
3651
3652 if (htab->elf.srelgot == NULL)
3653 abort ();
3654
3655 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3656
3657 if (GOT_TLS_GDESC_P (tls_type))
3658 {
3659 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3660 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3661 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3662 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3663 + htab->elf.sgotplt->output_offset
3664 + offplt
3665 + htab->sgotplt_jump_table_size);
3666 sreloc = htab->elf.srelplt;
3667 if (indx == 0)
3668 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3669 else
3670 outrel.r_addend = 0;
3671 elf_append_rela (output_bfd, sreloc, &outrel);
3672 }
3673
3674 sreloc = htab->elf.srelgot;
3675
3676 outrel.r_offset = (htab->elf.sgot->output_section->vma
3677 + htab->elf.sgot->output_offset + off);
3678
3679 if (GOT_TLS_GD_P (tls_type))
3680 dr_type = R_X86_64_DTPMOD64;
3681 else if (GOT_TLS_GDESC_P (tls_type))
3682 goto dr_done;
3683 else
3684 dr_type = R_X86_64_TPOFF64;
3685
3686 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3687 outrel.r_addend = 0;
3688 if ((dr_type == R_X86_64_TPOFF64
3689 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3690 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3691 outrel.r_info = htab->r_info (indx, dr_type);
3692
3693 elf_append_rela (output_bfd, sreloc, &outrel);
3694
3695 if (GOT_TLS_GD_P (tls_type))
3696 {
3697 if (indx == 0)
3698 {
3699 BFD_ASSERT (! unresolved_reloc);
3700 bfd_put_64 (output_bfd,
3701 relocation - _bfd_x86_elf_dtpoff_base (info),
3702 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3703 }
3704 else
3705 {
3706 bfd_put_64 (output_bfd, 0,
3707 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3708 outrel.r_info = htab->r_info (indx,
3709 R_X86_64_DTPOFF64);
3710 outrel.r_offset += GOT_ENTRY_SIZE;
3711 elf_append_rela (output_bfd, sreloc,
3712 &outrel);
3713 }
3714 }
3715
3716 dr_done:
3717 if (h != NULL)
3718 h->got.offset |= 1;
3719 else
3720 local_got_offsets[r_symndx] |= 1;
3721 }
3722
3723 if (off >= (bfd_vma) -2
3724 && ! GOT_TLS_GDESC_P (tls_type))
3725 abort ();
3726 if (r_type_tls == r_type)
3727 {
3728 if (r_type == R_X86_64_GOTPC32_TLSDESC
3729 || r_type == R_X86_64_TLSDESC_CALL)
3730 relocation = htab->elf.sgotplt->output_section->vma
3731 + htab->elf.sgotplt->output_offset
3732 + offplt + htab->sgotplt_jump_table_size;
3733 else
3734 relocation = htab->elf.sgot->output_section->vma
3735 + htab->elf.sgot->output_offset + off;
3736 unresolved_reloc = FALSE;
3737 }
3738 else
3739 {
3740 bfd_vma roff = rel->r_offset;
3741
3742 if (r_type == R_X86_64_TLSGD)
3743 {
3744 /* GD->IE transition. For 64bit, change
3745 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3746 .word 0x6666; rex64; call __tls_get_addr@PLT
3747 or
3748 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3749 .byte 0x66; rex64
3750 		     call *__tls_get_addr@GOTPCREL(%rip)
3751 which may be converted to
3752 addr32 call __tls_get_addr
3753 into:
3754 movq %fs:0, %rax
3755 addq foo@gottpoff(%rip), %rax
3756 For 32bit, change
3757 leaq foo@tlsgd(%rip), %rdi
3758 .word 0x6666; rex64; call __tls_get_addr@PLT
3759 or
3760 leaq foo@tlsgd(%rip), %rdi
3761 .byte 0x66; rex64;
3762 call *__tls_get_addr@GOTPCREL(%rip)
3763 which may be converted to
3764 addr32 call __tls_get_addr
3765 into:
3766 movl %fs:0, %eax
3767 addq foo@gottpoff(%rip), %rax
3768 For largepic, change:
3769 leaq foo@tlsgd(%rip), %rdi
3770 movabsq $__tls_get_addr@pltoff, %rax
3771 addq %r15, %rax
3772 call *%rax
3773 into:
3774 movq %fs:0, %rax
3775 		     addq foo@gottpoff(%rip), %rax
3776 nopw 0x0(%rax,%rax,1) */
3777 int largepic = 0;
3778 if (ABI_64_P (output_bfd))
3779 {
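		      /* A movabsq right after the leaq (REX.W at roff + 4,
			 opcode 0xb8 at roff + 5) identifies the large-PIC
			 sequence above, which needs the 22-byte
			 replacement.  */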
3780 if (contents[roff + 5] == 0xb8)
3781 {
3782 if (roff < 3
3783 || (roff - 3 + 22) > input_section->size)
3784 goto corrupt_input;
3785 memcpy (contents + roff - 3,
3786 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3787 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3788 largepic = 1;
3789 }
3790 else
3791 {
3792 if (roff < 4
3793 || (roff - 4 + 16) > input_section->size)
3794 goto corrupt_input;
3795 memcpy (contents + roff - 4,
3796 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3797 16);
3798 }
3799 }
3800 else
3801 {
3802 if (roff < 3
3803 || (roff - 3 + 15) > input_section->size)
3804 goto corrupt_input;
3805 memcpy (contents + roff - 3,
3806 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3807 15);
3808 }
3809
3810 relocation = (htab->elf.sgot->output_section->vma
3811 + htab->elf.sgot->output_offset + off
3812 - roff
3813 - largepic
3814 - input_section->output_section->vma
3815 - input_section->output_offset
3816 - 12);
3817 bfd_put_32 (output_bfd, relocation,
3818 contents + roff + 8 + largepic);
3819 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3820 rel++;
3821 wrel++;
3822 continue;
3823 }
3824 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3825 {
3826 /* GDesc -> IE transition.
3827 It's originally something like:
3828 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3829 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3830
3831 Change it to:
3832 # before xchg %ax,%ax in LP64 mode.
3833 movq x@gottpoff(%rip), %rax
3834 # before nopl (%rax) in X32 mode.
3835 rex movl x@gottpoff(%rip), %eax
3836 */
3837
3838 /* Now modify the instruction as appropriate. To
3839 turn a lea into a mov in the form we use it, it
3840 suffices to change the second byte from 0x8d to
3841 0x8b. */
3842 if (roff < 2)
3843 goto corrupt_input;
3844 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3845
3846 bfd_put_32 (output_bfd,
3847 htab->elf.sgot->output_section->vma
3848 + htab->elf.sgot->output_offset + off
3849 - rel->r_offset
3850 - input_section->output_section->vma
3851 - input_section->output_offset
3852 - 4,
3853 contents + roff);
3854 continue;
3855 }
3856 else if (r_type == R_X86_64_TLSDESC_CALL)
3857 {
3858 /* GDesc -> IE transition.
3859 It's originally:
3860 call *(%rax) <--- LP64 mode.
3861 call *(%eax) <--- X32 mode.
3862
3863 Change it to:
3864 xchg %ax, %ax <-- LP64 mode.
3865 nopl (%rax) <-- X32 mode.
3866 */
3867
3868 unsigned int prefix = 0;
3869 if (!ABI_64_P (input_bfd))
3870 {
3871 /* Check for call *x@tlsdesc(%eax). */
3872 if (contents[roff] == 0x67)
3873 prefix = 1;
3874 }
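		      /* The replacement must be the same length as the call
			 it overwrites: call *(%rax) is 2 bytes (0xff 0x10)
			 and becomes xchg %ax,%ax; with the addr32 prefix
			 the call is 3 bytes and becomes nopl (%rax).  */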
3875 if (prefix)
3876 {
3877 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3878 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3879 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3880 }
3881 else
3882 {
3883 bfd_put_8 (output_bfd, 0x66, contents + roff);
3884 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3885 }
3886 continue;
3887 }
3888 else
3889 BFD_ASSERT (FALSE);
3890 }
3891 break;
3892
3893 case R_X86_64_TLSLD:
3894 if (! elf_x86_64_tls_transition (info, input_bfd,
3895 input_section, contents,
3896 symtab_hdr, sym_hashes,
3897 &r_type, GOT_UNKNOWN, rel,
3898 relend, h, r_symndx, TRUE))
3899 return FALSE;
3900
3901 if (r_type != R_X86_64_TLSLD)
3902 {
3903 /* LD->LE transition:
3904 leaq foo@tlsld(%rip), %rdi
3905 call __tls_get_addr@PLT
3906 For 64bit, we change it into:
3907 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3908 For 32bit, we change it into:
3909 nopl 0x0(%rax); movl %fs:0, %eax
3910 Or
3911 leaq foo@tlsld(%rip), %rdi;
3912 call *__tls_get_addr@GOTPCREL(%rip)
3913 which may be converted to
3914 addr32 call __tls_get_addr
3915 For 64bit, we change it into:
3916 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3917 For 32bit, we change it into:
3918 nopw 0x0(%rax); movl %fs:0, %eax
3919 For largepic, change:
3920 		     leaq foo@tlsld(%rip), %rdi
3921 movabsq $__tls_get_addr@pltoff, %rax
3922 addq %rbx, %rax
3923 call *%rax
3924 into
3925 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3926 		     movq %fs:0, %rax */
3927
3928 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3929 if (ABI_64_P (output_bfd))
3930 {
3931 if ((rel->r_offset + 5) >= input_section->size)
3932 goto corrupt_input;
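	      /* Identify which __tls_get_addr call form follows the leaq:
		 0xb8 at offset 5 is the movabsq of the large-PIC sequence,
		 0xff at offset 4 is the indirect call via GOTPCREL, and
		 0x67 is the addr32 prefix of the converted direct call;
		 anything else is the plain direct call.  Each replacement
		 below matches the length of the sequence it overwrites.  */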
3933 if (contents[rel->r_offset + 5] == 0xb8)
3934 {
3935 if (rel->r_offset < 3
3936 || (rel->r_offset - 3 + 22) > input_section->size)
3937 goto corrupt_input;
3938 memcpy (contents + rel->r_offset - 3,
3939 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3940 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3941 }
3942 else if (contents[rel->r_offset + 4] == 0xff
3943 || contents[rel->r_offset + 4] == 0x67)
3944 {
3945 if (rel->r_offset < 3
3946 || (rel->r_offset - 3 + 13) > input_section->size)
3947 goto corrupt_input;
3948 memcpy (contents + rel->r_offset - 3,
3949 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3950 13);
3951
3952 }
3953 else
3954 {
3955 if (rel->r_offset < 3
3956 || (rel->r_offset - 3 + 12) > input_section->size)
3957 goto corrupt_input;
3958 memcpy (contents + rel->r_offset - 3,
3959 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3960 }
3961 }
3962 else
3963 {
3964 if ((rel->r_offset + 4) >= input_section->size)
3965 goto corrupt_input;
3966 if (contents[rel->r_offset + 4] == 0xff)
3967 {
3968 if (rel->r_offset < 3
3969 || (rel->r_offset - 3 + 13) > input_section->size)
3970 goto corrupt_input;
3971 memcpy (contents + rel->r_offset - 3,
3972 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3973 13);
3974 }
3975 else
3976 {
3977 if (rel->r_offset < 3
3978 || (rel->r_offset - 3 + 12) > input_section->size)
3979 goto corrupt_input;
3980 memcpy (contents + rel->r_offset - 3,
3981 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3982 }
3983 }
3984 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3985 and R_X86_64_PLTOFF64. */
3986 rel++;
3987 wrel++;
3988 continue;
3989 }
3990
3991 if (htab->elf.sgot == NULL)
3992 abort ();
3993
3994 off = htab->tls_ld_or_ldm_got.offset;
3995 if (off & 1)
3996 off &= ~1;
3997 else
3998 {
3999 Elf_Internal_Rela outrel;
4000
4001 if (htab->elf.srelgot == NULL)
4002 abort ();
4003
4004 outrel.r_offset = (htab->elf.sgot->output_section->vma
4005 + htab->elf.sgot->output_offset + off);
4006
4007 bfd_put_64 (output_bfd, 0,
4008 htab->elf.sgot->contents + off);
4009 bfd_put_64 (output_bfd, 0,
4010 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4011 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4012 outrel.r_addend = 0;
4013 elf_append_rela (output_bfd, htab->elf.srelgot,
4014 &outrel);
4015 htab->tls_ld_or_ldm_got.offset |= 1;
4016 }
4017 relocation = htab->elf.sgot->output_section->vma
4018 + htab->elf.sgot->output_offset + off;
4019 unresolved_reloc = FALSE;
4020 break;
4021
4022 case R_X86_64_DTPOFF32:
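	  /* In an executable, a DTPOFF32 in a code section follows an LD
	     sequence that has been converted to LE above (movq %fs:0,
	     %rax), so resolve it against the thread pointer; otherwise
	     keep the DTP-relative value (shared objects, debug info).  */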
4023 if (!bfd_link_executable (info)
4024 || (input_section->flags & SEC_CODE) == 0)
4025 relocation -= _bfd_x86_elf_dtpoff_base (info);
4026 else
4027 relocation = elf_x86_64_tpoff (info, relocation);
4028 break;
4029
4030 case R_X86_64_TPOFF32:
4031 case R_X86_64_TPOFF64:
4032 BFD_ASSERT (bfd_link_executable (info));
4033 relocation = elf_x86_64_tpoff (info, relocation);
4034 break;
4035
4036 case R_X86_64_DTPOFF64:
4037 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4038 relocation -= _bfd_x86_elf_dtpoff_base (info);
4039 break;
4040
4041 default:
4042 break;
4043 }
4044
4045 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4046 because such sections are not SEC_ALLOC and thus ld.so will
4047 not process them. */
4048 if (unresolved_reloc
4049 && !((input_section->flags & SEC_DEBUGGING) != 0
4050 && h->def_dynamic)
4051 && _bfd_elf_section_offset (output_bfd, info, input_section,
4052 rel->r_offset) != (bfd_vma) -1)
4053 {
4054 switch (r_type)
4055 {
4056 case R_X86_64_32S:
4057 sec = h->root.u.def.section;
4058 if ((info->nocopyreloc
4059 || (eh->def_protected
4060 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4061 && !(h->root.u.def.section->flags & SEC_CODE))
4062 return elf_x86_64_need_pic (info, input_bfd, input_section,
4063 h, NULL, NULL, howto);
4064 /* Fall through. */
4065
4066 default:
4067 _bfd_error_handler
4068 /* xgettext:c-format */
4069 (_("%pB(%pA+%#" PRIx64 "): "
4070 "unresolvable %s relocation against symbol `%s'"),
4071 input_bfd,
4072 input_section,
4073 (uint64_t) rel->r_offset,
4074 howto->name,
4075 h->root.root.string);
4076 return FALSE;
4077 }
4078 }
4079
4080 do_relocation:
4081 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4082 contents, rel->r_offset,
4083 relocation, rel->r_addend);
4084
4085 check_relocation_error:
4086 if (r != bfd_reloc_ok)
4087 {
4088 const char *name;
4089
4090 if (h != NULL)
4091 name = h->root.root.string;
4092 else
4093 {
4094 name = bfd_elf_string_from_elf_section (input_bfd,
4095 symtab_hdr->sh_link,
4096 sym->st_name);
4097 if (name == NULL)
4098 return FALSE;
4099 if (*name == '\0')
4100 name = bfd_section_name (sec);
4101 }
4102
4103 if (r == bfd_reloc_overflow)
4104 {
4105 if (converted_reloc)
4106 {
4107 info->callbacks->einfo
4108 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4109 return FALSE;
4110 }
4111 (*info->callbacks->reloc_overflow)
4112 (info, (h ? &h->root : NULL), name, howto->name,
4113 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4114 }
4115 else
4116 {
4117 _bfd_error_handler
4118 /* xgettext:c-format */
4119 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4120 input_bfd, input_section,
4121 (uint64_t) rel->r_offset, name, (int) r);
4122 return FALSE;
4123 }
4124 }
4125
4126 if (wrel != rel)
4127 *wrel = *rel;
4128 }
4129
4130 if (wrel != rel)
4131 {
4132 Elf_Internal_Shdr *rel_hdr;
4133 size_t deleted = rel - wrel;
4134
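      /* Some relocations were skipped above (wrel lags behind rel);
	 shrink the output and input relocation headers and the reloc
	 count to the number of relocations actually kept.  */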
4135 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4136 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4137 if (rel_hdr->sh_size == 0)
4138 {
4139 /* It is too late to remove an empty reloc section. Leave
4140 one NONE reloc.
4141 ??? What is wrong with an empty section??? */
4142 rel_hdr->sh_size = rel_hdr->sh_entsize;
4143 deleted -= 1;
4144 }
4145 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4146 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4147 input_section->reloc_count -= deleted;
4148 }
4149
4150 return TRUE;
4151 }
4152
4153 /* Finish up dynamic symbol handling. We set the contents of various
4154 dynamic sections here. */
4155
4156 static bfd_boolean
4157 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4158 struct bfd_link_info *info,
4159 struct elf_link_hash_entry *h,
4160 Elf_Internal_Sym *sym)
4161 {
4162 struct elf_x86_link_hash_table *htab;
4163 bfd_boolean use_plt_second;
4164 struct elf_x86_link_hash_entry *eh;
4165 bfd_boolean local_undefweak;
4166
4167 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4168 if (htab == NULL)
4169 return FALSE;
4170
4171   /* Use the second PLT section only if there is a .plt section.  */
4172 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4173
4174 eh = (struct elf_x86_link_hash_entry *) h;
4175 if (eh->no_finish_dynamic_symbol)
4176 abort ();
4177
4178 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4179      resolved undefined weak symbols in an executable so that their
4180 references have value 0 at run-time. */
4181 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4182
4183 if (h->plt.offset != (bfd_vma) -1)
4184 {
4185 bfd_vma plt_index;
4186 bfd_vma got_offset, plt_offset;
4187 Elf_Internal_Rela rela;
4188 bfd_byte *loc;
4189 asection *plt, *gotplt, *relplt, *resolved_plt;
4190 const struct elf_backend_data *bed;
4191 bfd_vma plt_got_pcrel_offset;
4192
4193 /* When building a static executable, use .iplt, .igot.plt and
4194 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4195 if (htab->elf.splt != NULL)
4196 {
4197 plt = htab->elf.splt;
4198 gotplt = htab->elf.sgotplt;
4199 relplt = htab->elf.srelplt;
4200 }
4201 else
4202 {
4203 plt = htab->elf.iplt;
4204 gotplt = htab->elf.igotplt;
4205 relplt = htab->elf.irelplt;
4206 }
4207
4208 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4209
4210 /* Get the index in the procedure linkage table which
4211 corresponds to this symbol. This is the index of this symbol
4212 in all the symbols for which we are making plt entries. The
4213 first entry in the procedure linkage table is reserved.
4214
4215 Get the offset into the .got table of the entry that
4216 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4217 bytes. The first three are reserved for the dynamic linker.
4218
4219 For static executables, we don't reserve anything. */
4220
4221 if (plt == htab->elf.splt)
4222 {
4223 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4224 - htab->plt.has_plt0);
4225 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4226 }
4227 else
4228 {
4229 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4230 got_offset = got_offset * GOT_ENTRY_SIZE;
4231 }
4232
4233 /* Fill in the entry in the procedure linkage table. */
4234 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4235 htab->plt.plt_entry_size);
4236 if (use_plt_second)
4237 {
4238 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4239 htab->non_lazy_plt->plt_entry,
4240 htab->non_lazy_plt->plt_entry_size);
4241
4242 resolved_plt = htab->plt_second;
4243 plt_offset = eh->plt_second.offset;
4244 }
4245 else
4246 {
4247 resolved_plt = plt;
4248 plt_offset = h->plt.offset;
4249 }
4250
4251 /* Insert the relocation positions of the plt section. */
4252
4253       /* Put the offset to the GOT entry into the PC-relative
4254 	 instruction, subtracting the size of that instruction.  */
4255 plt_got_pcrel_offset = (gotplt->output_section->vma
4256 + gotplt->output_offset
4257 + got_offset
4258 - resolved_plt->output_section->vma
4259 - resolved_plt->output_offset
4260 - plt_offset
4261 - htab->plt.plt_got_insn_size);
4262
4263 /* Check PC-relative offset overflow in PLT entry. */
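      /* Biasing by 0x80000000 maps the representable signed 32-bit
	 displacements onto [0, 0xffffffff]; anything larger does not
	 fit in the 32-bit field of the PLT instruction.  */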
4264 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4265 /* xgettext:c-format */
4266 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4267 output_bfd, h->root.root.string);
4268
4269 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4270 (resolved_plt->contents + plt_offset
4271 + htab->plt.plt_got_offset));
4272
4273       /* Fill in the entry in the global offset table; initially this
4274 	 points to the second part of the PLT entry.  Leave the entry
4275 	 as zero for an undefined weak symbol in a PIE; no PLT relocation
4276 	 is emitted against an undefined weak symbol in a PIE.  */
4277 if (!local_undefweak)
4278 {
4279 if (htab->plt.has_plt0)
4280 bfd_put_64 (output_bfd, (plt->output_section->vma
4281 + plt->output_offset
4282 + h->plt.offset
4283 + htab->lazy_plt->plt_lazy_offset),
4284 gotplt->contents + got_offset);
4285
4286 /* Fill in the entry in the .rela.plt section. */
4287 rela.r_offset = (gotplt->output_section->vma
4288 + gotplt->output_offset
4289 + got_offset);
4290 if (PLT_LOCAL_IFUNC_P (info, h))
4291 {
4292 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4293 h->root.root.string,
4294 h->root.u.def.section->owner);
4295
4296 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4297 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4298 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4299 rela.r_addend = (h->root.u.def.value
4300 + h->root.u.def.section->output_section->vma
4301 + h->root.u.def.section->output_offset);
4302 /* R_X86_64_IRELATIVE comes last. */
4303 plt_index = htab->next_irelative_index--;
4304 }
4305 else
4306 {
4307 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4308 rela.r_addend = 0;
4309 plt_index = htab->next_jump_slot_index++;
4310 }
4311
4312 	  /* Don't fill in the second and third slots of the PLT entry
4313 	     for static executables or when there is no PLT0.  */
4314 if (plt == htab->elf.splt && htab->plt.has_plt0)
4315 {
4316 bfd_vma plt0_offset
4317 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4318
4319 /* Put relocation index. */
4320 bfd_put_32 (output_bfd, plt_index,
4321 (plt->contents + h->plt.offset
4322 + htab->lazy_plt->plt_reloc_offset));
4323
4324 /* Put offset for jmp .PLT0 and check for overflow. We don't
4325 check relocation index for overflow since branch displacement
4326 will overflow first. */
4327 if (plt0_offset > 0x80000000)
4328 /* xgettext:c-format */
4329 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4330 output_bfd, h->root.root.string);
4331 bfd_put_32 (output_bfd, - plt0_offset,
4332 (plt->contents + h->plt.offset
4333 + htab->lazy_plt->plt_plt_offset));
4334 }
4335
4336 bed = get_elf_backend_data (output_bfd);
4337 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4338 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4339 }
4340 }
4341 else if (eh->plt_got.offset != (bfd_vma) -1)
4342 {
4343 bfd_vma got_offset, plt_offset;
4344 asection *plt, *got;
4345 bfd_boolean got_after_plt;
4346 int32_t got_pcrel_offset;
4347
4348 /* Set the entry in the GOT procedure linkage table. */
4349 plt = htab->plt_got;
4350 got = htab->elf.sgot;
4351 got_offset = h->got.offset;
4352
4353 if (got_offset == (bfd_vma) -1
4354 || (h->type == STT_GNU_IFUNC && h->def_regular)
4355 || plt == NULL
4356 || got == NULL)
4357 abort ();
4358
4359 /* Use the non-lazy PLT entry template for the GOT PLT since they
4360 	 are identical.  */
4361 /* Fill in the entry in the GOT procedure linkage table. */
4362 plt_offset = eh->plt_got.offset;
4363 memcpy (plt->contents + plt_offset,
4364 htab->non_lazy_plt->plt_entry,
4365 htab->non_lazy_plt->plt_entry_size);
4366
4367       /* Put the offset to the GOT entry into the PC-relative
4368 	 instruction, subtracting the size of that instruction.  */
4369 got_pcrel_offset = (got->output_section->vma
4370 + got->output_offset
4371 + got_offset
4372 - plt->output_section->vma
4373 - plt->output_offset
4374 - plt_offset
4375 - htab->non_lazy_plt->plt_got_insn_size);
4376
4377 /* Check PC-relative offset overflow in GOT PLT entry. */
4378 got_after_plt = got->output_section->vma > plt->output_section->vma;
4379 if ((got_after_plt && got_pcrel_offset < 0)
4380 || (!got_after_plt && got_pcrel_offset > 0))
4381 /* xgettext:c-format */
4382 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4383 output_bfd, h->root.root.string);
4384
4385 bfd_put_32 (output_bfd, got_pcrel_offset,
4386 (plt->contents + plt_offset
4387 + htab->non_lazy_plt->plt_got_offset));
4388 }
4389
4390 if (!local_undefweak
4391 && !h->def_regular
4392 && (h->plt.offset != (bfd_vma) -1
4393 || eh->plt_got.offset != (bfd_vma) -1))
4394 {
4395 /* Mark the symbol as undefined, rather than as defined in
4396 the .plt section. Leave the value if there were any
4397 relocations where pointer equality matters (this is a clue
4398 for the dynamic linker, to make function pointer
4399 comparisons work between an application and shared
4400 library), otherwise set it to zero. If a function is only
4401 called from a binary, there is no need to slow down
4402 shared libraries because of that. */
4403 sym->st_shndx = SHN_UNDEF;
4404 if (!h->pointer_equality_needed)
4405 sym->st_value = 0;
4406 }
4407
4408 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4409
4410 /* Don't generate dynamic GOT relocation against undefined weak
4411 symbol in executable. */
4412 if (h->got.offset != (bfd_vma) -1
4413 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4414 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4415 && !local_undefweak)
4416 {
4417 Elf_Internal_Rela rela;
4418 asection *relgot = htab->elf.srelgot;
4419
4420 /* This symbol has an entry in the global offset table. Set it
4421 up. */
4422 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4423 abort ();
4424
4425 rela.r_offset = (htab->elf.sgot->output_section->vma
4426 + htab->elf.sgot->output_offset
4427 + (h->got.offset &~ (bfd_vma) 1));
4428
4429 /* If this is a static link, or it is a -Bsymbolic link and the
4430 symbol is defined locally or was forced to be local because
4431 of a version file, we just want to emit a RELATIVE reloc.
4432 The entry in the global offset table will already have been
4433 initialized in the relocate_section function. */
4434 if (h->def_regular
4435 && h->type == STT_GNU_IFUNC)
4436 {
4437 if (h->plt.offset == (bfd_vma) -1)
4438 {
4439 /* STT_GNU_IFUNC is referenced without PLT. */
4440 if (htab->elf.splt == NULL)
4441 {
4442 		  /* Use the .rel[a].iplt section to store .got relocations
4443 		     in a static executable.  */
4444 relgot = htab->elf.irelplt;
4445 }
4446 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4447 {
4448 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4449 h->root.root.string,
4450 h->root.u.def.section->owner);
4451
4452 rela.r_info = htab->r_info (0,
4453 R_X86_64_IRELATIVE);
4454 rela.r_addend = (h->root.u.def.value
4455 + h->root.u.def.section->output_section->vma
4456 + h->root.u.def.section->output_offset);
4457 }
4458 else
4459 goto do_glob_dat;
4460 }
4461 else if (bfd_link_pic (info))
4462 {
4463 /* Generate R_X86_64_GLOB_DAT. */
4464 goto do_glob_dat;
4465 }
4466 else
4467 {
4468 asection *plt;
4469 bfd_vma plt_offset;
4470
4471 if (!h->pointer_equality_needed)
4472 abort ();
4473
4474 	      /* For a non-shared object, we can't use .got.plt, which
4475 		 contains the real function address, if we need pointer
4476 		 equality.  We load the GOT entry with the PLT entry.  */
4477 if (htab->plt_second != NULL)
4478 {
4479 plt = htab->plt_second;
4480 plt_offset = eh->plt_second.offset;
4481 }
4482 else
4483 {
4484 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4485 plt_offset = h->plt.offset;
4486 }
4487 bfd_put_64 (output_bfd, (plt->output_section->vma
4488 + plt->output_offset
4489 + plt_offset),
4490 htab->elf.sgot->contents + h->got.offset);
4491 return TRUE;
4492 }
4493 }
4494 else if (bfd_link_pic (info)
4495 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4496 {
4497 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4498 return FALSE;
4499 BFD_ASSERT((h->got.offset & 1) != 0);
4500 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4501 rela.r_addend = (h->root.u.def.value
4502 + h->root.u.def.section->output_section->vma
4503 + h->root.u.def.section->output_offset);
4504 }
4505 else
4506 {
4507 BFD_ASSERT((h->got.offset & 1) == 0);
4508 do_glob_dat:
4509 bfd_put_64 (output_bfd, (bfd_vma) 0,
4510 htab->elf.sgot->contents + h->got.offset);
4511 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4512 rela.r_addend = 0;
4513 }
4514
4515 elf_append_rela (output_bfd, relgot, &rela);
4516 }
4517
4518 if (h->needs_copy)
4519 {
4520 Elf_Internal_Rela rela;
4521 asection *s;
4522
4523 /* This symbol needs a copy reloc. Set it up. */
4524 VERIFY_COPY_RELOC (h, htab)
4525
4526 rela.r_offset = (h->root.u.def.value
4527 + h->root.u.def.section->output_section->vma
4528 + h->root.u.def.section->output_offset);
4529 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4530 rela.r_addend = 0;
4531 if (h->root.u.def.section == htab->elf.sdynrelro)
4532 s = htab->elf.sreldynrelro;
4533 else
4534 s = htab->elf.srelbss;
4535 elf_append_rela (output_bfd, s, &rela);
4536 }
4537
4538 return TRUE;
4539 }
4540
4541 /* Finish up local dynamic symbol handling. We set the contents of
4542 various dynamic sections here. */
4543
4544 static bfd_boolean
4545 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4546 {
4547 struct elf_link_hash_entry *h
4548 = (struct elf_link_hash_entry *) *slot;
4549 struct bfd_link_info *info
4550 = (struct bfd_link_info *) inf;
4551
4552 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4553 info, h, NULL);
4554 }
4555
4556 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4557    here since an undefined weak symbol may not be dynamic, in which case
4558    elf_x86_64_finish_dynamic_symbol is not called for it.  */
4559
4560 static bfd_boolean
4561 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4562 void *inf)
4563 {
4564 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4565 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4566
4567 if (h->root.type != bfd_link_hash_undefweak
4568 || h->dynindx != -1)
4569 return TRUE;
4570
4571 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4572 info, h, NULL);
4573 }
4574
4575 /* Used to decide how to sort relocs in an optimal manner for the
4576 dynamic linker, before writing them out. */
4577
4578 static enum elf_reloc_type_class
4579 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4580 const asection *rel_sec ATTRIBUTE_UNUSED,
4581 const Elf_Internal_Rela *rela)
4582 {
4583 bfd *abfd = info->output_bfd;
4584 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4585 struct elf_x86_link_hash_table *htab
4586 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4587
4588 if (htab->elf.dynsym != NULL
4589 && htab->elf.dynsym->contents != NULL)
4590 {
4591 /* Check relocation against STT_GNU_IFUNC symbol if there are
4592 dynamic symbols. */
4593 unsigned long r_symndx = htab->r_sym (rela->r_info);
4594 if (r_symndx != STN_UNDEF)
4595 {
4596 Elf_Internal_Sym sym;
4597 if (!bed->s->swap_symbol_in (abfd,
4598 (htab->elf.dynsym->contents
4599 + r_symndx * bed->s->sizeof_sym),
4600 0, &sym))
4601 abort ();
4602
4603 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4604 return reloc_class_ifunc;
4605 }
4606 }
4607
4608 switch ((int) ELF32_R_TYPE (rela->r_info))
4609 {
4610 case R_X86_64_IRELATIVE:
4611 return reloc_class_ifunc;
4612 case R_X86_64_RELATIVE:
4613 case R_X86_64_RELATIVE64:
4614 return reloc_class_relative;
4615 case R_X86_64_JUMP_SLOT:
4616 return reloc_class_plt;
4617 case R_X86_64_COPY:
4618 return reloc_class_copy;
4619 default:
4620 return reloc_class_normal;
4621 }
4622 }
4623
4624 /* Finish up the dynamic sections. */
4625
4626 static bfd_boolean
4627 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4628 struct bfd_link_info *info)
4629 {
4630 struct elf_x86_link_hash_table *htab;
4631
4632 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4633 if (htab == NULL)
4634 return FALSE;
4635
4636 if (! htab->elf.dynamic_sections_created)
4637 return TRUE;
4638
4639 if (htab->elf.splt && htab->elf.splt->size > 0)
4640 {
4641 elf_section_data (htab->elf.splt->output_section)
4642 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4643
4644 if (htab->plt.has_plt0)
4645 {
4646 /* Fill in the special first entry in the procedure linkage
4647 table. */
4648 memcpy (htab->elf.splt->contents,
4649 htab->lazy_plt->plt0_entry,
4650 htab->lazy_plt->plt0_entry_size);
4651 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
4652 	     uses 6 bytes, subtract this value.  */
4653 bfd_put_32 (output_bfd,
4654 (htab->elf.sgotplt->output_section->vma
4655 + htab->elf.sgotplt->output_offset
4656 + 8
4657 - htab->elf.splt->output_section->vma
4658 - htab->elf.splt->output_offset
4659 - 6),
4660 (htab->elf.splt->contents
4661 + htab->lazy_plt->plt0_got1_offset));
4662 /* Add offset for the PC-relative instruction accessing
4663 GOT+16, subtracting the offset to the end of that
4664 instruction. */
4665 bfd_put_32 (output_bfd,
4666 (htab->elf.sgotplt->output_section->vma
4667 + htab->elf.sgotplt->output_offset
4668 + 16
4669 - htab->elf.splt->output_section->vma
4670 - htab->elf.splt->output_offset
4671 - htab->lazy_plt->plt0_got2_insn_end),
4672 (htab->elf.splt->contents
4673 + htab->lazy_plt->plt0_got2_offset));
4674 }
4675
4676 if (htab->tlsdesc_plt)
4677 {
4678 bfd_put_64 (output_bfd, (bfd_vma) 0,
4679 htab->elf.sgot->contents + htab->tlsdesc_got);
4680
4681 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4682 htab->lazy_plt->plt_tlsdesc_entry,
4683 htab->lazy_plt->plt_tlsdesc_entry_size);
4684
4685 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4686 bytes and the instruction uses 6 bytes, subtract these
4687 values. */
4688 bfd_put_32 (output_bfd,
4689 (htab->elf.sgotplt->output_section->vma
4690 + htab->elf.sgotplt->output_offset
4691 + 8
4692 - htab->elf.splt->output_section->vma
4693 - htab->elf.splt->output_offset
4694 - htab->tlsdesc_plt
4695 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4696 (htab->elf.splt->contents
4697 + htab->tlsdesc_plt
4698 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4699 /* Add offset for indirect branch via GOT+TDG, where TDG
4700 stands for htab->tlsdesc_got, subtracting the offset
4701 to the end of that instruction. */
4702 bfd_put_32 (output_bfd,
4703 (htab->elf.sgot->output_section->vma
4704 + htab->elf.sgot->output_offset
4705 + htab->tlsdesc_got
4706 - htab->elf.splt->output_section->vma
4707 - htab->elf.splt->output_offset
4708 - htab->tlsdesc_plt
4709 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4710 (htab->elf.splt->contents
4711 + htab->tlsdesc_plt
4712 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4713 }
4714 }
4715
4716 /* Fill PLT entries for undefined weak symbols in PIE. */
4717 if (bfd_link_pie (info))
4718 bfd_hash_traverse (&info->hash->table,
4719 elf_x86_64_pie_finish_undefweak_symbol,
4720 info);
4721
4722 return TRUE;
4723 }
4724
4725 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4726 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4727 It has to be done before elf_link_sort_relocs is called so that
4728 dynamic relocations are properly sorted. */
4729
4730 static bfd_boolean
4731 elf_x86_64_output_arch_local_syms
4732 (bfd *output_bfd ATTRIBUTE_UNUSED,
4733 struct bfd_link_info *info,
4734 void *flaginfo ATTRIBUTE_UNUSED,
4735 int (*func) (void *, const char *,
4736 Elf_Internal_Sym *,
4737 asection *,
4738 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4739 {
4740 struct elf_x86_link_hash_table *htab
4741 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4742 if (htab == NULL)
4743 return FALSE;
4744
4745 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4746 htab_traverse (htab->loc_hash_table,
4747 elf_x86_64_finish_local_dynamic_symbol,
4748 info);
4749
4750 return TRUE;
4751 }
4752
4753 /* Forward declaration. */
4754 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4755
4756 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4757 dynamic relocations. */
4758
4759 static long
4760 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4761 long symcount ATTRIBUTE_UNUSED,
4762 asymbol **syms ATTRIBUTE_UNUSED,
4763 long dynsymcount,
4764 asymbol **dynsyms,
4765 asymbol **ret)
4766 {
4767 long count, i, n;
4768 int j;
4769 bfd_byte *plt_contents;
4770 long relsize;
4771 const struct elf_x86_lazy_plt_layout *lazy_plt;
4772 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4773 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4774 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4775 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4776 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4777 asection *plt;
4778 enum elf_x86_plt_type plt_type;
4779 struct elf_x86_plt plts[] =
4780 {
4781 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4782 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4783 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4784 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4785 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4786 };
4787
4788 *ret = NULL;
4789
4790 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4791 return 0;
4792
4793 if (dynsymcount <= 0)
4794 return 0;
4795
4796 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4797 if (relsize <= 0)
4798 return -1;
4799
4800 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4801 {
4802 lazy_plt = &elf_x86_64_lazy_plt;
4803 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4804 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4805 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4806 if (ABI_64_P (abfd))
4807 {
4808 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4809 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4810 }
4811 else
4812 {
4813 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4814 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4815 }
4816 }
4817 else
4818 {
4819 lazy_plt = &elf_x86_64_nacl_plt;
4820 non_lazy_plt = NULL;
4821 lazy_bnd_plt = NULL;
4822 non_lazy_bnd_plt = NULL;
4823 lazy_ibt_plt = NULL;
4824 non_lazy_ibt_plt = NULL;
4825 }
4826
4827 count = 0;
4828 for (j = 0; plts[j].name != NULL; j++)
4829 {
4830 plt = bfd_get_section_by_name (abfd, plts[j].name);
4831 if (plt == NULL || plt->size == 0)
4832 continue;
4833
4834 /* Get the PLT section contents. */
4835 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4836 if (plt_contents == NULL)
4837 break;
4838 if (!bfd_get_section_contents (abfd, (asection *) plt,
4839 plt_contents, 0, plt->size))
4840 {
4841 free (plt_contents);
4842 break;
4843 }
4844
4845 /* Check what kind of PLT it is. */
4846 plt_type = plt_unknown;
4847 if (plts[j].type == plt_unknown
4848 && (plt->size >= (lazy_plt->plt_entry_size
4849 + lazy_plt->plt_entry_size)))
4850 {
4851 /* Match lazy PLT first. Need to check the first two
4852 instructions. */
4853 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4854 lazy_plt->plt0_got1_offset) == 0)
4855 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4856 2) == 0))
4857 plt_type = plt_lazy;
4858 else if (lazy_bnd_plt != NULL
4859 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4860 lazy_bnd_plt->plt0_got1_offset) == 0)
4861 && (memcmp (plt_contents + 6,
4862 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4863 {
4864 plt_type = plt_lazy | plt_second;
4865 	      /* The first entry in the lazy IBT PLT is the same as the
4866 lazy BND PLT. */
4867 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4868 lazy_ibt_plt->plt_entry,
4869 lazy_ibt_plt->plt_got_offset) == 0))
4870 lazy_plt = lazy_ibt_plt;
4871 else
4872 lazy_plt = lazy_bnd_plt;
4873 }
4874 }
4875
4876 if (non_lazy_plt != NULL
4877 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4878 && plt->size >= non_lazy_plt->plt_entry_size)
4879 {
4880 /* Match non-lazy PLT. */
4881 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4882 non_lazy_plt->plt_got_offset) == 0)
4883 plt_type = plt_non_lazy;
4884 }
4885
4886 if (plt_type == plt_unknown || plt_type == plt_second)
4887 {
4888 if (non_lazy_bnd_plt != NULL
4889 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4890 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4891 non_lazy_bnd_plt->plt_got_offset) == 0))
4892 {
4893 /* Match BND PLT. */
4894 plt_type = plt_second;
4895 non_lazy_plt = non_lazy_bnd_plt;
4896 }
4897 else if (non_lazy_ibt_plt != NULL
4898 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4899 && (memcmp (plt_contents,
4900 non_lazy_ibt_plt->plt_entry,
4901 non_lazy_ibt_plt->plt_got_offset) == 0))
4902 {
4903 /* Match IBT PLT. */
4904 plt_type = plt_second;
4905 non_lazy_plt = non_lazy_ibt_plt;
4906 }
4907 }
4908
4909 if (plt_type == plt_unknown)
4910 {
4911 free (plt_contents);
4912 continue;
4913 }
4914
4915 plts[j].sec = plt;
4916 plts[j].type = plt_type;
4917
4918 if ((plt_type & plt_lazy))
4919 {
4920 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4921 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4922 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4923 /* Skip PLT0 in lazy PLT. */
4924 i = 1;
4925 }
4926 else
4927 {
4928 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4929 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4930 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4931 i = 0;
4932 }
4933
4934 /* Skip lazy PLT when the second PLT is used. */
4935 if (plt_type == (plt_lazy | plt_second))
4936 plts[j].count = 0;
4937 else
4938 {
4939 n = plt->size / plts[j].plt_entry_size;
4940 plts[j].count = n;
4941 count += n - i;
4942 }
4943
4944 plts[j].contents = plt_contents;
4945 }
4946
4947 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4948 (bfd_vma) 0, plts, dynsyms,
4949 ret);
4950 }
4951
4952 /* Handle an x86-64 specific section when reading an object file. This
4953 is called when elfcode.h finds a section with an unknown type. */
4954
4955 static bfd_boolean
4956 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4957 const char *name, int shindex)
4958 {
4959 if (hdr->sh_type != SHT_X86_64_UNWIND)
4960 return FALSE;
4961
4962 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4963 return FALSE;
4964
4965 return TRUE;
4966 }
4967
4968 /* Hook called by the linker routine which adds symbols from an object
4969 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4970 of .bss. */
4971
4972 static bfd_boolean
4973 elf_x86_64_add_symbol_hook (bfd *abfd,
4974 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4975 Elf_Internal_Sym *sym,
4976 const char **namep ATTRIBUTE_UNUSED,
4977 flagword *flagsp ATTRIBUTE_UNUSED,
4978 asection **secp,
4979 bfd_vma *valp)
4980 {
4981 asection *lcomm;
4982
4983 switch (sym->st_shndx)
4984 {
4985 case SHN_X86_64_LCOMMON:
4986 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4987 if (lcomm == NULL)
4988 {
4989 lcomm = bfd_make_section_with_flags (abfd,
4990 "LARGE_COMMON",
4991 (SEC_ALLOC
4992 | SEC_IS_COMMON
4993 | SEC_LINKER_CREATED));
4994 if (lcomm == NULL)
4995 return FALSE;
4996 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4997 }
4998 *secp = lcomm;
4999 *valp = sym->st_size;
5000 return TRUE;
5001 }
5002
5003 return TRUE;
5004 }
5005
5006
5007 /* Given a BFD section, try to locate the corresponding ELF section
5008 index. */
5009
5010 static bfd_boolean
5011 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5012 asection *sec, int *index_return)
5013 {
5014 if (sec == &_bfd_elf_large_com_section)
5015 {
5016 *index_return = SHN_X86_64_LCOMMON;
5017 return TRUE;
5018 }
5019 return FALSE;
5020 }
5021
5022 /* Process a symbol. */
5023
5024 static void
5025 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5026 asymbol *asym)
5027 {
5028 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5029
5030 switch (elfsym->internal_elf_sym.st_shndx)
5031 {
5032 case SHN_X86_64_LCOMMON:
5033 asym->section = &_bfd_elf_large_com_section;
5034 asym->value = elfsym->internal_elf_sym.st_size;
5035 /* Common symbol doesn't set BSF_GLOBAL. */
5036 asym->flags &= ~BSF_GLOBAL;
5037 break;
5038 }
5039 }
5040
5041 static bfd_boolean
5042 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5043 {
5044 return (sym->st_shndx == SHN_COMMON
5045 || sym->st_shndx == SHN_X86_64_LCOMMON);
5046 }
5047
5048 static unsigned int
5049 elf_x86_64_common_section_index (asection *sec)
5050 {
5051 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5052 return SHN_COMMON;
5053 else
5054 return SHN_X86_64_LCOMMON;
5055 }
5056
5057 static asection *
5058 elf_x86_64_common_section (asection *sec)
5059 {
5060 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5061 return bfd_com_section_ptr;
5062 else
5063 return &_bfd_elf_large_com_section;
5064 }
5065
5066 static bfd_boolean
5067 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5068 const Elf_Internal_Sym *sym,
5069 asection **psec,
5070 bfd_boolean newdef,
5071 bfd_boolean olddef,
5072 bfd *oldbfd,
5073 const asection *oldsec)
5074 {
5075 /* A normal common symbol and a large common symbol result in a
5076 normal common symbol. We turn the large common symbol into a
5077 normal one. */
5078 if (!olddef
5079 && h->root.type == bfd_link_hash_common
5080 && !newdef
5081 && bfd_is_com_section (*psec)
5082 && oldsec != *psec)
5083 {
5084 if (sym->st_shndx == SHN_COMMON
5085 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5086 {
5087 h->root.u.c.p->section
5088 = bfd_make_section_old_way (oldbfd, "COMMON");
5089 h->root.u.c.p->section->flags = SEC_ALLOC;
5090 }
5091 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5092 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5093 *psec = bfd_com_section_ptr;
5094 }
5095
5096 return TRUE;
5097 }
5098
5099 static int
5100 elf_x86_64_additional_program_headers (bfd *abfd,
5101 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5102 {
5103 asection *s;
5104 int count = 0;
5105
5106 /* Check to see if we need a large readonly segment. */
5107 s = bfd_get_section_by_name (abfd, ".lrodata");
5108 if (s && (s->flags & SEC_LOAD))
5109 count++;
5110
5111   /* Check to see if we need a large data segment.  Since the .lbss section
5112      is placed right after the .bss section, there should be no need for
5113 a large data segment just because of .lbss. */
5114 s = bfd_get_section_by_name (abfd, ".ldata");
5115 if (s && (s->flags & SEC_LOAD))
5116 count++;
5117
5118 return count;
5119 }
5120
5121 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5122
5123 static bfd_boolean
5124 elf_x86_64_relocs_compatible (const bfd_target *input,
5125 const bfd_target *output)
5126 {
5127 return ((xvec_get_elf_backend_data (input)->s->elfclass
5128 == xvec_get_elf_backend_data (output)->s->elfclass)
5129 && _bfd_elf_relocs_compatible (input, output));
5130 }
5131
5132 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5133 with GNU properties if found. Otherwise, return NULL. */
5134
5135 static bfd *
5136 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5137 {
5138 struct elf_x86_init_table init_table;
5139
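  /* Sanity-check R_X86_64_converted_reloc_bit: it must be greater than
     any standard relocation number, less than R_X86_64_max, and already
     set in the vtable relocation numbers, so that OR-ing it in does not
     change R_X86_64_GNU_VTINHERIT or R_X86_64_GNU_VTENTRY.  */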
5140 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5141 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5142 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5143 != (int) R_X86_64_GNU_VTINHERIT)
5144 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5145 != (int) R_X86_64_GNU_VTENTRY))
5146 abort ();
5147
5148 /* This is unused for x86-64. */
5149 init_table.plt0_pad_byte = 0x90;
5150
5151 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5152 {
5153 const struct elf_backend_data *bed
5154 = get_elf_backend_data (info->output_bfd);
5155 struct elf_x86_link_hash_table *htab
5156 = elf_x86_hash_table (info, bed->target_id);
5157 if (!htab)
5158 abort ();
5159 if (htab->params->bndplt)
5160 {
5161 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5162 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5163 }
5164 else
5165 {
5166 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5167 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5168 }
5169
5170 if (ABI_64_P (info->output_bfd))
5171 {
5172 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5173 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5174 }
5175 else
5176 {
5177 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5178 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5179 }
5180 }
5181 else
5182 {
5183 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5184 init_table.non_lazy_plt = NULL;
5185 init_table.lazy_ibt_plt = NULL;
5186 init_table.non_lazy_ibt_plt = NULL;
5187 }
5188
5189 if (ABI_64_P (info->output_bfd))
5190 {
5191 init_table.r_info = elf64_r_info;
5192 init_table.r_sym = elf64_r_sym;
5193 }
5194 else
5195 {
5196 init_table.r_info = elf32_r_info;
5197 init_table.r_sym = elf32_r_sym;
5198 }
5199
5200 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5201 }
5202
5203 static const struct bfd_elf_special_section
5204 elf_x86_64_special_sections[]=
5205 {
5206 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5207 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5208 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5209 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5210 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5211 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5212 { NULL, 0, 0, 0, 0 }
5213 };
5214
5215 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5216 #define TARGET_LITTLE_NAME "elf64-x86-64"
5217 #define ELF_ARCH bfd_arch_i386
5218 #define ELF_TARGET_ID X86_64_ELF_DATA
5219 #define ELF_MACHINE_CODE EM_X86_64
5220 #if DEFAULT_LD_Z_SEPARATE_CODE
5221 # define ELF_MAXPAGESIZE 0x1000
5222 #else
5223 # define ELF_MAXPAGESIZE 0x200000
5224 #endif
5225 #define ELF_MINPAGESIZE 0x1000
5226 #define ELF_COMMONPAGESIZE 0x1000
5227
5228 #define elf_backend_can_gc_sections 1
5229 #define elf_backend_can_refcount 1
5230 #define elf_backend_want_got_plt 1
5231 #define elf_backend_plt_readonly 1
5232 #define elf_backend_want_plt_sym 0
5233 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5234 #define elf_backend_rela_normal 1
5235 #define elf_backend_plt_alignment 4
5236 #define elf_backend_extern_protected_data 1
5237 #define elf_backend_caches_rawsize 1
5238 #define elf_backend_dtrel_excludes_plt 1
5239 #define elf_backend_want_dynrelro 1
5240
5241 #define elf_info_to_howto elf_x86_64_info_to_howto
5242
5243 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5244 #define bfd_elf64_bfd_reloc_name_lookup \
5245 elf_x86_64_reloc_name_lookup
5246
5247 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5248 #define elf_backend_check_relocs elf_x86_64_check_relocs
5249 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5250 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5251 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5252 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5253 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5254 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5255 #ifdef CORE_HEADER
5256 #define elf_backend_write_core_note elf_x86_64_write_core_note
5257 #endif
5258 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5259 #define elf_backend_relocate_section elf_x86_64_relocate_section
5260 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5261 #define elf_backend_object_p elf64_x86_64_elf_object_p
5262 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5263
5264 #define elf_backend_section_from_shdr \
5265 elf_x86_64_section_from_shdr
5266
5267 #define elf_backend_section_from_bfd_section \
5268 elf_x86_64_elf_section_from_bfd_section
5269 #define elf_backend_add_symbol_hook \
5270 elf_x86_64_add_symbol_hook
5271 #define elf_backend_symbol_processing \
5272 elf_x86_64_symbol_processing
5273 #define elf_backend_common_section_index \
5274 elf_x86_64_common_section_index
5275 #define elf_backend_common_section \
5276 elf_x86_64_common_section
5277 #define elf_backend_common_definition \
5278 elf_x86_64_common_definition
5279 #define elf_backend_merge_symbol \
5280 elf_x86_64_merge_symbol
5281 #define elf_backend_special_sections \
5282 elf_x86_64_special_sections
5283 #define elf_backend_additional_program_headers \
5284 elf_x86_64_additional_program_headers
5285 #define elf_backend_setup_gnu_properties \
5286 elf_x86_64_link_setup_gnu_properties
5287 #define elf_backend_hide_symbol \
5288 _bfd_x86_elf_hide_symbol
5289
5290 #undef elf64_bed
5291 #define elf64_bed elf64_x86_64_bed
5292
5293 #include "elf64-target.h"
5294
5295 /* CloudABI support. */
5296
5297 #undef TARGET_LITTLE_SYM
5298 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5299 #undef TARGET_LITTLE_NAME
5300 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5301
5302 #undef ELF_OSABI
5303 #define ELF_OSABI ELFOSABI_CLOUDABI
5304
5305 #undef elf64_bed
5306 #define elf64_bed elf64_x86_64_cloudabi_bed
5307
5308 #include "elf64-target.h"
5309
5310 /* FreeBSD support. */
5311
5312 #undef TARGET_LITTLE_SYM
5313 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5314 #undef TARGET_LITTLE_NAME
5315 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5316
5317 #undef ELF_OSABI
5318 #define ELF_OSABI ELFOSABI_FREEBSD
5319
5320 #undef elf64_bed
5321 #define elf64_bed elf64_x86_64_fbsd_bed
5322
5323 #include "elf64-target.h"
5324
5325 /* Solaris 2 support. */
5326
5327 #undef TARGET_LITTLE_SYM
5328 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5329 #undef TARGET_LITTLE_NAME
5330 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5331
5332 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5333 {
5334 is_solaris /* os */
5335 };
5336
5337 #undef elf_backend_arch_data
5338 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5339
5340 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5341 objects won't be recognized. */
5342 #undef ELF_OSABI
5343
5344 #undef elf64_bed
5345 #define elf64_bed elf64_x86_64_sol2_bed
5346
5347 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5348 boundary. */
5349 #undef elf_backend_static_tls_alignment
5350 #define elf_backend_static_tls_alignment 16
5351
5352 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5353
5354 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5355 File, p.63. */
5356 #undef elf_backend_want_plt_sym
5357 #define elf_backend_want_plt_sym 1
5358
5359 #undef elf_backend_strtab_flags
5360 #define elf_backend_strtab_flags SHF_STRINGS
5361
5362 static bfd_boolean
5363 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5364 bfd *obfd ATTRIBUTE_UNUSED,
5365 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5366 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5367 {
5368 /* PR 19938: FIXME: Need to add code for setting the sh_info
5369 and sh_link fields of Solaris specific section types. */
5370 return FALSE;
5371 }
5372
5373 #undef elf_backend_copy_special_section_fields
5374 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5375
5376 #include "elf64-target.h"
5377
5378 /* Native Client support. */
5379
5380 static bfd_boolean
5381 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5382 {
5383 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5384 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5385 return TRUE;
5386 }
5387
5388 #undef TARGET_LITTLE_SYM
5389 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5390 #undef TARGET_LITTLE_NAME
5391 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5392 #undef elf64_bed
5393 #define elf64_bed elf64_x86_64_nacl_bed
5394
5395 #undef ELF_MAXPAGESIZE
5396 #undef ELF_MINPAGESIZE
5397 #undef ELF_COMMONPAGESIZE
5398 #define ELF_MAXPAGESIZE 0x10000
5399 #define ELF_MINPAGESIZE 0x10000
5400 #define ELF_COMMONPAGESIZE 0x10000
5401
5402 /* Restore defaults. */
5403 #undef ELF_OSABI
5404 #undef elf_backend_static_tls_alignment
5405 #undef elf_backend_want_plt_sym
5406 #define elf_backend_want_plt_sym 0
5407 #undef elf_backend_strtab_flags
5408 #undef elf_backend_copy_special_section_fields
5409
5410 /* NaCl uses substantially different PLT entries for the same effects. */
5411
5412 #undef elf_backend_plt_alignment
5413 #define elf_backend_plt_alignment 5
5414 #define NACL_PLT_ENTRY_SIZE 64
5415 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5416
5417 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5418 {
5419 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5420 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5421 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5422 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5423 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5424
5425 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5426 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5427
5428 /* 32 bytes of nop to pad out to the standard size. */
5429 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5430 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5431 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5432 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5433 0x66, /* excess data16 prefix */
5434 0x90 /* nop */
5435 };
5436
5437 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5438 {
5439 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5440 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5441 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5442 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5443
5444 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5445 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5446 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5447
5448 /* Lazy GOT entries point here (32-byte aligned). */
5449 0x68, /* pushq immediate */
5450 0, 0, 0, 0, /* replaced with index into relocation table. */
5451 0xe9, /* jmp relative */
5452 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5453
5454 /* 22 bytes of nop to pad out to the standard size. */
5455 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5456 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5457 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5458 };
5459
5460 /* .eh_frame covering the .plt section. */
5461
5462 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5463 {
5464 #if (PLT_CIE_LENGTH != 20 \
5465 || PLT_FDE_LENGTH != 36 \
5466 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5467 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5468 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5469 #endif
5470 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5471 0, 0, 0, 0, /* CIE ID */
5472 1, /* CIE version */
5473 'z', 'R', 0, /* Augmentation string */
5474 1, /* Code alignment factor */
5475 0x78, /* Data alignment factor */
5476 16, /* Return address column */
5477 1, /* Augmentation size */
5478 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5479 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5480 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5481 DW_CFA_nop, DW_CFA_nop,
5482
5483 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5484 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5485 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5486 0, 0, 0, 0, /* .plt size goes here */
5487 0, /* Augmentation size */
5488 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5489 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5490 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5491 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5492 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5493 13, /* Block length */
5494 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5495 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5496 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5497 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5498 DW_CFA_nop, DW_CFA_nop
5499 };
5500
5501 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5502 {
5503 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5504 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5505 elf_x86_64_nacl_plt_entry, /* plt_entry */
5506 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5507 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5508 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5509 2, /* plt_tlsdesc_got1_offset */
5510 9, /* plt_tlsdesc_got2_offset */
5511 6, /* plt_tlsdesc_got1_insn_end */
5512 13, /* plt_tlsdesc_got2_insn_end */
5513 2, /* plt0_got1_offset */
5514 9, /* plt0_got2_offset */
5515 13, /* plt0_got2_insn_end */
5516 3, /* plt_got_offset */
5517 33, /* plt_reloc_offset */
5518 38, /* plt_plt_offset */
5519 7, /* plt_got_insn_size */
5520 42, /* plt_plt_insn_end */
5521 32, /* plt_lazy_offset */
5522 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5523 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5524 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5525 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5526 };
5527
5528 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5529 {
5530 is_nacl /* os */
5531 };
5532
5533 #undef elf_backend_arch_data
5534 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5535
5536 #undef elf_backend_object_p
5537 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5538 #undef elf_backend_modify_segment_map
5539 #define elf_backend_modify_segment_map nacl_modify_segment_map
5540 #undef elf_backend_modify_headers
5541 #define elf_backend_modify_headers nacl_modify_headers
5542 #undef elf_backend_final_write_processing
5543 #define elf_backend_final_write_processing nacl_final_write_processing
5544
5545 #include "elf64-target.h"
5546
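/* Each inclusion of elf64-target.h / elf32-target.h stamps out another
   BFD target vector from the macro values in effect at that point; the
   #undef / #define pairs between the inclusions are what differentiate
   the NaCl, L1OM, K1OM and x32 vectors that follow.  */
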
5547 /* Native Client x32 support. */
5548
5549 static bfd_boolean
5550 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5551 {
5552 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5553 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5554 return TRUE;
5555 }
5556
5557 #undef TARGET_LITTLE_SYM
5558 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5559 #undef TARGET_LITTLE_NAME
5560 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5561 #undef elf32_bed
5562 #define elf32_bed elf32_x86_64_nacl_bed
5563
5564 #define bfd_elf32_bfd_reloc_type_lookup \
5565 elf_x86_64_reloc_type_lookup
5566 #define bfd_elf32_bfd_reloc_name_lookup \
5567 elf_x86_64_reloc_name_lookup
5568 #define bfd_elf32_get_synthetic_symtab \
5569 elf_x86_64_get_synthetic_symtab
5570
5571 #undef elf_backend_object_p
5572 #define elf_backend_object_p \
5573 elf32_x86_64_nacl_elf_object_p
5574
5575 #undef elf_backend_bfd_from_remote_memory
5576 #define elf_backend_bfd_from_remote_memory \
5577 _bfd_elf32_bfd_from_remote_memory
5578
5579 #undef elf_backend_size_info
5580 #define elf_backend_size_info \
5581 _bfd_elf32_size_info
5582
5583 #undef elf32_bed
5584 #define elf32_bed elf32_x86_64_bed
5585
5586 #include "elf32-target.h"
5587
5588 /* Restore defaults. */
5589 #undef elf_backend_object_p
5590 #define elf_backend_object_p elf64_x86_64_elf_object_p
5591 #undef elf_backend_bfd_from_remote_memory
5592 #undef elf_backend_size_info
5593 #undef elf_backend_modify_segment_map
5594 #undef elf_backend_modify_headers
5595 #undef elf_backend_final_write_processing
5596
5597 /* Intel L1OM support. */
5598
5599 static bfd_boolean
5600 elf64_l1om_elf_object_p (bfd *abfd)
5601 {
5602 /* Set the right machine number for an L1OM elf64 file. */
5603 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5604 return TRUE;
5605 }
5606
5607 #undef TARGET_LITTLE_SYM
5608 #define TARGET_LITTLE_SYM l1om_elf64_vec
5609 #undef TARGET_LITTLE_NAME
5610 #define TARGET_LITTLE_NAME "elf64-l1om"
5611 #undef ELF_ARCH
5612 #define ELF_ARCH bfd_arch_l1om
5613
5614 #undef ELF_MACHINE_CODE
5615 #define ELF_MACHINE_CODE EM_L1OM
5616
5617 #undef ELF_OSABI
5618
5619 #undef elf64_bed
5620 #define elf64_bed elf64_l1om_bed
5621
5622 #undef elf_backend_object_p
5623 #define elf_backend_object_p elf64_l1om_elf_object_p
5624
5625 /* Restore defaults. */
5626 #undef ELF_MAXPAGESIZE
5627 #undef ELF_MINPAGESIZE
5628 #undef ELF_COMMONPAGESIZE
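/* When the linker enables -z separate-code by default, a 4 KiB maximum
   page size presumably keeps the alignment padding between the
   separated code and data segments small; otherwise the traditional
   2 MiB maximum is retained.  */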
5629 #if DEFAULT_LD_Z_SEPARATE_CODE
5630 # define ELF_MAXPAGESIZE 0x1000
5631 #else
5632 # define ELF_MAXPAGESIZE 0x200000
5633 #endif
5634 #define ELF_MINPAGESIZE 0x1000
5635 #define ELF_COMMONPAGESIZE 0x1000
5636 #undef elf_backend_plt_alignment
5637 #define elf_backend_plt_alignment 4
5638 #undef elf_backend_arch_data
5639 #define elf_backend_arch_data &elf_x86_64_arch_bed
5640
5641 #include "elf64-target.h"
5642
5643 /* FreeBSD L1OM support. */
5644
5645 #undef TARGET_LITTLE_SYM
5646 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5647 #undef TARGET_LITTLE_NAME
5648 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5649
5650 #undef ELF_OSABI
5651 #define ELF_OSABI ELFOSABI_FREEBSD
5652
5653 #undef elf64_bed
5654 #define elf64_bed elf64_l1om_fbsd_bed
5655
5656 #include "elf64-target.h"
5657
5658 /* Intel K1OM support. */
5659
5660 static bfd_boolean
5661 elf64_k1om_elf_object_p (bfd *abfd)
5662 {
5663 /* Set the right machine number for a K1OM elf64 file. */
5664 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5665 return TRUE;
5666 }
5667
5668 #undef TARGET_LITTLE_SYM
5669 #define TARGET_LITTLE_SYM k1om_elf64_vec
5670 #undef TARGET_LITTLE_NAME
5671 #define TARGET_LITTLE_NAME "elf64-k1om"
5672 #undef ELF_ARCH
5673 #define ELF_ARCH bfd_arch_k1om
5674
5675 #undef ELF_MACHINE_CODE
5676 #define ELF_MACHINE_CODE EM_K1OM
5677
5678 #undef ELF_OSABI
5679
5680 #undef elf64_bed
5681 #define elf64_bed elf64_k1om_bed
5682
5683 #undef elf_backend_object_p
5684 #define elf_backend_object_p elf64_k1om_elf_object_p
5685
5686 #undef elf_backend_static_tls_alignment
5687
5688 #undef elf_backend_want_plt_sym
5689 #define elf_backend_want_plt_sym 0
5690
5691 #include "elf64-target.h"
5692
5693 /* FreeBSD K1OM support. */
5694
5695 #undef TARGET_LITTLE_SYM
5696 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5697 #undef TARGET_LITTLE_NAME
5698 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5699
5700 #undef ELF_OSABI
5701 #define ELF_OSABI ELFOSABI_FREEBSD
5702
5703 #undef elf64_bed
5704 #define elf64_bed elf64_k1om_fbsd_bed
5705
5706 #include "elf64-target.h"
5707
5708 /* 32bit x86-64 support. */
5709
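/* This is the x32 target: ELF32 containers (note _bfd_elf32_size_info
   and the final elf32-target.h inclusion) with ELF_MACHINE_CODE kept
   as EM_X86_64, so the relocation handling above is shared unchanged
   with the 64-bit targets.  */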
5710 #undef TARGET_LITTLE_SYM
5711 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5712 #undef TARGET_LITTLE_NAME
5713 #define TARGET_LITTLE_NAME "elf32-x86-64"
5714 #undef elf32_bed
5715
5716 #undef ELF_ARCH
5717 #define ELF_ARCH bfd_arch_i386
5718
5719 #undef ELF_MACHINE_CODE
5720 #define ELF_MACHINE_CODE EM_X86_64
5721
5722 #undef ELF_OSABI
5723
5724 #undef elf_backend_object_p
5725 #define elf_backend_object_p \
5726 elf32_x86_64_elf_object_p
5727
5728 #undef elf_backend_bfd_from_remote_memory
5729 #define elf_backend_bfd_from_remote_memory \
5730 _bfd_elf32_bfd_from_remote_memory
5731
5732 #undef elf_backend_size_info
5733 #define elf_backend_size_info \
5734 _bfd_elf32_size_info
5735
5736 #include "elf32-target.h"