1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
46 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
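/* Look up the howto table entry for relocation number R_TYPE.  For
   ILP32 (x32) objects R_X86_64_32 maps to the alternative entry at the
   end of the table; the GNU VT* relocations are folded across the
   numbering gap via R_X86_64_vt_offset.  Unknown types are reported
   and treated as R_X86_64_NONE.  */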
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return 0;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason a spurious space is tacked onto
417 the end of the args in some implementations (at least one,
418 anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
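/* At run time the dynamic linker fills GOT+8 with a pointer to its
   bookkeeping data (the link map in glibc) and GOT+16 with the address
   of its lazy-resolution entry point, so PLT0 pushes the former and
   jumps to the latter.  */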
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
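/* The DW_CFA_def_cfa_expression above evaluates to
   RSP + 8 + ((((RIP & 15) >= 11) ? 1 : 0) << 3), i.e. the CFA is
   RSP + 8 before the pushq in a 16-byte PLT entry and RSP + 16 once
   the pushq has executed.  */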
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
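/* Parameters for the MPX (BND) PLT defined above; the "1+" terms
   account for the one-byte 0xf2 BND prefix in front of the affected
   instructions.  */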
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if symbol has at least one BND relocation. */
761 bfd_boolean has_bnd_reloc;
762
763 /* Information about the GOT PLT entry. Filled when there are both
764 GOT and PLT relocations against the same function. */
765 union gotplt_union plt_got;
766
767 /* Information about the second PLT entry. Filled when has_bnd_reloc is
768 set. */
769 union gotplt_union plt_bnd;
770
771 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
772 starting at the end of the jump table. */
773 bfd_vma tlsdesc_got;
774 };
775
776 #define elf_x86_64_hash_entry(ent) \
777 ((struct elf_x86_64_link_hash_entry *)(ent))
778
779 struct elf_x86_64_obj_tdata
780 {
781 struct elf_obj_tdata root;
782
783 /* tls_type for each local got entry. */
784 char *local_got_tls_type;
785
786 /* GOTPLT entries for TLS descriptors. */
787 bfd_vma *local_tlsdesc_gotent;
788 };
789
790 #define elf_x86_64_tdata(abfd) \
791 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
792
793 #define elf_x86_64_local_got_tls_type(abfd) \
794 (elf_x86_64_tdata (abfd)->local_got_tls_type)
795
796 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
797 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
798
799 #define is_x86_64_elf(bfd) \
800 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
801 && elf_tdata (bfd) != NULL \
802 && elf_object_id (bfd) == X86_64_ELF_DATA)
803
804 static bfd_boolean
805 elf_x86_64_mkobject (bfd *abfd)
806 {
807 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
808 X86_64_ELF_DATA);
809 }
810
811 /* x86-64 ELF linker hash table. */
812
813 struct elf_x86_64_link_hash_table
814 {
815 struct elf_link_hash_table elf;
816
817 /* Short-cuts to get to dynamic linker sections. */
818 asection *sdynbss;
819 asection *srelbss;
820 asection *plt_eh_frame;
821 asection *plt_bnd;
822 asection *plt_got;
823
824 union
825 {
826 bfd_signed_vma refcount;
827 bfd_vma offset;
828 } tls_ld_got;
829
830 /* The amount of space used by the jump slots in the GOT. */
831 bfd_vma sgotplt_jump_table_size;
832
833 /* Small local sym cache. */
834 struct sym_cache sym_cache;
835
836 bfd_vma (*r_info) (bfd_vma, bfd_vma);
837 bfd_vma (*r_sym) (bfd_vma);
838 unsigned int pointer_r_type;
839 const char *dynamic_interpreter;
840 int dynamic_interpreter_size;
841
842 /* _TLS_MODULE_BASE_ symbol. */
843 struct bfd_link_hash_entry *tls_module_base;
844
845 /* Used by local STT_GNU_IFUNC symbols. */
846 htab_t loc_hash_table;
847 void * loc_hash_memory;
848
849 /* The offset into splt of the PLT entry for the TLS descriptor
850 resolver. Special values are 0, if not necessary (or not found
851 to be necessary yet), and -1 if needed but not determined
852 yet. */
853 bfd_vma tlsdesc_plt;
854 /* The offset into sgot of the GOT entry used by the PLT entry
855 above. */
856 bfd_vma tlsdesc_got;
857
858 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
859 bfd_vma next_jump_slot_index;
860 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
861 bfd_vma next_irelative_index;
862 };
863
864 /* Get the x86-64 ELF linker hash table from a link_info structure. */
865
866 #define elf_x86_64_hash_table(p) \
867 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
868 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
869
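/* Size in bytes of the .got.plt jump table: one GOT_ENTRY_SIZE slot
   per relocation in .rela.plt.  */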
870 #define elf_x86_64_compute_jump_table_size(htab) \
871 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
872
873 /* Create an entry in an x86-64 ELF linker hash table. */
874
875 static struct bfd_hash_entry *
876 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
877 struct bfd_hash_table *table,
878 const char *string)
879 {
880 /* Allocate the structure if it has not already been allocated by a
881 subclass. */
882 if (entry == NULL)
883 {
884 entry = (struct bfd_hash_entry *)
885 bfd_hash_allocate (table,
886 sizeof (struct elf_x86_64_link_hash_entry));
887 if (entry == NULL)
888 return entry;
889 }
890
891 /* Call the allocation method of the superclass. */
892 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
893 if (entry != NULL)
894 {
895 struct elf_x86_64_link_hash_entry *eh;
896
897 eh = (struct elf_x86_64_link_hash_entry *) entry;
898 eh->dyn_relocs = NULL;
899 eh->tls_type = GOT_UNKNOWN;
900 eh->has_bnd_reloc = FALSE;
901 eh->plt_bnd.offset = (bfd_vma) -1;
902 eh->plt_got.offset = (bfd_vma) -1;
903 eh->tlsdesc_got = (bfd_vma) -1;
904 }
905
906 return entry;
907 }
908
909 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
910 for local symbols so that we can handle local STT_GNU_IFUNC symbols
911 as global symbols. We reuse indx and dynstr_index for the local
912 symbol hash since they aren't used by global symbols in this backend. */
913
914 static hashval_t
915 elf_x86_64_local_htab_hash (const void *ptr)
916 {
917 struct elf_link_hash_entry *h
918 = (struct elf_link_hash_entry *) ptr;
919 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
920 }
921
922 /* Compare local hash entries. */
923
924 static int
925 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
926 {
927 struct elf_link_hash_entry *h1
928 = (struct elf_link_hash_entry *) ptr1;
929 struct elf_link_hash_entry *h2
930 = (struct elf_link_hash_entry *) ptr2;
931
932 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
933 }
934
935 /* Find and/or create a hash entry for a local symbol. */
936
937 static struct elf_link_hash_entry *
938 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
939 bfd *abfd, const Elf_Internal_Rela *rel,
940 bfd_boolean create)
941 {
942 struct elf_x86_64_link_hash_entry e, *ret;
943 asection *sec = abfd->sections;
944 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
945 htab->r_sym (rel->r_info));
946 void **slot;
947
948 e.elf.indx = sec->id;
949 e.elf.dynstr_index = htab->r_sym (rel->r_info);
950 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
951 create ? INSERT : NO_INSERT);
952
953 if (!slot)
954 return NULL;
955
956 if (*slot)
957 {
958 ret = (struct elf_x86_64_link_hash_entry *) *slot;
959 return &ret->elf;
960 }
961
962 ret = (struct elf_x86_64_link_hash_entry *)
963 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
964 sizeof (struct elf_x86_64_link_hash_entry));
965 if (ret)
966 {
967 memset (ret, 0, sizeof (*ret));
968 ret->elf.indx = sec->id;
969 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
970 ret->elf.dynindx = -1;
971 ret->plt_got.offset = (bfd_vma) -1;
972 *slot = ret;
973 }
974 return &ret->elf;
975 }
976
977 /* Destroy an X86-64 ELF linker hash table. */
978
979 static void
980 elf_x86_64_link_hash_table_free (bfd *obfd)
981 {
982 struct elf_x86_64_link_hash_table *htab
983 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
984
985 if (htab->loc_hash_table)
986 htab_delete (htab->loc_hash_table);
987 if (htab->loc_hash_memory)
988 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
989 _bfd_elf_link_hash_table_free (obfd);
990 }
991
992 /* Create an X86-64 ELF linker hash table. */
993
994 static struct bfd_link_hash_table *
995 elf_x86_64_link_hash_table_create (bfd *abfd)
996 {
997 struct elf_x86_64_link_hash_table *ret;
998 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
999
1000 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1001 if (ret == NULL)
1002 return NULL;
1003
1004 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1005 elf_x86_64_link_hash_newfunc,
1006 sizeof (struct elf_x86_64_link_hash_entry),
1007 X86_64_ELF_DATA))
1008 {
1009 free (ret);
1010 return NULL;
1011 }
1012
1013 if (ABI_64_P (abfd))
1014 {
1015 ret->r_info = elf64_r_info;
1016 ret->r_sym = elf64_r_sym;
1017 ret->pointer_r_type = R_X86_64_64;
1018 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1019 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1020 }
1021 else
1022 {
1023 ret->r_info = elf32_r_info;
1024 ret->r_sym = elf32_r_sym;
1025 ret->pointer_r_type = R_X86_64_32;
1026 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1028 }
1029
1030 ret->loc_hash_table = htab_try_create (1024,
1031 elf_x86_64_local_htab_hash,
1032 elf_x86_64_local_htab_eq,
1033 NULL);
1034 ret->loc_hash_memory = objalloc_create ();
1035 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1036 {
1037 elf_x86_64_link_hash_table_free (abfd);
1038 return NULL;
1039 }
1040 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1041
1042 return &ret->elf.root;
1043 }
1044
1045 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1046 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1047 hash table. */
1048
1049 static bfd_boolean
1050 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1051 struct bfd_link_info *info)
1052 {
1053 struct elf_x86_64_link_hash_table *htab;
1054
1055 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1056 return FALSE;
1057
1058 htab = elf_x86_64_hash_table (info);
1059 if (htab == NULL)
1060 return FALSE;
1061
1062 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1063 if (!htab->sdynbss)
1064 abort ();
1065
1066 if (info->executable)
1067 {
1068 /* Always allow copy relocs for building executables. */
1069 asection *s;
1070 s = bfd_get_linker_section (dynobj, ".rela.bss");
1071 if (s == NULL)
1072 {
1073 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1074 s = bfd_make_section_anyway_with_flags (dynobj,
1075 ".rela.bss",
1076 (bed->dynamic_sec_flags
1077 | SEC_READONLY));
1078 if (s == NULL
1079 || ! bfd_set_section_alignment (dynobj, s,
1080 bed->s->log_file_align))
1081 return FALSE;
1082 }
1083 htab->srelbss = s;
1084 }
1085
1086 if (!info->no_ld_generated_unwind_info
1087 && htab->plt_eh_frame == NULL
1088 && htab->elf.splt != NULL)
1089 {
1090 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1091 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1092 | SEC_LINKER_CREATED);
1093 htab->plt_eh_frame
1094 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1095 if (htab->plt_eh_frame == NULL
1096 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1097 return FALSE;
1098 }
1099 return TRUE;
1100 }
1101
1102 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1103
1104 static void
1105 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1106 struct elf_link_hash_entry *dir,
1107 struct elf_link_hash_entry *ind)
1108 {
1109 struct elf_x86_64_link_hash_entry *edir, *eind;
1110
1111 edir = (struct elf_x86_64_link_hash_entry *) dir;
1112 eind = (struct elf_x86_64_link_hash_entry *) ind;
1113
1114 if (!edir->has_bnd_reloc)
1115 edir->has_bnd_reloc = eind->has_bnd_reloc;
1116
1117 if (eind->dyn_relocs != NULL)
1118 {
1119 if (edir->dyn_relocs != NULL)
1120 {
1121 struct elf_dyn_relocs **pp;
1122 struct elf_dyn_relocs *p;
1123
1124 /* Add reloc counts against the indirect sym to the direct sym
1125 list. Merge any entries against the same section. */
1126 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1127 {
1128 struct elf_dyn_relocs *q;
1129
1130 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1131 if (q->sec == p->sec)
1132 {
1133 q->pc_count += p->pc_count;
1134 q->count += p->count;
1135 *pp = p->next;
1136 break;
1137 }
1138 if (q == NULL)
1139 pp = &p->next;
1140 }
1141 *pp = edir->dyn_relocs;
1142 }
1143
1144 edir->dyn_relocs = eind->dyn_relocs;
1145 eind->dyn_relocs = NULL;
1146 }
1147
1148 if (ind->root.type == bfd_link_hash_indirect
1149 && dir->got.refcount <= 0)
1150 {
1151 edir->tls_type = eind->tls_type;
1152 eind->tls_type = GOT_UNKNOWN;
1153 }
1154
1155 if (ELIMINATE_COPY_RELOCS
1156 && ind->root.type != bfd_link_hash_indirect
1157 && dir->dynamic_adjusted)
1158 {
1159 /* If called to transfer flags for a weakdef during processing
1160 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1161 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1162 dir->ref_dynamic |= ind->ref_dynamic;
1163 dir->ref_regular |= ind->ref_regular;
1164 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1165 dir->needs_plt |= ind->needs_plt;
1166 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1167 }
1168 else
1169 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1170 }
1171
1172 static bfd_boolean
1173 elf64_x86_64_elf_object_p (bfd *abfd)
1174 {
1175 /* Set the right machine number for an x86-64 elf64 file. */
1176 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1177 return TRUE;
1178 }
1179
1180 static bfd_boolean
1181 elf32_x86_64_elf_object_p (bfd *abfd)
1182 {
1183 /* Set the right machine number for an x86-64 elf32 file. */
1184 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1185 return TRUE;
1186 }
1187
1188 /* Return TRUE if the TLS access code sequence supports the transition
1189 from R_TYPE. */
1190
1191 static bfd_boolean
1192 elf_x86_64_check_tls_transition (bfd *abfd,
1193 struct bfd_link_info *info,
1194 asection *sec,
1195 bfd_byte *contents,
1196 Elf_Internal_Shdr *symtab_hdr,
1197 struct elf_link_hash_entry **sym_hashes,
1198 unsigned int r_type,
1199 const Elf_Internal_Rela *rel,
1200 const Elf_Internal_Rela *relend)
1201 {
1202 unsigned int val;
1203 unsigned long r_symndx;
1204 bfd_boolean largepic = FALSE;
1205 struct elf_link_hash_entry *h;
1206 bfd_vma offset;
1207 struct elf_x86_64_link_hash_table *htab;
1208
1209 /* Get the section contents. */
1210 if (contents == NULL)
1211 {
1212 if (elf_section_data (sec)->this_hdr.contents != NULL)
1213 contents = elf_section_data (sec)->this_hdr.contents;
1214 else
1215 {
1216 /* FIXME: How to better handle error condition? */
1217 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1218 return FALSE;
1219
1220 /* Cache the section contents for elf_link_input_bfd. */
1221 elf_section_data (sec)->this_hdr.contents = contents;
1222 }
1223 }
1224
1225 htab = elf_x86_64_hash_table (info);
1226 offset = rel->r_offset;
1227 switch (r_type)
1228 {
1229 case R_X86_64_TLSGD:
1230 case R_X86_64_TLSLD:
1231 if ((rel + 1) >= relend)
1232 return FALSE;
1233
1234 if (r_type == R_X86_64_TLSGD)
1235 {
1236 /* Check transition from GD access model. For 64bit, only
1237 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1238 .word 0x6666; rex64; call __tls_get_addr
1239 can transit to different access model. For 32bit, only
1240 leaq foo@tlsgd(%rip), %rdi
1241 .word 0x6666; rex64; call __tls_get_addr
1242 can transit to different access model. For largepic
1243 we also support:
1244 leaq foo@tlsgd(%rip), %rdi
1245 movabsq $__tls_get_addr@pltoff, %rax
1246 addq %rbx, %rax
1247 call *%rax. */
1248
1249 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1250 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1251
1252 if ((offset + 12) > sec->size)
1253 return FALSE;
1254
1255 if (memcmp (contents + offset + 4, call, 4) != 0)
1256 {
1257 if (!ABI_64_P (abfd)
1258 || (offset + 19) > sec->size
1259 || offset < 3
1260 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1261 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1262 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1263 != 0)
1264 return FALSE;
1265 largepic = TRUE;
1266 }
1267 else if (ABI_64_P (abfd))
1268 {
1269 if (offset < 4
1270 || memcmp (contents + offset - 4, leaq, 4) != 0)
1271 return FALSE;
1272 }
1273 else
1274 {
1275 if (offset < 3
1276 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1277 return FALSE;
1278 }
1279 }
1280 else
1281 {
1282 /* Check transition from LD access model. Only
1283 leaq foo@tlsld(%rip), %rdi;
1284 call __tls_get_addr
1285 can transit to different access model. For largepic
1286 we also support:
1287 leaq foo@tlsld(%rip), %rdi
1288 movabsq $__tls_get_addr@pltoff, %rax
1289 addq %rbx, %rax
1290 call *%rax. */
1291
1292 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1293
1294 if (offset < 3 || (offset + 9) > sec->size)
1295 return FALSE;
1296
1297 if (memcmp (contents + offset - 3, lea, 3) != 0)
1298 return FALSE;
1299
1300 if (0xe8 != *(contents + offset + 4))
1301 {
1302 if (!ABI_64_P (abfd)
1303 || (offset + 19) > sec->size
1304 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1305 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1306 != 0)
1307 return FALSE;
1308 largepic = TRUE;
1309 }
1310 }
1311
1312 r_symndx = htab->r_sym (rel[1].r_info);
1313 if (r_symndx < symtab_hdr->sh_info)
1314 return FALSE;
1315
1316 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1317 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1318 may be versioned. */
1319 return (h != NULL
1320 && h->root.root.string != NULL
1321 && (largepic
1322 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1323 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1324 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1325 && (strncmp (h->root.root.string,
1326 "__tls_get_addr", 14) == 0));
1327
1328 case R_X86_64_GOTTPOFF:
1329 /* Check transition from IE access model:
1330 mov foo@gottpoff(%rip), %reg
1331 add foo@gottpoff(%rip), %reg
1332 */
1333
1334 /* Check REX prefix first. */
1335 if (offset >= 3 && (offset + 4) <= sec->size)
1336 {
1337 val = bfd_get_8 (abfd, contents + offset - 3);
1338 if (val != 0x48 && val != 0x4c)
1339 {
1340 /* X32 may have 0x44 REX prefix or no REX prefix. */
1341 if (ABI_64_P (abfd))
1342 return FALSE;
1343 }
1344 }
1345 else
1346 {
1347 /* X32 may not have any REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 if (offset < 2 || (offset + 3) > sec->size)
1351 return FALSE;
1352 }
1353
1354 val = bfd_get_8 (abfd, contents + offset - 2);
1355 if (val != 0x8b && val != 0x03)
1356 return FALSE;
1357
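/* The ModRM byte must encode a RIP-relative memory operand:
   mod == 00, r/m == 101.  */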
1358 val = bfd_get_8 (abfd, contents + offset - 1);
1359 return (val & 0xc7) == 5;
1360
1361 case R_X86_64_GOTPC32_TLSDESC:
1362 /* Check transition from GDesc access model:
1363 leaq x@tlsdesc(%rip), %rax
1364
1365 Make sure it's a leaq adding rip to a 32-bit offset
1366 into any register, although it's probably almost always
1367 going to be rax. */
1368
1369 if (offset < 3 || (offset + 4) > sec->size)
1370 return FALSE;
1371
1372 val = bfd_get_8 (abfd, contents + offset - 3);
1373 if ((val & 0xfb) != 0x48)
1374 return FALSE;
1375
1376 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 1);
1380 return (val & 0xc7) == 0x05;
1381
1382 case R_X86_64_TLSDESC_CALL:
1383 /* Check transition from GDesc access model:
1384 call *x@tlsdesc(%rax)
1385 */
1386 if (offset + 2 <= sec->size)
1387 {
1388 /* Make sure that it's a call *x@tlsdesc(%rax). */
1389 static const unsigned char call[] = { 0xff, 0x10 };
1390 return memcmp (contents + offset, call, 2) == 0;
1391 }
1392
1393 return FALSE;
1394
1395 default:
1396 abort ();
1397 }
1398 }
1399
1400 /* Return TRUE if the TLS access transition is OK or no transition
1401 will be performed. Update R_TYPE if there is a transition. */
1402
1403 static bfd_boolean
1404 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1405 asection *sec, bfd_byte *contents,
1406 Elf_Internal_Shdr *symtab_hdr,
1407 struct elf_link_hash_entry **sym_hashes,
1408 unsigned int *r_type, int tls_type,
1409 const Elf_Internal_Rela *rel,
1410 const Elf_Internal_Rela *relend,
1411 struct elf_link_hash_entry *h,
1412 unsigned long r_symndx)
1413 {
1414 unsigned int from_type = *r_type;
1415 unsigned int to_type = from_type;
1416 bfd_boolean check = TRUE;
1417
1418 /* Skip TLS transition for functions. */
1419 if (h != NULL
1420 && (h->type == STT_FUNC
1421 || h->type == STT_GNU_IFUNC))
1422 return TRUE;
1423
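/* When building an executable, GD, GDesc and IE accesses may be
   relaxed: to LE (R_X86_64_TPOFF32) for symbols resolved locally,
   otherwise to IE (R_X86_64_GOTTPOFF); LD is relaxed to LE.  The
   switch below computes the target type of the transition.  */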
1424 switch (from_type)
1425 {
1426 case R_X86_64_TLSGD:
1427 case R_X86_64_GOTPC32_TLSDESC:
1428 case R_X86_64_TLSDESC_CALL:
1429 case R_X86_64_GOTTPOFF:
1430 if (info->executable)
1431 {
1432 if (h == NULL)
1433 to_type = R_X86_64_TPOFF32;
1434 else
1435 to_type = R_X86_64_GOTTPOFF;
1436 }
1437
1438 /* When we are called from elf_x86_64_relocate_section,
1439 CONTENTS isn't NULL and there may be additional transitions
1440 based on TLS_TYPE. */
1441 if (contents != NULL)
1442 {
1443 unsigned int new_to_type = to_type;
1444
1445 if (info->executable
1446 && h != NULL
1447 && h->dynindx == -1
1448 && tls_type == GOT_TLS_IE)
1449 new_to_type = R_X86_64_TPOFF32;
1450
1451 if (to_type == R_X86_64_TLSGD
1452 || to_type == R_X86_64_GOTPC32_TLSDESC
1453 || to_type == R_X86_64_TLSDESC_CALL)
1454 {
1455 if (tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_GOTTPOFF;
1457 }
1458
1459 /* We checked the transition before when we were called from
1460 elf_x86_64_check_relocs. We only want to check the new
1461 transition which hasn't been checked before. */
1462 check = new_to_type != to_type && from_type == to_type;
1463 to_type = new_to_type;
1464 }
1465
1466 break;
1467
1468 case R_X86_64_TLSLD:
1469 if (info->executable)
1470 to_type = R_X86_64_TPOFF32;
1471 break;
1472
1473 default:
1474 return TRUE;
1475 }
1476
1477 /* Return TRUE if there is no transition. */
1478 if (from_type == to_type)
1479 return TRUE;
1480
1481 /* Check if the transition can be performed. */
1482 if (check
1483 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1484 symtab_hdr, sym_hashes,
1485 from_type, rel, relend))
1486 {
1487 reloc_howto_type *from, *to;
1488 const char *name;
1489
1490 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1491 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1492
1493 if (h)
1494 name = h->root.root.string;
1495 else
1496 {
1497 struct elf_x86_64_link_hash_table *htab;
1498
1499 htab = elf_x86_64_hash_table (info);
1500 if (htab == NULL)
1501 name = "*unknown*";
1502 else
1503 {
1504 Elf_Internal_Sym *isym;
1505
1506 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1507 abfd, r_symndx);
1508 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1509 }
1510 }
1511
1512 (*_bfd_error_handler)
1513 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1514 "in section `%A' failed"),
1515 abfd, sec, from->name, to->name, name,
1516 (unsigned long) rel->r_offset);
1517 bfd_set_error (bfd_error_bad_value);
1518 return FALSE;
1519 }
1520
1521 *r_type = to_type;
1522 return TRUE;
1523 }
1524
1525 /* Look through the relocs for a section during the first phase, and
1526 calculate needed space in the global offset table, procedure
1527 linkage table, and dynamic reloc sections. */
1528
1529 static bfd_boolean
1530 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1531 asection *sec,
1532 const Elf_Internal_Rela *relocs)
1533 {
1534 struct elf_x86_64_link_hash_table *htab;
1535 Elf_Internal_Shdr *symtab_hdr;
1536 struct elf_link_hash_entry **sym_hashes;
1537 const Elf_Internal_Rela *rel;
1538 const Elf_Internal_Rela *rel_end;
1539 asection *sreloc;
1540 bfd_boolean use_plt_got;
1541
1542 if (info->relocatable)
1543 return TRUE;
1544
1545 BFD_ASSERT (is_x86_64_elf (abfd));
1546
1547 htab = elf_x86_64_hash_table (info);
1548 if (htab == NULL)
1549 return FALSE;
1550
1551 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1552
1553 symtab_hdr = &elf_symtab_hdr (abfd);
1554 sym_hashes = elf_sym_hashes (abfd);
1555
1556 sreloc = NULL;
1557
1558 rel_end = relocs + sec->reloc_count;
1559 for (rel = relocs; rel < rel_end; rel++)
1560 {
1561 unsigned int r_type;
1562 unsigned long r_symndx;
1563 struct elf_link_hash_entry *h;
1564 Elf_Internal_Sym *isym;
1565 const char *name;
1566 bfd_boolean size_reloc;
1567
1568 r_symndx = htab->r_sym (rel->r_info);
1569 r_type = ELF32_R_TYPE (rel->r_info);
1570
1571 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1572 {
1573 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1574 abfd, r_symndx);
1575 return FALSE;
1576 }
1577
1578 if (r_symndx < symtab_hdr->sh_info)
1579 {
1580 /* A local symbol. */
1581 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1582 abfd, r_symndx);
1583 if (isym == NULL)
1584 return FALSE;
1585
1586 /* Check relocation against local STT_GNU_IFUNC symbol. */
1587 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1588 {
1589 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1590 TRUE);
1591 if (h == NULL)
1592 return FALSE;
1593
1594 /* Fake a STT_GNU_IFUNC symbol. */
1595 h->type = STT_GNU_IFUNC;
1596 h->def_regular = 1;
1597 h->ref_regular = 1;
1598 h->forced_local = 1;
1599 h->root.type = bfd_link_hash_defined;
1600 }
1601 else
1602 h = NULL;
1603 }
1604 else
1605 {
1606 isym = NULL;
1607 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1608 while (h->root.type == bfd_link_hash_indirect
1609 || h->root.type == bfd_link_hash_warning)
1610 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1611 }
1612
1613 /* Check invalid x32 relocations. */
1614 if (!ABI_64_P (abfd))
1615 switch (r_type)
1616 {
1617 default:
1618 break;
1619
1620 case R_X86_64_DTPOFF64:
1621 case R_X86_64_TPOFF64:
1622 case R_X86_64_PC64:
1623 case R_X86_64_GOTOFF64:
1624 case R_X86_64_GOT64:
1625 case R_X86_64_GOTPCREL64:
1626 case R_X86_64_GOTPC64:
1627 case R_X86_64_GOTPLT64:
1628 case R_X86_64_PLTOFF64:
1629 {
1630 if (h)
1631 name = h->root.root.string;
1632 else
1633 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1634 NULL);
1635 (*_bfd_error_handler)
1636 (_("%B: relocation %s against symbol `%s' isn't "
1637 "supported in x32 mode"), abfd,
1638 x86_64_elf_howto_table[r_type].name, name);
1639 bfd_set_error (bfd_error_bad_value);
1640 return FALSE;
1641 }
1642 break;
1643 }
1644
1645 if (h != NULL)
1646 {
1647 /* Create the ifunc sections for static executables. If we
1648 never see an indirect function symbol and are not building
1649 a static executable, those sections will be empty and
1650 won't appear in the output. */
1651 switch (r_type)
1652 {
1653 default:
1654 break;
1655
1656 case R_X86_64_PC32_BND:
1657 case R_X86_64_PLT32_BND:
1658 case R_X86_64_PC32:
1659 case R_X86_64_PLT32:
1660 case R_X86_64_32:
1661 case R_X86_64_64:
1662 /* MPX PLT is supported only if elf_x86_64_arch_bed
1663 is used in 64-bit mode. */
1664 if (ABI_64_P (abfd)
1665 && info->bndplt
1666 && (get_elf_x86_64_backend_data (abfd)
1667 == &elf_x86_64_arch_bed))
1668 {
1669 elf_x86_64_hash_entry (h)->has_bnd_reloc = TRUE;
1670
1671 /* Create the second PLT for Intel MPX support. */
1672 if (htab->plt_bnd == NULL)
1673 {
1674 unsigned int plt_bnd_align;
1675 const struct elf_backend_data *bed;
1676
1677 bed = get_elf_backend_data (info->output_bfd);
1678 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1679 && (sizeof (elf_x86_64_bnd_plt2_entry)
1680 == sizeof (elf_x86_64_legacy_plt2_entry)));
1681 plt_bnd_align = 3;
1682
1683 if (htab->elf.dynobj == NULL)
1684 htab->elf.dynobj = abfd;
1685 htab->plt_bnd
1686 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1687 ".plt.bnd",
1688 (bed->dynamic_sec_flags
1689 | SEC_ALLOC
1690 | SEC_CODE
1691 | SEC_LOAD
1692 | SEC_READONLY));
1693 if (htab->plt_bnd == NULL
1694 || !bfd_set_section_alignment (htab->elf.dynobj,
1695 htab->plt_bnd,
1696 plt_bnd_align))
1697 return FALSE;
1698 }
1699 }
1700
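/* Fall through.  */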
1701 case R_X86_64_32S:
1702 case R_X86_64_PC64:
1703 case R_X86_64_GOTPCREL:
1704 case R_X86_64_GOTPCREL64:
1705 if (htab->elf.dynobj == NULL)
1706 htab->elf.dynobj = abfd;
1707 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1708 return FALSE;
1709 break;
1710 }
1711
1712 /* It is referenced by a non-shared object. */
1713 h->ref_regular = 1;
1714 h->root.non_ir_ref = 1;
1715 }
1716
1717 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1718 symtab_hdr, sym_hashes,
1719 &r_type, GOT_UNKNOWN,
1720 rel, rel_end, h, r_symndx))
1721 return FALSE;
1722
1723 switch (r_type)
1724 {
1725 case R_X86_64_TLSLD:
1726 htab->tls_ld_got.refcount += 1;
1727 goto create_got;
1728
1729 case R_X86_64_TPOFF32:
1730 if (!info->executable && ABI_64_P (abfd))
1731 {
1732 if (h)
1733 name = h->root.root.string;
1734 else
1735 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1736 NULL);
1737 (*_bfd_error_handler)
1738 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1739 abfd,
1740 x86_64_elf_howto_table[r_type].name, name);
1741 bfd_set_error (bfd_error_bad_value);
1742 return FALSE;
1743 }
1744 break;
1745
1746 case R_X86_64_GOTTPOFF:
1747 if (!info->executable)
1748 info->flags |= DF_STATIC_TLS;
1749 /* Fall through */
1750
1751 case R_X86_64_GOT32:
1752 case R_X86_64_GOTPCREL:
1753 case R_X86_64_TLSGD:
1754 case R_X86_64_GOT64:
1755 case R_X86_64_GOTPCREL64:
1756 case R_X86_64_GOTPLT64:
1757 case R_X86_64_GOTPC32_TLSDESC:
1758 case R_X86_64_TLSDESC_CALL:
1759 /* This symbol requires a global offset table entry. */
1760 {
1761 int tls_type, old_tls_type;
1762
1763 switch (r_type)
1764 {
1765 default: tls_type = GOT_NORMAL; break;
1766 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1767 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1768 case R_X86_64_GOTPC32_TLSDESC:
1769 case R_X86_64_TLSDESC_CALL:
1770 tls_type = GOT_TLS_GDESC; break;
1771 }
1772
1773 if (h != NULL)
1774 {
1775 h->got.refcount += 1;
1776 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1777 }
1778 else
1779 {
1780 bfd_signed_vma *local_got_refcounts;
1781
1782 /* This is a global offset table entry for a local symbol. */
1783 local_got_refcounts = elf_local_got_refcounts (abfd);
1784 if (local_got_refcounts == NULL)
1785 {
1786 bfd_size_type size;
1787
1788 size = symtab_hdr->sh_info;
1789 size *= sizeof (bfd_signed_vma)
1790 + sizeof (bfd_vma) + sizeof (char);
1791 local_got_refcounts = ((bfd_signed_vma *)
1792 bfd_zalloc (abfd, size));
1793 if (local_got_refcounts == NULL)
1794 return FALSE;
1795 elf_local_got_refcounts (abfd) = local_got_refcounts;
1796 elf_x86_64_local_tlsdesc_gotent (abfd)
1797 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1798 elf_x86_64_local_got_tls_type (abfd)
1799 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1800 }
1801 local_got_refcounts[r_symndx] += 1;
1802 old_tls_type
1803 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1804 }
1805
1806 /* If a TLS symbol is accessed using IE at least once,
1807 there is no point in using a dynamic model for it. */
1808 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1809 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1810 || tls_type != GOT_TLS_IE))
1811 {
1812 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1813 tls_type = old_tls_type;
1814 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1815 && GOT_TLS_GD_ANY_P (tls_type))
1816 tls_type |= old_tls_type;
1817 else
1818 {
1819 if (h)
1820 name = h->root.root.string;
1821 else
1822 name = bfd_elf_sym_name (abfd, symtab_hdr,
1823 isym, NULL);
1824 (*_bfd_error_handler)
1825 (_("%B: '%s' accessed both as normal and thread local symbol"),
1826 abfd, name);
1827 bfd_set_error (bfd_error_bad_value);
1828 return FALSE;
1829 }
1830 }
1831
1832 if (old_tls_type != tls_type)
1833 {
1834 if (h != NULL)
1835 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1836 else
1837 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1838 }
1839 }
1840 /* Fall through */
1841
1842 case R_X86_64_GOTOFF64:
1843 case R_X86_64_GOTPC32:
1844 case R_X86_64_GOTPC64:
1845 create_got:
1846 if (htab->elf.sgot == NULL)
1847 {
1848 if (htab->elf.dynobj == NULL)
1849 htab->elf.dynobj = abfd;
1850 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1851 info))
1852 return FALSE;
1853 }
1854 break;
1855
1856 case R_X86_64_PLT32:
1857 case R_X86_64_PLT32_BND:
1858 /* This symbol requires a procedure linkage table entry. We
1859 actually build the entry in adjust_dynamic_symbol,
1860 because this might be a case of linking PIC code which is
1861 never referenced by a dynamic object, in which case we
1862 don't need to generate a procedure linkage table entry
1863 after all. */
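/* For example, PIC code containing "call foo@PLT" where foo ends up
   defined locally and never preempted needs no PLT entry at all;
   relocate_section can then resolve the PLT32 reloc like an ordinary
   PC32 branch straight to foo.  */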
1864
1865 /* If this is a local symbol, we resolve it directly without
1866 creating a procedure linkage table entry. */
1867 if (h == NULL)
1868 continue;
1869
1870 h->needs_plt = 1;
1871 h->plt.refcount += 1;
1872 break;
1873
1874 case R_X86_64_PLTOFF64:
1875 /* This tries to form the 'address' of a function relative
1876 to GOT. For global symbols we need a PLT entry. */
1877 if (h != NULL)
1878 {
1879 h->needs_plt = 1;
1880 h->plt.refcount += 1;
1881 }
1882 goto create_got;
1883
1884 case R_X86_64_SIZE32:
1885 case R_X86_64_SIZE64:
1886 size_reloc = TRUE;
1887 goto do_size;
1888
1889 case R_X86_64_32:
1890 if (!ABI_64_P (abfd))
1891 goto pointer;
1892 case R_X86_64_8:
1893 case R_X86_64_16:
1894 case R_X86_64_32S:
1895 /* Let's help debug shared library creation. These relocs
1896 cannot be used in shared libs. Don't error out for
1897 sections we don't care about, such as debug sections or
1898 non-constant sections. */
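/* Illustrative case: "movl $sym, %eax" emits R_X86_64_32 against sym.
   In a shared object sym's load address is unknown until run time, so
   the 32-bit immediate would need a text relocation the dynamic
   linker must patch, hence the -fPIC advice below.  */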
1899 if (info->shared
1900 && (sec->flags & SEC_ALLOC) != 0
1901 && (sec->flags & SEC_READONLY) != 0)
1902 {
1903 if (h)
1904 name = h->root.root.string;
1905 else
1906 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1907 (*_bfd_error_handler)
1908 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1909 abfd, x86_64_elf_howto_table[r_type].name, name);
1910 bfd_set_error (bfd_error_bad_value);
1911 return FALSE;
1912 }
1913 /* Fall through. */
1914
1915 case R_X86_64_PC8:
1916 case R_X86_64_PC16:
1917 case R_X86_64_PC32:
1918 case R_X86_64_PC32_BND:
1919 case R_X86_64_PC64:
1920 case R_X86_64_64:
1921 pointer:
1922 if (h != NULL && info->executable)
1923 {
1924 /* If this reloc is in a read-only section, we might
1925 need a copy reloc. We can't check reliably at this
1926 stage whether the section is read-only, as input
1927 sections have not yet been mapped to output sections.
1928 Tentatively set the flag for now, and correct in
1929 adjust_dynamic_symbol. */
1930 h->non_got_ref = 1;
1931
1932 /* We may need a .plt entry if the function this reloc
1933 refers to is in a shared lib. */
1934 h->plt.refcount += 1;
1935 if (r_type != R_X86_64_PC32
1936 && r_type != R_X86_64_PC32_BND
1937 && r_type != R_X86_64_PC64)
1938 h->pointer_equality_needed = 1;
1939 }
1940
1941 size_reloc = FALSE;
1942 do_size:
1943 /* If we are creating a shared library, and this is a reloc
1944 against a global symbol, or a non PC relative reloc
1945 against a local symbol, then we need to copy the reloc
1946 into the shared library. However, if we are linking with
1947 -Bsymbolic, we do not need to copy a reloc against a
1948 global symbol which is defined in an object we are
1949 including in the link (i.e., DEF_REGULAR is set). At
1950 this point we have not seen all the input files, so it is
1951 possible that DEF_REGULAR is not set now but will be set
1952 later (it is never cleared). In case of a weak definition,
1953 DEF_REGULAR may be cleared later by a strong definition in
1954 a shared library. We account for that possibility below by
1955 storing information in the relocs_copied field of the hash
1956 table entry. A similar situation occurs when creating
1957 shared libraries and symbol visibility changes render the
1958 symbol local.
1959
1960 If on the other hand, we are creating an executable, we
1961 may need to keep relocations for symbols satisfied by a
1962 dynamic library if we manage to avoid copy relocs for the
1963 symbol. */
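/* Condensed: for a shared object, an absolute reloc in an allocated
   section, or a PC-relative reloc against a possibly preemptible
   symbol, is recorded for output as a dynamic reloc; for an
   executable the reloc is only remembered so it can still be emitted
   if adjust_dynamic_symbol later manages to avoid a copy reloc.  */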
1964 if ((info->shared
1965 && (sec->flags & SEC_ALLOC) != 0
1966 && (! IS_X86_64_PCREL_TYPE (r_type)
1967 || (h != NULL
1968 && (! SYMBOLIC_BIND (info, h)
1969 || h->root.type == bfd_link_hash_defweak
1970 || !h->def_regular))))
1971 || (ELIMINATE_COPY_RELOCS
1972 && !info->shared
1973 && (sec->flags & SEC_ALLOC) != 0
1974 && h != NULL
1975 && (h->root.type == bfd_link_hash_defweak
1976 || !h->def_regular)))
1977 {
1978 struct elf_dyn_relocs *p;
1979 struct elf_dyn_relocs **head;
1980
1981 /* We must copy these reloc types into the output file.
1982 Create a reloc section in dynobj and make room for
1983 this reloc. */
1984 if (sreloc == NULL)
1985 {
1986 if (htab->elf.dynobj == NULL)
1987 htab->elf.dynobj = abfd;
1988
1989 sreloc = _bfd_elf_make_dynamic_reloc_section
1990 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1991 abfd, /*rela?*/ TRUE);
1992
1993 if (sreloc == NULL)
1994 return FALSE;
1995 }
1996
1997 /* If this is a global symbol, we count the number of
1998 relocations we need for this symbol. */
1999 if (h != NULL)
2000 {
2001 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2002 }
2003 else
2004 {
2005 /* Track dynamic relocs needed for local syms too.
2006 We really need local syms available to do this
2007 easily. Oh well. */
2008 asection *s;
2009 void **vpp;
2010
2011 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2012 abfd, r_symndx);
2013 if (isym == NULL)
2014 return FALSE;
2015
2016 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2017 if (s == NULL)
2018 s = sec;
2019
2020 /* Beware of type punned pointers vs strict aliasing
2021 rules. */
2022 vpp = &(elf_section_data (s)->local_dynrel);
2023 head = (struct elf_dyn_relocs **)vpp;
2024 }
2025
2026 p = *head;
2027 if (p == NULL || p->sec != sec)
2028 {
2029 bfd_size_type amt = sizeof *p;
2030
2031 p = ((struct elf_dyn_relocs *)
2032 bfd_alloc (htab->elf.dynobj, amt));
2033 if (p == NULL)
2034 return FALSE;
2035 p->next = *head;
2036 *head = p;
2037 p->sec = sec;
2038 p->count = 0;
2039 p->pc_count = 0;
2040 }
2041
2042 p->count += 1;
2043 /* Count size relocation as PC-relative relocation. */
2044 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2045 p->pc_count += 1;
2046 }
2047 break;
2048
2049 /* This relocation describes the C++ object vtable hierarchy.
2050 Reconstruct it for later use during GC. */
2051 case R_X86_64_GNU_VTINHERIT:
2052 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2053 return FALSE;
2054 break;
2055
2056 /* This relocation describes which C++ vtable entries are actually
2057 used. Record for later use during GC. */
2058 case R_X86_64_GNU_VTENTRY:
2059 BFD_ASSERT (h != NULL);
2060 if (h != NULL
2061 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2062 return FALSE;
2063 break;
2064
2065 default:
2066 break;
2067 }
2068
2069 if (use_plt_got
2070 && h != NULL
2071 && h->plt.refcount > 0
2072 && h->got.refcount > 0
2073 && htab->plt_got == NULL)
2074 {
2075 /* Create the GOT procedure linkage table. */
2076 unsigned int plt_got_align;
2077 const struct elf_backend_data *bed;
2078
2079 bed = get_elf_backend_data (info->output_bfd);
2080 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2081 && (sizeof (elf_x86_64_bnd_plt2_entry)
2082 == sizeof (elf_x86_64_legacy_plt2_entry)));
2083 plt_got_align = 3;
2084
2085 if (htab->elf.dynobj == NULL)
2086 htab->elf.dynobj = abfd;
2087 htab->plt_got
2088 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2089 ".plt.got",
2090 (bed->dynamic_sec_flags
2091 | SEC_ALLOC
2092 | SEC_CODE
2093 | SEC_LOAD
2094 | SEC_READONLY));
2095 if (htab->plt_got == NULL
2096 || !bfd_set_section_alignment (htab->elf.dynobj,
2097 htab->plt_got,
2098 plt_got_align))
2099 return FALSE;
2100 }
2101 }
2102
2103 return TRUE;
2104 }
2105
2106 /* Return the section that should be marked against GC for a given
2107 relocation. */
2108
2109 static asection *
2110 elf_x86_64_gc_mark_hook (asection *sec,
2111 struct bfd_link_info *info,
2112 Elf_Internal_Rela *rel,
2113 struct elf_link_hash_entry *h,
2114 Elf_Internal_Sym *sym)
2115 {
2116 if (h != NULL)
2117 switch (ELF32_R_TYPE (rel->r_info))
2118 {
2119 case R_X86_64_GNU_VTINHERIT:
2120 case R_X86_64_GNU_VTENTRY:
2121 return NULL;
2122 }
2123
2124 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2125 }
2126
2127 /* Update the got entry reference counts for the section being removed. */
2128
2129 static bfd_boolean
2130 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2131 asection *sec,
2132 const Elf_Internal_Rela *relocs)
2133 {
2134 struct elf_x86_64_link_hash_table *htab;
2135 Elf_Internal_Shdr *symtab_hdr;
2136 struct elf_link_hash_entry **sym_hashes;
2137 bfd_signed_vma *local_got_refcounts;
2138 const Elf_Internal_Rela *rel, *relend;
2139
2140 if (info->relocatable)
2141 return TRUE;
2142
2143 htab = elf_x86_64_hash_table (info);
2144 if (htab == NULL)
2145 return FALSE;
2146
2147 elf_section_data (sec)->local_dynrel = NULL;
2148
2149 symtab_hdr = &elf_symtab_hdr (abfd);
2150 sym_hashes = elf_sym_hashes (abfd);
2151 local_got_refcounts = elf_local_got_refcounts (abfd);
2152
2153 htab = elf_x86_64_hash_table (info);
2154 relend = relocs + sec->reloc_count;
2155 for (rel = relocs; rel < relend; rel++)
2156 {
2157 unsigned long r_symndx;
2158 unsigned int r_type;
2159 struct elf_link_hash_entry *h = NULL;
2160
2161 r_symndx = htab->r_sym (rel->r_info);
2162 if (r_symndx >= symtab_hdr->sh_info)
2163 {
2164 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2165 while (h->root.type == bfd_link_hash_indirect
2166 || h->root.type == bfd_link_hash_warning)
2167 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2168 }
2169 else
2170 {
2171 /* A local symbol. */
2172 Elf_Internal_Sym *isym;
2173
2174 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2175 abfd, r_symndx);
2176
2177 /* Check relocation against local STT_GNU_IFUNC symbol. */
2178 if (isym != NULL
2179 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2180 {
2181 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2182 if (h == NULL)
2183 abort ();
2184 }
2185 }
2186
2187 if (h)
2188 {
2189 struct elf_x86_64_link_hash_entry *eh;
2190 struct elf_dyn_relocs **pp;
2191 struct elf_dyn_relocs *p;
2192
2193 eh = (struct elf_x86_64_link_hash_entry *) h;
2194
2195 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2196 if (p->sec == sec)
2197 {
2198 /* Everything must go for SEC. */
2199 *pp = p->next;
2200 break;
2201 }
2202 }
2203
2204 r_type = ELF32_R_TYPE (rel->r_info);
2205 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2206 symtab_hdr, sym_hashes,
2207 &r_type, GOT_UNKNOWN,
2208 rel, relend, h, r_symndx))
2209 return FALSE;
2210
2211 switch (r_type)
2212 {
2213 case R_X86_64_TLSLD:
2214 if (htab->tls_ld_got.refcount > 0)
2215 htab->tls_ld_got.refcount -= 1;
2216 break;
2217
2218 case R_X86_64_TLSGD:
2219 case R_X86_64_GOTPC32_TLSDESC:
2220 case R_X86_64_TLSDESC_CALL:
2221 case R_X86_64_GOTTPOFF:
2222 case R_X86_64_GOT32:
2223 case R_X86_64_GOTPCREL:
2224 case R_X86_64_GOT64:
2225 case R_X86_64_GOTPCREL64:
2226 case R_X86_64_GOTPLT64:
2227 if (h != NULL)
2228 {
2229 if (h->got.refcount > 0)
2230 h->got.refcount -= 1;
2231 if (h->type == STT_GNU_IFUNC)
2232 {
2233 if (h->plt.refcount > 0)
2234 h->plt.refcount -= 1;
2235 }
2236 }
2237 else if (local_got_refcounts != NULL)
2238 {
2239 if (local_got_refcounts[r_symndx] > 0)
2240 local_got_refcounts[r_symndx] -= 1;
2241 }
2242 break;
2243
2244 case R_X86_64_8:
2245 case R_X86_64_16:
2246 case R_X86_64_32:
2247 case R_X86_64_64:
2248 case R_X86_64_32S:
2249 case R_X86_64_PC8:
2250 case R_X86_64_PC16:
2251 case R_X86_64_PC32:
2252 case R_X86_64_PC32_BND:
2253 case R_X86_64_PC64:
2254 case R_X86_64_SIZE32:
2255 case R_X86_64_SIZE64:
2256 if (info->shared
2257 && (h == NULL || h->type != STT_GNU_IFUNC))
2258 break;
2259 /* Fall thru */
2260
2261 case R_X86_64_PLT32:
2262 case R_X86_64_PLT32_BND:
2263 case R_X86_64_PLTOFF64:
2264 if (h != NULL)
2265 {
2266 if (h->plt.refcount > 0)
2267 h->plt.refcount -= 1;
2268 }
2269 break;
2270
2271 default:
2272 break;
2273 }
2274 }
2275
2276 return TRUE;
2277 }
2278
2279 /* Adjust a symbol defined by a dynamic object and referenced by a
2280 regular object. The current definition is in some section of the
2281 dynamic object, but we're not including those sections. We have to
2282 change the definition to something the rest of the link can
2283 understand. */
2284
2285 static bfd_boolean
2286 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2287 struct elf_link_hash_entry *h)
2288 {
2289 struct elf_x86_64_link_hash_table *htab;
2290 asection *s;
2291 struct elf_x86_64_link_hash_entry *eh;
2292 struct elf_dyn_relocs *p;
2293
2294 /* STT_GNU_IFUNC symbol must go through PLT. */
2295 if (h->type == STT_GNU_IFUNC)
2296 {
2297 /* All local STT_GNU_IFUNC references must be treated as local
2298 calls via local PLT. */
2299 if (h->ref_regular
2300 && SYMBOL_CALLS_LOCAL (info, h))
2301 {
2302 bfd_size_type pc_count = 0, count = 0;
2303 struct elf_dyn_relocs **pp;
2304
2305 eh = (struct elf_x86_64_link_hash_entry *) h;
2306 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2307 {
2308 pc_count += p->pc_count;
2309 p->count -= p->pc_count;
2310 p->pc_count = 0;
2311 count += p->count;
2312 if (p->count == 0)
2313 *pp = p->next;
2314 else
2315 pp = &p->next;
2316 }
2317
2318 if (pc_count || count)
2319 {
2320 h->needs_plt = 1;
2321 h->non_got_ref = 1;
2322 if (h->plt.refcount <= 0)
2323 h->plt.refcount = 1;
2324 else
2325 h->plt.refcount += 1;
2326 }
2327 }
2328
2329 if (h->plt.refcount <= 0)
2330 {
2331 h->plt.offset = (bfd_vma) -1;
2332 h->needs_plt = 0;
2333 }
2334 return TRUE;
2335 }
2336
2337 /* If this is a function, put it in the procedure linkage table. We
2338 will fill in the contents of the procedure linkage table later,
2339 when we know the address of the .got section. */
2340 if (h->type == STT_FUNC
2341 || h->needs_plt)
2342 {
2343 if (h->plt.refcount <= 0
2344 || SYMBOL_CALLS_LOCAL (info, h)
2345 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2346 && h->root.type == bfd_link_hash_undefweak))
2347 {
2348 /* This case can occur if we saw a PLT32 reloc in an input
2349 file, but the symbol was never referred to by a dynamic
2350 object, or if all references were garbage collected. In
2351 such a case, we don't actually need to build a procedure
2352 linkage table, and we can just do a PC32 reloc instead. */
2353 h->plt.offset = (bfd_vma) -1;
2354 h->needs_plt = 0;
2355 }
2356
2357 return TRUE;
2358 }
2359 else
2360 /* It's possible that we incorrectly decided a .plt reloc was
2361 needed for an R_X86_64_PC32 reloc to a non-function sym in
2362 check_relocs. We can't decide accurately between function and
2363 non-function syms in check_relocs; objects loaded later in
2364 the link may change h->type. So fix it now. */
2365 h->plt.offset = (bfd_vma) -1;
2366
2367 /* If this is a weak symbol, and there is a real definition, the
2368 processor independent code will have arranged for us to see the
2369 real definition first, and we can just use the same value. */
2370 if (h->u.weakdef != NULL)
2371 {
2372 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2373 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2374 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2375 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2376 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2377 h->non_got_ref = h->u.weakdef->non_got_ref;
2378 return TRUE;
2379 }
2380
2381 /* This is a reference to a symbol defined by a dynamic object which
2382 is not a function. */
2383
2384 /* If we are creating a shared library, we must presume that the
2385 only references to the symbol are via the global offset table.
2386 For such cases we need not do anything here; the relocations will
2387 be handled correctly by relocate_section. */
2388 if (!info->executable)
2389 return TRUE;
2390
2391 /* If there are no references to this symbol that do not use the
2392 GOT, we don't need to generate a copy reloc. */
2393 if (!h->non_got_ref)
2394 return TRUE;
2395
2396 /* If -z nocopyreloc was given, we won't generate them either. */
2397 if (info->nocopyreloc)
2398 {
2399 h->non_got_ref = 0;
2400 return TRUE;
2401 }
2402
2403 if (ELIMINATE_COPY_RELOCS)
2404 {
2405 eh = (struct elf_x86_64_link_hash_entry *) h;
2406 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2407 {
2408 s = p->sec->output_section;
2409 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2410 break;
2411 }
2412
2413 /* If we didn't find any dynamic relocs in read-only sections, then
2414 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2415 if (p == NULL)
2416 {
2417 h->non_got_ref = 0;
2418 return TRUE;
2419 }
2420 }
2421
2422 /* We must allocate the symbol in our .dynbss section, which will
2423 become part of the .bss section of the executable. There will be
2424 an entry for this symbol in the .dynsym section. The dynamic
2425 object will contain position independent code, so all references
2426 from the dynamic object to this symbol will go through the global
2427 offset table. The dynamic linker will use the .dynsym entry to
2428 determine the address it must put in the global offset table, so
2429 both the dynamic object and the regular object will refer to the
2430 same memory location for the variable. */
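/* Typical example: non-PIC code in the executable takes the address
   of a variable defined only in a shared library.  Space is reserved
   for it in .dynbss below, an R_X86_64_COPY reloc makes the dynamic
   linker copy its initial value there at start-up, and both the
   executable and the library then resolve the symbol to that copy.  */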
2431
2432 htab = elf_x86_64_hash_table (info);
2433 if (htab == NULL)
2434 return FALSE;
2435
2436 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2437 to copy the initial value out of the dynamic object and into the
2438 runtime process image. */
2439 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2440 {
2441 const struct elf_backend_data *bed;
2442 bed = get_elf_backend_data (info->output_bfd);
2443 htab->srelbss->size += bed->s->sizeof_rela;
2444 h->needs_copy = 1;
2445 }
2446
2447 s = htab->sdynbss;
2448
2449 return _bfd_elf_adjust_dynamic_copy (h, s);
2450 }
2451
2452 /* Allocate space in .plt, .got and associated reloc sections for
2453 dynamic relocs. */
2454
2455 static bfd_boolean
2456 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2457 {
2458 struct bfd_link_info *info;
2459 struct elf_x86_64_link_hash_table *htab;
2460 struct elf_x86_64_link_hash_entry *eh;
2461 struct elf_dyn_relocs *p;
2462 const struct elf_backend_data *bed;
2463 unsigned int plt_entry_size;
2464
2465 if (h->root.type == bfd_link_hash_indirect)
2466 return TRUE;
2467
2468 eh = (struct elf_x86_64_link_hash_entry *) h;
2469
2470 info = (struct bfd_link_info *) inf;
2471 htab = elf_x86_64_hash_table (info);
2472 if (htab == NULL)
2473 return FALSE;
2474 bed = get_elf_backend_data (info->output_bfd);
2475 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2476
2477 /* We can't use the GOT PLT if pointer equality is needed since
2478 finish_dynamic_symbol won't clear symbol value and the dynamic
2479 linker won't update the GOT slot. We will get into an infinite
2480 loop at run-time. */
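/* Concretely (a sketch of the failure mode): a .plt.got entry is an
   indirect jump through the symbol's GOT slot.  If the symbol value
   were left pointing at that entry and the dynamic linker never
   rewrote the slot, the jump would come straight back to the entry
   and spin forever.  */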
2481 if (htab->plt_got != NULL
2482 && h->type != STT_GNU_IFUNC
2483 && !h->pointer_equality_needed
2484 && h->plt.refcount > 0
2485 && h->got.refcount > 0)
2486 {
2487 /* Don't use the regular PLT if there are both GOT and GOTPLT
2488 relocations. */
2489 h->plt.offset = (bfd_vma) -1;
2490
2491 /* Use the GOT PLT. */
2492 eh->plt_got.refcount = 1;
2493 }
2494
2495 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2496 here if it is defined and referenced in a non-shared object. */
2497 if (h->type == STT_GNU_IFUNC
2498 && h->def_regular)
2499 {
2500 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2501 &eh->dyn_relocs,
2502 plt_entry_size,
2503 plt_entry_size,
2504 GOT_ENTRY_SIZE))
2505 {
2506 asection *s = htab->plt_bnd;
2507 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2508 {
2509 /* Use the .plt.bnd section if it is created. */
2510 eh->plt_bnd.offset = s->size;
2511
2512 /* Make room for this entry in the .plt.bnd section. */
2513 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2514 }
2515
2516 return TRUE;
2517 }
2518 else
2519 return FALSE;
2520 }
2521 else if (htab->elf.dynamic_sections_created
2522 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2523 {
2524 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2525
2526 /* Make sure this symbol is output as a dynamic symbol.
2527 Undefined weak syms won't yet be marked as dynamic. */
2528 if (h->dynindx == -1
2529 && !h->forced_local)
2530 {
2531 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2532 return FALSE;
2533 }
2534
2535 if (info->shared
2536 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2537 {
2538 asection *s = htab->elf.splt;
2539 asection *bnd_s = htab->plt_bnd;
2540 asection *got_s = htab->plt_got;
2541
2542 /* If this is the first .plt entry, make room for the special
2543 first entry. */
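/* The reserved slot is the conventional x86-64 PLT0, roughly:
       pushq GOT+8(%rip)        # push the link_map
       jmpq  *GOT+16(%rip)      # enter _dl_runtime_resolve
   so real entries start at plt_entry_size.  */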
2544 if (s->size == 0)
2545 s->size = plt_entry_size;
2546
2547 if (use_plt_got)
2548 eh->plt_got.offset = got_s->size;
2549 else
2550 {
2551 h->plt.offset = s->size;
2552 if (bnd_s)
2553 eh->plt_bnd.offset = bnd_s->size;
2554 }
2555
2556 /* If this symbol is not defined in a regular file, and we are
2557 not generating a shared library, then set the symbol to this
2558 location in the .plt. This is required to make function
2559 pointers compare as equal between the normal executable and
2560 the shared library. */
2561 if (! info->shared
2562 && !h->def_regular)
2563 {
2564 if (use_plt_got)
2565 {
2566 /* We need to make a call to the entry of the GOT PLT
2567 instead of regular PLT entry. */
2568 h->root.u.def.section = got_s;
2569 h->root.u.def.value = eh->plt_got.offset;
2570 }
2571 else
2572 {
2573 if (bnd_s)
2574 {
2575 /* We need to make a call to the entry of the second
2576 PLT instead of regular PLT entry. */
2577 h->root.u.def.section = bnd_s;
2578 h->root.u.def.value = eh->plt_bnd.offset;
2579 }
2580 else
2581 {
2582 h->root.u.def.section = s;
2583 h->root.u.def.value = h->plt.offset;
2584 }
2585 }
2586 }
2587
2588 /* Make room for this entry. */
2589 if (use_plt_got)
2590 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2591 else
2592 {
2593 s->size += plt_entry_size;
2594 if (bnd_s)
2595 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2596
2597 /* We also need to make an entry in the .got.plt section,
2598 which will be placed in the .got section by the linker
2599 script. */
2600 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2601
2602 /* We also need to make an entry in the .rela.plt
2603 section. */
2604 htab->elf.srelplt->size += bed->s->sizeof_rela;
2605 htab->elf.srelplt->reloc_count++;
2606 }
2607 }
2608 else
2609 {
2610 h->plt.offset = (bfd_vma) -1;
2611 h->needs_plt = 0;
2612 }
2613 }
2614 else
2615 {
2616 h->plt.offset = (bfd_vma) -1;
2617 h->needs_plt = 0;
2618 }
2619
2620 eh->tlsdesc_got = (bfd_vma) -1;
2621
2622 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2623 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2624 if (h->got.refcount > 0
2625 && info->executable
2626 && h->dynindx == -1
2627 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2628 {
2629 h->got.offset = (bfd_vma) -1;
2630 }
2631 else if (h->got.refcount > 0)
2632 {
2633 asection *s;
2634 bfd_boolean dyn;
2635 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2636
2637 /* Make sure this symbol is output as a dynamic symbol.
2638 Undefined weak syms won't yet be marked as dynamic. */
2639 if (h->dynindx == -1
2640 && !h->forced_local)
2641 {
2642 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2643 return FALSE;
2644 }
2645
2646 if (GOT_TLS_GDESC_P (tls_type))
2647 {
2648 eh->tlsdesc_got = htab->elf.sgotplt->size
2649 - elf_x86_64_compute_jump_table_size (htab);
2650 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2651 h->got.offset = (bfd_vma) -2;
2652 }
2653 if (! GOT_TLS_GDESC_P (tls_type)
2654 || GOT_TLS_GD_P (tls_type))
2655 {
2656 s = htab->elf.sgot;
2657 h->got.offset = s->size;
2658 s->size += GOT_ENTRY_SIZE;
2659 if (GOT_TLS_GD_P (tls_type))
2660 s->size += GOT_ENTRY_SIZE;
2661 }
2662 dyn = htab->elf.dynamic_sections_created;
2663 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2664 and two if global.
2665 R_X86_64_GOTTPOFF needs one dynamic relocation. */
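/* That is: a global TLSGD reference needs an R_X86_64_DTPMOD64 plus
   an R_X86_64_DTPOFF64 entry, a local one only the DTPMOD64 since
   the offset is known at link time, and an IE (GOTTPOFF) reference a
   single R_X86_64_TPOFF64.  */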
2666 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2667 || tls_type == GOT_TLS_IE)
2668 htab->elf.srelgot->size += bed->s->sizeof_rela;
2669 else if (GOT_TLS_GD_P (tls_type))
2670 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2671 else if (! GOT_TLS_GDESC_P (tls_type)
2672 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2673 || h->root.type != bfd_link_hash_undefweak)
2674 && (info->shared
2675 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2676 htab->elf.srelgot->size += bed->s->sizeof_rela;
2677 if (GOT_TLS_GDESC_P (tls_type))
2678 {
2679 htab->elf.srelplt->size += bed->s->sizeof_rela;
2680 htab->tlsdesc_plt = (bfd_vma) -1;
2681 }
2682 }
2683 else
2684 h->got.offset = (bfd_vma) -1;
2685
2686 if (eh->dyn_relocs == NULL)
2687 return TRUE;
2688
2689 /* In the shared -Bsymbolic case, discard space allocated for
2690 dynamic pc-relative relocs against symbols which turn out to be
2691 defined in regular objects. For the normal shared case, discard
2692 space for pc-relative relocs that have become local due to symbol
2693 visibility changes. */
2694
2695 if (info->shared)
2696 {
2697 /* Relocs that use pc_count are those that appear on a call
2698 insn, or certain REL relocs that can be generated via assembly.
2699 We want calls to protected symbols to resolve directly to the
2700 function rather than going via the plt. If people want
2701 function pointer comparisons to work as expected then they
2702 should avoid writing weird assembly. */
2703 if (SYMBOL_CALLS_LOCAL (info, h))
2704 {
2705 struct elf_dyn_relocs **pp;
2706
2707 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2708 {
2709 p->count -= p->pc_count;
2710 p->pc_count = 0;
2711 if (p->count == 0)
2712 *pp = p->next;
2713 else
2714 pp = &p->next;
2715 }
2716 }
2717
2718 /* Also discard relocs on undefined weak syms with non-default
2719 visibility. */
2720 if (eh->dyn_relocs != NULL)
2721 {
2722 if (h->root.type == bfd_link_hash_undefweak)
2723 {
2724 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2725 eh->dyn_relocs = NULL;
2726
2727 /* Make sure undefined weak symbols are output as dynamic
2728 symbols in PIEs. */
2729 else if (h->dynindx == -1
2730 && ! h->forced_local
2731 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2732 return FALSE;
2733 }
2734 /* For PIE, discard space for relocs against symbols which
2735 turn out to need copy relocs. */
2736 else if (info->executable
2737 && h->needs_copy
2738 && h->def_dynamic
2739 && !h->def_regular)
2740 eh->dyn_relocs = NULL;
2741 }
2742 }
2743 else if (ELIMINATE_COPY_RELOCS)
2744 {
2745 /* For the non-shared case, discard space for relocs against
2746 symbols which turn out to need copy relocs or are not
2747 dynamic. */
2748
2749 if (!h->non_got_ref
2750 && ((h->def_dynamic
2751 && !h->def_regular)
2752 || (htab->elf.dynamic_sections_created
2753 && (h->root.type == bfd_link_hash_undefweak
2754 || h->root.type == bfd_link_hash_undefined))))
2755 {
2756 /* Make sure this symbol is output as a dynamic symbol.
2757 Undefined weak syms won't yet be marked as dynamic. */
2758 if (h->dynindx == -1
2759 && ! h->forced_local
2760 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2761 return FALSE;
2762
2763 /* If that succeeded, we know we'll be keeping all the
2764 relocs. */
2765 if (h->dynindx != -1)
2766 goto keep;
2767 }
2768
2769 eh->dyn_relocs = NULL;
2770
2771 keep: ;
2772 }
2773
2774 /* Finally, allocate space. */
2775 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2776 {
2777 asection * sreloc;
2778
2779 sreloc = elf_section_data (p->sec)->sreloc;
2780
2781 BFD_ASSERT (sreloc != NULL);
2782
2783 sreloc->size += p->count * bed->s->sizeof_rela;
2784 }
2785
2786 return TRUE;
2787 }
2788
2789 /* Allocate space in .plt, .got and associated reloc sections for
2790 local dynamic relocs. */
2791
2792 static bfd_boolean
2793 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2794 {
2795 struct elf_link_hash_entry *h
2796 = (struct elf_link_hash_entry *) *slot;
2797
2798 if (h->type != STT_GNU_IFUNC
2799 || !h->def_regular
2800 || !h->ref_regular
2801 || !h->forced_local
2802 || h->root.type != bfd_link_hash_defined)
2803 abort ();
2804
2805 return elf_x86_64_allocate_dynrelocs (h, inf);
2806 }
2807
2808 /* Find any dynamic relocs that apply to read-only sections. */
2809
2810 static bfd_boolean
2811 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2812 void * inf)
2813 {
2814 struct elf_x86_64_link_hash_entry *eh;
2815 struct elf_dyn_relocs *p;
2816
2817 /* Skip local IFUNC symbols. */
2818 if (h->forced_local && h->type == STT_GNU_IFUNC)
2819 return TRUE;
2820
2821 eh = (struct elf_x86_64_link_hash_entry *) h;
2822 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2823 {
2824 asection *s = p->sec->output_section;
2825
2826 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2827 {
2828 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2829
2830 info->flags |= DF_TEXTREL;
2831
2832 if (info->warn_shared_textrel && info->shared)
2833 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2834 p->sec->owner, h->root.root.string,
2835 p->sec);
2836
2837 /* Not an error, just cut short the traversal. */
2838 return FALSE;
2839 }
2840 }
2841 return TRUE;
2842 }
2843
2844 /* Convert
2845 mov foo@GOTPCREL(%rip), %reg
2846 to
2847 lea foo(%rip), %reg
2848 with the local symbol, foo. */
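/* For illustration, the encodings involved are

     mov foo@GOTPCREL(%rip), %rax     48 8b 05 <rel32>   R_X86_64_GOTPCREL
     lea foo(%rip), %rax              48 8d 05 <rel32>   R_X86_64_PC32

   so the conversion only flips the opcode byte at r_offset - 2 from
   0x8b to 0x8d and retargets the reloc at the symbol itself, saving
   a GOT load for symbols known to resolve locally.  */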
2849
2850 static bfd_boolean
2851 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2852 struct bfd_link_info *link_info)
2853 {
2854 Elf_Internal_Shdr *symtab_hdr;
2855 Elf_Internal_Rela *internal_relocs;
2856 Elf_Internal_Rela *irel, *irelend;
2857 bfd_byte *contents;
2858 struct elf_x86_64_link_hash_table *htab;
2859 bfd_boolean changed_contents;
2860 bfd_boolean changed_relocs;
2861 bfd_signed_vma *local_got_refcounts;
2862
2863 /* Don't even try to convert non-ELF outputs. */
2864 if (!is_elf_hash_table (link_info->hash))
2865 return FALSE;
2866
2867 /* Nothing to do if there is no code, no relocations or no output. */
2868 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2869 || sec->reloc_count == 0
2870 || bfd_is_abs_section (sec->output_section))
2871 return TRUE;
2872
2873 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2874
2875 /* Load the relocations for this section. */
2876 internal_relocs = (_bfd_elf_link_read_relocs
2877 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2878 link_info->keep_memory));
2879 if (internal_relocs == NULL)
2880 return FALSE;
2881
2882 htab = elf_x86_64_hash_table (link_info);
2883 changed_contents = FALSE;
2884 changed_relocs = FALSE;
2885 local_got_refcounts = elf_local_got_refcounts (abfd);
2886
2887 /* Get the section contents. */
2888 if (elf_section_data (sec)->this_hdr.contents != NULL)
2889 contents = elf_section_data (sec)->this_hdr.contents;
2890 else
2891 {
2892 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2893 goto error_return;
2894 }
2895
2896 irelend = internal_relocs + sec->reloc_count;
2897 for (irel = internal_relocs; irel < irelend; irel++)
2898 {
2899 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2900 unsigned int r_symndx = htab->r_sym (irel->r_info);
2901 unsigned int indx;
2902 struct elf_link_hash_entry *h;
2903
2904 if (r_type != R_X86_64_GOTPCREL)
2905 continue;
2906
2907 /* Get the symbol referred to by the reloc. */
2908 if (r_symndx < symtab_hdr->sh_info)
2909 {
2910 Elf_Internal_Sym *isym;
2911
2912 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2913 abfd, r_symndx);
2914
2915 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2916 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2917 && irel->r_offset >= 2
2918 && bfd_get_8 (abfd,
2919 contents + irel->r_offset - 2) == 0x8b)
2920 {
2921 bfd_put_8 (abfd, 0x8d,
2922 contents + irel->r_offset - 2);
2923 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2924 if (local_got_refcounts != NULL
2925 && local_got_refcounts[r_symndx] > 0)
2926 local_got_refcounts[r_symndx] -= 1;
2927 changed_contents = TRUE;
2928 changed_relocs = TRUE;
2929 }
2930 continue;
2931 }
2932
2933 indx = r_symndx - symtab_hdr->sh_info;
2934 h = elf_sym_hashes (abfd)[indx];
2935 BFD_ASSERT (h != NULL);
2936
2937 while (h->root.type == bfd_link_hash_indirect
2938 || h->root.type == bfd_link_hash_warning)
2939 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2940
2941 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2942 avoid optimizing _DYNAMIC since ld.so may use its link-time
2943 address. */
2944 if (h->def_regular
2945 && h->type != STT_GNU_IFUNC
2946 && h != htab->elf.hdynamic
2947 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2948 && irel->r_offset >= 2
2949 && bfd_get_8 (abfd,
2950 contents + irel->r_offset - 2) == 0x8b)
2951 {
2952 bfd_put_8 (abfd, 0x8d,
2953 contents + irel->r_offset - 2);
2954 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2955 if (h->got.refcount > 0)
2956 h->got.refcount -= 1;
2957 changed_contents = TRUE;
2958 changed_relocs = TRUE;
2959 }
2960 }
2961
2962 if (contents != NULL
2963 && elf_section_data (sec)->this_hdr.contents != contents)
2964 {
2965 if (!changed_contents && !link_info->keep_memory)
2966 free (contents);
2967 else
2968 {
2969 /* Cache the section contents for elf_link_input_bfd. */
2970 elf_section_data (sec)->this_hdr.contents = contents;
2971 }
2972 }
2973
2974 if (elf_section_data (sec)->relocs != internal_relocs)
2975 {
2976 if (!changed_relocs)
2977 free (internal_relocs);
2978 else
2979 elf_section_data (sec)->relocs = internal_relocs;
2980 }
2981
2982 return TRUE;
2983
2984 error_return:
2985 if (contents != NULL
2986 && elf_section_data (sec)->this_hdr.contents != contents)
2987 free (contents);
2988 if (internal_relocs != NULL
2989 && elf_section_data (sec)->relocs != internal_relocs)
2990 free (internal_relocs);
2991 return FALSE;
2992 }
2993
2994 /* Set the sizes of the dynamic sections. */
2995
2996 static bfd_boolean
2997 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
2998 struct bfd_link_info *info)
2999 {
3000 struct elf_x86_64_link_hash_table *htab;
3001 bfd *dynobj;
3002 asection *s;
3003 bfd_boolean relocs;
3004 bfd *ibfd;
3005 const struct elf_backend_data *bed;
3006
3007 htab = elf_x86_64_hash_table (info);
3008 if (htab == NULL)
3009 return FALSE;
3010 bed = get_elf_backend_data (output_bfd);
3011
3012 dynobj = htab->elf.dynobj;
3013 if (dynobj == NULL)
3014 abort ();
3015
3016 if (htab->elf.dynamic_sections_created)
3017 {
3018 /* Set the contents of the .interp section to the interpreter. */
3019 if (info->executable)
3020 {
3021 s = bfd_get_linker_section (dynobj, ".interp");
3022 if (s == NULL)
3023 abort ();
3024 s->size = htab->dynamic_interpreter_size;
3025 s->contents = (unsigned char *) htab->dynamic_interpreter;
3026 }
3027 }
3028
3029 /* Set up .got offsets for local syms, and space for local dynamic
3030 relocs. */
3031 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3032 {
3033 bfd_signed_vma *local_got;
3034 bfd_signed_vma *end_local_got;
3035 char *local_tls_type;
3036 bfd_vma *local_tlsdesc_gotent;
3037 bfd_size_type locsymcount;
3038 Elf_Internal_Shdr *symtab_hdr;
3039 asection *srel;
3040
3041 if (! is_x86_64_elf (ibfd))
3042 continue;
3043
3044 for (s = ibfd->sections; s != NULL; s = s->next)
3045 {
3046 struct elf_dyn_relocs *p;
3047
3048 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3049 return FALSE;
3050
3051 for (p = (struct elf_dyn_relocs *)
3052 (elf_section_data (s)->local_dynrel);
3053 p != NULL;
3054 p = p->next)
3055 {
3056 if (!bfd_is_abs_section (p->sec)
3057 && bfd_is_abs_section (p->sec->output_section))
3058 {
3059 /* Input section has been discarded, either because
3060 it is a copy of a linkonce section or due to
3061 linker script /DISCARD/, so we'll be discarding
3062 the relocs too. */
3063 }
3064 else if (p->count != 0)
3065 {
3066 srel = elf_section_data (p->sec)->sreloc;
3067 srel->size += p->count * bed->s->sizeof_rela;
3068 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3069 && (info->flags & DF_TEXTREL) == 0)
3070 {
3071 info->flags |= DF_TEXTREL;
3072 if (info->warn_shared_textrel && info->shared)
3073 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
3074 p->sec->owner, p->sec);
3075 }
3076 }
3077 }
3078 }
3079
3080 local_got = elf_local_got_refcounts (ibfd);
3081 if (!local_got)
3082 continue;
3083
3084 symtab_hdr = &elf_symtab_hdr (ibfd);
3085 locsymcount = symtab_hdr->sh_info;
3086 end_local_got = local_got + locsymcount;
3087 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3088 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3089 s = htab->elf.sgot;
3090 srel = htab->elf.srelgot;
3091 for (; local_got < end_local_got;
3092 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3093 {
3094 *local_tlsdesc_gotent = (bfd_vma) -1;
3095 if (*local_got > 0)
3096 {
3097 if (GOT_TLS_GDESC_P (*local_tls_type))
3098 {
3099 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3100 - elf_x86_64_compute_jump_table_size (htab);
3101 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3102 *local_got = (bfd_vma) -2;
3103 }
3104 if (! GOT_TLS_GDESC_P (*local_tls_type)
3105 || GOT_TLS_GD_P (*local_tls_type))
3106 {
3107 *local_got = s->size;
3108 s->size += GOT_ENTRY_SIZE;
3109 if (GOT_TLS_GD_P (*local_tls_type))
3110 s->size += GOT_ENTRY_SIZE;
3111 }
3112 if (info->shared
3113 || GOT_TLS_GD_ANY_P (*local_tls_type)
3114 || *local_tls_type == GOT_TLS_IE)
3115 {
3116 if (GOT_TLS_GDESC_P (*local_tls_type))
3117 {
3118 htab->elf.srelplt->size
3119 += bed->s->sizeof_rela;
3120 htab->tlsdesc_plt = (bfd_vma) -1;
3121 }
3122 if (! GOT_TLS_GDESC_P (*local_tls_type)
3123 || GOT_TLS_GD_P (*local_tls_type))
3124 srel->size += bed->s->sizeof_rela;
3125 }
3126 }
3127 else
3128 *local_got = (bfd_vma) -1;
3129 }
3130 }
3131
3132 if (htab->tls_ld_got.refcount > 0)
3133 {
3134 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3135 relocs. */
3136 htab->tls_ld_got.offset = htab->elf.sgot->size;
3137 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3138 htab->elf.srelgot->size += bed->s->sizeof_rela;
3139 }
3140 else
3141 htab->tls_ld_got.offset = -1;
3142
3143 /* Allocate global sym .plt and .got entries, and space for global
3144 sym dynamic relocs. */
3145 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3146 info);
3147
3148 /* Allocate .plt and .got entries, and space for local symbols. */
3149 htab_traverse (htab->loc_hash_table,
3150 elf_x86_64_allocate_local_dynrelocs,
3151 info);
3152
3153 /* For every jump slot reserved in the sgotplt, reloc_count is
3154 incremented. However, when we reserve space for TLS descriptors,
3155 it's not incremented, so in order to compute the space reserved
3156 for them, it suffices to multiply the reloc count by the jump
3157 slot size.
3158
3159 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3160 so that R_X86_64_IRELATIVE entries come last. */
3161 if (htab->elf.srelplt)
3162 {
3163 htab->sgotplt_jump_table_size
3164 = elf_x86_64_compute_jump_table_size (htab);
3165 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3166 }
3167 else if (htab->elf.irelplt)
3168 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3169
3170 if (htab->tlsdesc_plt)
3171 {
3172 /* If we're not using lazy TLS relocations, don't generate the
3173 PLT and GOT entries they require. */
3174 if ((info->flags & DF_BIND_NOW))
3175 htab->tlsdesc_plt = 0;
3176 else
3177 {
3178 htab->tlsdesc_got = htab->elf.sgot->size;
3179 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3180 /* Reserve room for the initial entry.
3181 FIXME: we could probably do away with it in this case. */
3182 if (htab->elf.splt->size == 0)
3183 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3184 htab->tlsdesc_plt = htab->elf.splt->size;
3185 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3186 }
3187 }
3188
3189 if (htab->elf.sgotplt)
3190 {
3191 /* Don't allocate .got.plt section if there are no GOT nor PLT
3192 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3193 if ((htab->elf.hgot == NULL
3194 || !htab->elf.hgot->ref_regular_nonweak)
3195 && (htab->elf.sgotplt->size
3196 == get_elf_backend_data (output_bfd)->got_header_size)
3197 && (htab->elf.splt == NULL
3198 || htab->elf.splt->size == 0)
3199 && (htab->elf.sgot == NULL
3200 || htab->elf.sgot->size == 0)
3201 && (htab->elf.iplt == NULL
3202 || htab->elf.iplt->size == 0)
3203 && (htab->elf.igotplt == NULL
3204 || htab->elf.igotplt->size == 0))
3205 htab->elf.sgotplt->size = 0;
3206 }
3207
3208 if (htab->plt_eh_frame != NULL
3209 && htab->elf.splt != NULL
3210 && htab->elf.splt->size != 0
3211 && !bfd_is_abs_section (htab->elf.splt->output_section)
3212 && _bfd_elf_eh_frame_present (info))
3213 {
3214 const struct elf_x86_64_backend_data *arch_data
3215 = get_elf_x86_64_arch_data (bed);
3216 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3217 }
3218
3219 /* We now have determined the sizes of the various dynamic sections.
3220 Allocate memory for them. */
3221 relocs = FALSE;
3222 for (s = dynobj->sections; s != NULL; s = s->next)
3223 {
3224 if ((s->flags & SEC_LINKER_CREATED) == 0)
3225 continue;
3226
3227 if (s == htab->elf.splt
3228 || s == htab->elf.sgot
3229 || s == htab->elf.sgotplt
3230 || s == htab->elf.iplt
3231 || s == htab->elf.igotplt
3232 || s == htab->plt_bnd
3233 || s == htab->plt_got
3234 || s == htab->plt_eh_frame
3235 || s == htab->sdynbss)
3236 {
3237 /* Strip this section if we don't need it; see the
3238 comment below. */
3239 }
3240 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3241 {
3242 if (s->size != 0 && s != htab->elf.srelplt)
3243 relocs = TRUE;
3244
3245 /* We use the reloc_count field as a counter if we need
3246 to copy relocs into the output file. */
3247 if (s != htab->elf.srelplt)
3248 s->reloc_count = 0;
3249 }
3250 else
3251 {
3252 /* It's not one of our sections, so don't allocate space. */
3253 continue;
3254 }
3255
3256 if (s->size == 0)
3257 {
3258 /* If we don't need this section, strip it from the
3259 output file. This is mostly to handle .rela.bss and
3260 .rela.plt. We must create both sections in
3261 create_dynamic_sections, because they must be created
3262 before the linker maps input sections to output
3263 sections. The linker does that before
3264 adjust_dynamic_symbol is called, and it is that
3265 function which decides whether anything needs to go
3266 into these sections. */
3267
3268 s->flags |= SEC_EXCLUDE;
3269 continue;
3270 }
3271
3272 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3273 continue;
3274
3275 /* Allocate memory for the section contents. We use bfd_zalloc
3276 here in case unused entries are not reclaimed before the
3277 section's contents are written out. This should not happen,
3278 but this way if it does, we get a R_X86_64_NONE reloc instead
3279 of garbage. */
3280 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3281 if (s->contents == NULL)
3282 return FALSE;
3283 }
3284
3285 if (htab->plt_eh_frame != NULL
3286 && htab->plt_eh_frame->contents != NULL)
3287 {
3288 const struct elf_x86_64_backend_data *arch_data
3289 = get_elf_x86_64_arch_data (bed);
3290
3291 memcpy (htab->plt_eh_frame->contents,
3292 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3293 bfd_put_32 (dynobj, htab->elf.splt->size,
3294 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3295 }
3296
3297 if (htab->elf.dynamic_sections_created)
3298 {
3299 /* Add some entries to the .dynamic section. We fill in the
3300 values later, in elf_x86_64_finish_dynamic_sections, but we
3301 must add the entries now so that we get the correct size for
3302 the .dynamic section. The DT_DEBUG entry is filled in by the
3303 dynamic linker and used by the debugger. */
3304 #define add_dynamic_entry(TAG, VAL) \
3305 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3306
3307 if (info->executable)
3308 {
3309 if (!add_dynamic_entry (DT_DEBUG, 0))
3310 return FALSE;
3311 }
3312
3313 if (htab->elf.splt->size != 0)
3314 {
3315 if (!add_dynamic_entry (DT_PLTGOT, 0)
3316 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3317 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3318 || !add_dynamic_entry (DT_JMPREL, 0))
3319 return FALSE;
3320
3321 if (htab->tlsdesc_plt
3322 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3323 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3324 return FALSE;
3325 }
3326
3327 if (relocs)
3328 {
3329 if (!add_dynamic_entry (DT_RELA, 0)
3330 || !add_dynamic_entry (DT_RELASZ, 0)
3331 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3332 return FALSE;
3333
3334 /* If any dynamic relocs apply to a read-only section,
3335 then we need a DT_TEXTREL entry. */
3336 if ((info->flags & DF_TEXTREL) == 0)
3337 elf_link_hash_traverse (&htab->elf,
3338 elf_x86_64_readonly_dynrelocs,
3339 info);
3340
3341 if ((info->flags & DF_TEXTREL) != 0)
3342 {
3343 if (!add_dynamic_entry (DT_TEXTREL, 0))
3344 return FALSE;
3345 }
3346 }
3347 }
3348 #undef add_dynamic_entry
3349
3350 return TRUE;
3351 }
3352
3353 static bfd_boolean
3354 elf_x86_64_always_size_sections (bfd *output_bfd,
3355 struct bfd_link_info *info)
3356 {
3357 asection *tls_sec = elf_hash_table (info)->tls_sec;
3358
3359 if (tls_sec)
3360 {
3361 struct elf_link_hash_entry *tlsbase;
3362
3363 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3364 "_TLS_MODULE_BASE_",
3365 FALSE, FALSE, FALSE);
3366
3367 if (tlsbase && tlsbase->type == STT_TLS)
3368 {
3369 struct elf_x86_64_link_hash_table *htab;
3370 struct bfd_link_hash_entry *bh = NULL;
3371 const struct elf_backend_data *bed
3372 = get_elf_backend_data (output_bfd);
3373
3374 htab = elf_x86_64_hash_table (info);
3375 if (htab == NULL)
3376 return FALSE;
3377
3378 if (!(_bfd_generic_link_add_one_symbol
3379 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3380 tls_sec, 0, NULL, FALSE,
3381 bed->collect, &bh)))
3382 return FALSE;
3383
3384 htab->tls_module_base = bh;
3385
3386 tlsbase = (struct elf_link_hash_entry *)bh;
3387 tlsbase->def_regular = 1;
3388 tlsbase->other = STV_HIDDEN;
3389 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3390 }
3391 }
3392
3393 return TRUE;
3394 }
3395
3396 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3397 executables. Rather than setting it to the beginning of the TLS
3398 section, we have to set it to the end. This function may be called
3399 multiple times; it is idempotent. */
3400
3401 static void
3402 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3403 {
3404 struct elf_x86_64_link_hash_table *htab;
3405 struct bfd_link_hash_entry *base;
3406
3407 if (!info->executable)
3408 return;
3409
3410 htab = elf_x86_64_hash_table (info);
3411 if (htab == NULL)
3412 return;
3413
3414 base = htab->tls_module_base;
3415 if (base == NULL)
3416 return;
3417
3418 base->u.def.value = htab->elf.tls_size;
3419 }
3420
3421 /* Return the base VMA address which should be subtracted from real addresses
3422 when resolving @dtpoff relocation.
3423 This is PT_TLS segment p_vaddr. */
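/* E.g. with a PT_TLS segment at vma 0x600000, a TLS variable at vma
   0x600010 gets a @dtpoff value of 0x10, its offset within the
   module's TLS block.  */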
3424
3425 static bfd_vma
3426 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3427 {
3428 /* If tls_sec is NULL, we should have signalled an error already. */
3429 if (elf_hash_table (info)->tls_sec == NULL)
3430 return 0;
3431 return elf_hash_table (info)->tls_sec->vma;
3432 }
3433
3434 /* Return the relocation value for @tpoff relocation
3435 if STT_TLS virtual address is ADDRESS. */
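/* x86-64 uses TLS variant II: the static TLS block sits just below
   the thread pointer, so @tpoff values are negative.  As a sketch,
   for a 0x10-byte TLS segment aligned to 16, a variable at offset 4
   yields 4 - 0x10 = -0xc and is addressed as %fs:-0xc.  */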
3436
3437 static bfd_vma
3438 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3439 {
3440 struct elf_link_hash_table *htab = elf_hash_table (info);
3441 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3442 bfd_vma static_tls_size;
3443
3444 /* If tls_segment is NULL, we should have signalled an error already. */
3445 if (htab->tls_sec == NULL)
3446 return 0;
3447
3448 /* Consider special static TLS alignment requirements. */
3449 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3450 return address - static_tls_size - htab->tls_sec->vma;
3451 }
3452
3453 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3454 branch? */
3455
3456 static bfd_boolean
3457 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3458 {
3459 /* Opcode Instruction
3460 0xe8 call
3461 0xe9 jump
3462 0x0f 0x8x conditional jump */
3463 return ((offset > 0
3464 && (contents [offset - 1] == 0xe8
3465 || contents [offset - 1] == 0xe9))
3466 || (offset > 1
3467 && contents [offset - 2] == 0x0f
3468 && (contents [offset - 1] & 0xf0) == 0x80));
3469 }
3470
3471 /* Relocate an x86_64 ELF section. */
3472
3473 static bfd_boolean
3474 elf_x86_64_relocate_section (bfd *output_bfd,
3475 struct bfd_link_info *info,
3476 bfd *input_bfd,
3477 asection *input_section,
3478 bfd_byte *contents,
3479 Elf_Internal_Rela *relocs,
3480 Elf_Internal_Sym *local_syms,
3481 asection **local_sections)
3482 {
3483 struct elf_x86_64_link_hash_table *htab;
3484 Elf_Internal_Shdr *symtab_hdr;
3485 struct elf_link_hash_entry **sym_hashes;
3486 bfd_vma *local_got_offsets;
3487 bfd_vma *local_tlsdesc_gotents;
3488 Elf_Internal_Rela *rel;
3489 Elf_Internal_Rela *relend;
3490 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3491
3492 BFD_ASSERT (is_x86_64_elf (input_bfd));
3493
3494 htab = elf_x86_64_hash_table (info);
3495 if (htab == NULL)
3496 return FALSE;
3497 symtab_hdr = &elf_symtab_hdr (input_bfd);
3498 sym_hashes = elf_sym_hashes (input_bfd);
3499 local_got_offsets = elf_local_got_offsets (input_bfd);
3500 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3501
3502 elf_x86_64_set_tls_module_base (info);
3503
3504 rel = relocs;
3505 relend = relocs + input_section->reloc_count;
3506 for (; rel < relend; rel++)
3507 {
3508 unsigned int r_type;
3509 reloc_howto_type *howto;
3510 unsigned long r_symndx;
3511 struct elf_link_hash_entry *h;
3512 struct elf_x86_64_link_hash_entry *eh;
3513 Elf_Internal_Sym *sym;
3514 asection *sec;
3515 bfd_vma off, offplt, plt_offset;
3516 bfd_vma relocation;
3517 bfd_boolean unresolved_reloc;
3518 bfd_reloc_status_type r;
3519 int tls_type;
3520 asection *base_got, *resolved_plt;
3521 bfd_vma st_size;
3522
3523 r_type = ELF32_R_TYPE (rel->r_info);
3524 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3525 || r_type == (int) R_X86_64_GNU_VTENTRY)
3526 continue;
3527
3528 if (r_type >= (int) R_X86_64_standard)
3529 {
3530 (*_bfd_error_handler)
3531 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3532 input_bfd, input_section, r_type);
3533 bfd_set_error (bfd_error_bad_value);
3534 return FALSE;
3535 }
3536
3537 if (r_type != (int) R_X86_64_32
3538 || ABI_64_P (output_bfd))
3539 howto = x86_64_elf_howto_table + r_type;
3540 else
3541 howto = (x86_64_elf_howto_table
3542 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3543 r_symndx = htab->r_sym (rel->r_info);
3544 h = NULL;
3545 sym = NULL;
3546 sec = NULL;
3547 unresolved_reloc = FALSE;
3548 if (r_symndx < symtab_hdr->sh_info)
3549 {
3550 sym = local_syms + r_symndx;
3551 sec = local_sections[r_symndx];
3552
3553 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3554 &sec, rel);
3555 st_size = sym->st_size;
3556
3557 /* Relocate against local STT_GNU_IFUNC symbol. */
3558 if (!info->relocatable
3559 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3560 {
3561 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3562 rel, FALSE);
3563 if (h == NULL)
3564 abort ();
3565
3566 /* Set STT_GNU_IFUNC symbol value. */
3567 h->root.u.def.value = sym->st_value;
3568 h->root.u.def.section = sec;
3569 }
3570 }
3571 else
3572 {
3573 bfd_boolean warned ATTRIBUTE_UNUSED;
3574 bfd_boolean ignored ATTRIBUTE_UNUSED;
3575
3576 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3577 r_symndx, symtab_hdr, sym_hashes,
3578 h, sec, relocation,
3579 unresolved_reloc, warned, ignored);
3580 st_size = h->size;
3581 }
3582
3583 if (sec != NULL && discarded_section (sec))
3584 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3585 rel, 1, relend, howto, 0, contents);
3586
3587 if (info->relocatable)
3588 continue;
3589
3590 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3591 {
3592 if (r_type == R_X86_64_64)
3593 {
3594 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3595 zero-extend it to 64bit if addend is zero. */
3596 r_type = R_X86_64_32;
3597 memset (contents + rel->r_offset + 4, 0, 4);
3598 }
3599 else if (r_type == R_X86_64_SIZE64)
3600 {
3601 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3602 zero-extend it to 64bit if addend is zero. */
3603 r_type = R_X86_64_SIZE32;
3604 memset (contents + rel->r_offset + 4, 0, 4);
3605 }
3606 }
3607
3608 eh = (struct elf_x86_64_link_hash_entry *) h;
3609
3610 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3611 it here if it is defined in a non-shared object. */
3612 if (h != NULL
3613 && h->type == STT_GNU_IFUNC
3614 && h->def_regular)
3615 {
3616 bfd_vma plt_index;
3617 const char *name;
3618
3619 if ((input_section->flags & SEC_ALLOC) == 0
3620 || h->plt.offset == (bfd_vma) -1)
3621 abort ();
3622
3623 /* STT_GNU_IFUNC symbol must go through PLT. */
3624 if (htab->elf.splt != NULL)
3625 {
3626 if (htab->plt_bnd != NULL)
3627 {
3628 resolved_plt = htab->plt_bnd;
3629 plt_offset = eh->plt_bnd.offset;
3630 }
3631 else
3632 {
3633 resolved_plt = htab->elf.splt;
3634 plt_offset = h->plt.offset;
3635 }
3636 }
3637 else
3638 {
3639 resolved_plt = htab->elf.iplt;
3640 plt_offset = h->plt.offset;
3641 }
3642
3643 relocation = (resolved_plt->output_section->vma
3644 + resolved_plt->output_offset + plt_offset);
3645
3646 switch (r_type)
3647 {
3648 default:
3649 if (h->root.root.string)
3650 name = h->root.root.string;
3651 else
3652 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3653 NULL);
3654 (*_bfd_error_handler)
3655 (_("%B: relocation %s against STT_GNU_IFUNC "
3656 "symbol `%s' isn't handled by %s"), input_bfd,
3657 x86_64_elf_howto_table[r_type].name,
3658 name, __FUNCTION__);
3659 bfd_set_error (bfd_error_bad_value);
3660 return FALSE;
3661
3662 case R_X86_64_32S:
3663 if (info->shared)
3664 abort ();
3665 goto do_relocation;
3666
3667 case R_X86_64_32:
3668 if (ABI_64_P (output_bfd))
3669 goto do_relocation;
3670 /* FALLTHROUGH */
3671 case R_X86_64_64:
3672 if (rel->r_addend != 0)
3673 {
3674 if (h->root.root.string)
3675 name = h->root.root.string;
3676 else
3677 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3678 sym, NULL);
3679 (*_bfd_error_handler)
3680 (_("%B: relocation %s against STT_GNU_IFUNC "
3681 "symbol `%s' has non-zero addend: %d"),
3682 input_bfd, x86_64_elf_howto_table[r_type].name,
3683 name, rel->r_addend);
3684 bfd_set_error (bfd_error_bad_value);
3685 return FALSE;
3686 }
3687
3688 /* Generate a dynamic relocation only when there is a
3689 non-GOT reference in a shared object. */
3690 if (info->shared && h->non_got_ref)
3691 {
3692 Elf_Internal_Rela outrel;
3693 asection *sreloc;
3694
3695 /* Need a dynamic relocation to get the real function
3696 address. */
3697 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3698 info,
3699 input_section,
3700 rel->r_offset);
3701 if (outrel.r_offset == (bfd_vma) -1
3702 || outrel.r_offset == (bfd_vma) -2)
3703 abort ();
3704
3705 outrel.r_offset += (input_section->output_section->vma
3706 + input_section->output_offset);
3707
3708 if (h->dynindx == -1
3709 || h->forced_local
3710 || info->executable)
3711 {
3712 /* This symbol is resolved locally. */
3713 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3714 outrel.r_addend = (h->root.u.def.value
3715 + h->root.u.def.section->output_section->vma
3716 + h->root.u.def.section->output_offset);
3717 }
3718 else
3719 {
3720 outrel.r_info = htab->r_info (h->dynindx, r_type);
3721 outrel.r_addend = 0;
3722 }
3723
3724 sreloc = htab->elf.irelifunc;
3725 elf_append_rela (output_bfd, sreloc, &outrel);
3726
3727 /* If this reloc is against an external symbol, we
3728 do not want to fiddle with the addend. Otherwise,
3729 we need to include the symbol value so that it
3730 becomes an addend for the dynamic reloc. For an
3731 internal symbol, we have already updated the addend. */
3732 continue;
3733 }
3734 /* FALLTHROUGH */
3735 case R_X86_64_PC32:
3736 case R_X86_64_PC32_BND:
3737 case R_X86_64_PC64:
3738 case R_X86_64_PLT32:
3739 case R_X86_64_PLT32_BND:
3740 goto do_relocation;
3741
3742 case R_X86_64_GOTPCREL:
3743 case R_X86_64_GOTPCREL64:
3744 base_got = htab->elf.sgot;
3745 off = h->got.offset;
3746
3747 if (base_got == NULL)
3748 abort ();
3749
3750 if (off == (bfd_vma) -1)
3751 {
3752 /* We can't use h->got.offset here to save state, or
3753 even just remember the offset, as finish_dynamic_symbol
3754 would use that as offset into .got. */
3755
3756 if (htab->elf.splt != NULL)
3757 {
3758 plt_index = h->plt.offset / plt_entry_size - 1;
3759 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3760 base_got = htab->elf.sgotplt;
3761 }
3762 else
3763 {
3764 plt_index = h->plt.offset / plt_entry_size;
3765 off = plt_index * GOT_ENTRY_SIZE;
3766 base_got = htab->elf.igotplt;
3767 }
3768
3769 if (h->dynindx == -1
3770 || h->forced_local
3771 || info->symbolic)
3772 {
3773 /* This references the local definition. We must
3774 initialize this entry in the global offset table.
3775 Since the offset must always be a multiple of 8,
3776 we use the least significant bit to record
3777 whether we have initialized it already.
3778
3779 When doing a dynamic link, we create a .rela.got
3780 relocation entry to initialize the value. This
3781 is done in the finish_dynamic_symbol routine. */
3782 if ((off & 1) != 0)
3783 off &= ~1;
3784 else
3785 {
3786 bfd_put_64 (output_bfd, relocation,
3787 base_got->contents + off);
3788 /* Note that this is harmless for the GOTPLT64
3789 case, as -1 | 1 still is -1. */
3790 h->got.offset |= 1;
3791 }
3792 }
3793 }
3794
3795 relocation = (base_got->output_section->vma
3796 + base_got->output_offset + off);
3797
3798 goto do_relocation;
3799 }
3800 }
3801
3802 /* When generating a shared object, the relocations handled here are
3803 copied into the output file to be resolved at run time. */
3804 switch (r_type)
3805 {
3806 case R_X86_64_GOT32:
3807 case R_X86_64_GOT64:
3808 /* Relocation is to the entry for this symbol in the global
3809 offset table. */
3810 case R_X86_64_GOTPCREL:
3811 case R_X86_64_GOTPCREL64:
3812 /* Use global offset table entry as symbol value. */
3813 case R_X86_64_GOTPLT64:
3814 /* This is obsolete and treated the same as GOT64. */
3815 base_got = htab->elf.sgot;
3816
3817 if (htab->elf.sgot == NULL)
3818 abort ();
3819
3820 if (h != NULL)
3821 {
3822 bfd_boolean dyn;
3823
3824 off = h->got.offset;
3825 if (h->needs_plt
3826 && h->plt.offset != (bfd_vma)-1
3827 && off == (bfd_vma)-1)
3828 {
3829 /* We can't use h->got.offset here to save
3830 state, or even just remember the offset, as
3831 finish_dynamic_symbol would use that as offset into
3832 .got. */
3833 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3834 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3835 base_got = htab->elf.sgotplt;
3836 }
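/* Illustrative note: the symbol got a PLT entry but no regular GOT slot,
   so the code above reuses the .got.plt slot associated with its PLT entry.
   E.g. with the usual 16-byte PLT entries, h->plt.offset == 16 gives
   plt_index 0 and off == 24, the .got.plt slot right after the three
   entries reserved for the dynamic linker (GOT_ENTRY_SIZE is 8).  */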
3837
3838 dyn = htab->elf.dynamic_sections_created;
3839
3840 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3841 || (info->shared
3842 && SYMBOL_REFERENCES_LOCAL (info, h))
3843 || (ELF_ST_VISIBILITY (h->other)
3844 && h->root.type == bfd_link_hash_undefweak))
3845 {
3846 /* This is actually a static link, or it is a -Bsymbolic
3847 link and the symbol is defined locally, or the symbol
3848 was forced to be local because of a version file. We
3849 must initialize this entry in the global offset table.
3850 Since the offset must always be a multiple of 8, we
3851 use the least significant bit to record whether we
3852 have initialized it already.
3853
3854 When doing a dynamic link, we create a .rela.got
3855 relocation entry to initialize the value. This is
3856 done in the finish_dynamic_symbol routine. */
3857 if ((off & 1) != 0)
3858 off &= ~1;
3859 else
3860 {
3861 bfd_put_64 (output_bfd, relocation,
3862 base_got->contents + off);
3863 /* Note that this is harmless for the GOTPLT64 case,
3864 as -1 | 1 still is -1. */
3865 h->got.offset |= 1;
3866 }
3867 }
3868 else
3869 unresolved_reloc = FALSE;
3870 }
3871 else
3872 {
3873 if (local_got_offsets == NULL)
3874 abort ();
3875
3876 off = local_got_offsets[r_symndx];
3877
3878 /* The offset must always be a multiple of 8. We use
3879 the least significant bit to record whether we have
3880 already generated the necessary reloc. */
3881 if ((off & 1) != 0)
3882 off &= ~1;
3883 else
3884 {
3885 bfd_put_64 (output_bfd, relocation,
3886 base_got->contents + off);
3887
3888 if (info->shared)
3889 {
3890 asection *s;
3891 Elf_Internal_Rela outrel;
3892
3893 /* We need to generate a R_X86_64_RELATIVE reloc
3894 for the dynamic linker. */
3895 s = htab->elf.srelgot;
3896 if (s == NULL)
3897 abort ();
3898
3899 outrel.r_offset = (base_got->output_section->vma
3900 + base_got->output_offset
3901 + off);
3902 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3903 outrel.r_addend = relocation;
3904 elf_append_rela (output_bfd, s, &outrel);
3905 }
3906
3907 local_got_offsets[r_symndx] |= 1;
3908 }
3909 }
3910
3911 if (off >= (bfd_vma) -2)
3912 abort ();
3913
3914 relocation = base_got->output_section->vma
3915 + base_got->output_offset + off;
3916 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3917 relocation -= htab->elf.sgotplt->output_section->vma
3918 - htab->elf.sgotplt->output_offset;
3919
3920 break;
3921
3922 case R_X86_64_GOTOFF64:
3923 /* Relocation is relative to the start of the global offset
3924 table. */
3925
3926 /* Check to make sure it isn't a protected function symbol
3927 for a shared library, since it may not be local when used
3928 as a function address. */
3929 if (!info->executable
3930 && h
3931 && !SYMBOLIC_BIND (info, h)
3932 && h->def_regular
3933 && h->type == STT_FUNC
3934 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3935 {
3936 (*_bfd_error_handler)
3937 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3938 input_bfd, h->root.root.string);
3939 bfd_set_error (bfd_error_bad_value);
3940 return FALSE;
3941 }
3942
3943 /* Note that sgot is not involved in this
3944 calculation. We always want the start of .got.plt. If we
3945 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3946 permitted by the ABI, we might have to change this
3947 calculation. */
3948 relocation -= htab->elf.sgotplt->output_section->vma
3949 + htab->elf.sgotplt->output_offset;
3950 break;
3951
3952 case R_X86_64_GOTPC32:
3953 case R_X86_64_GOTPC64:
3954 /* Use global offset table as symbol value. */
3955 relocation = htab->elf.sgotplt->output_section->vma
3956 + htab->elf.sgotplt->output_offset;
3957 unresolved_reloc = FALSE;
3958 break;
3959
3960 case R_X86_64_PLTOFF64:
3961 /* Relocation is PLT entry relative to GOT. For local
3962 symbols it's the symbol itself relative to GOT. */
3963 if (h != NULL
3964 /* See PLT32 handling. */
3965 && h->plt.offset != (bfd_vma) -1
3966 && htab->elf.splt != NULL)
3967 {
3968 if (htab->plt_bnd != NULL)
3969 {
3970 resolved_plt = htab->plt_bnd;
3971 plt_offset = eh->plt_bnd.offset;
3972 }
3973 else
3974 {
3975 resolved_plt = htab->elf.splt;
3976 plt_offset = h->plt.offset;
3977 }
3978
3979 relocation = (resolved_plt->output_section->vma
3980 + resolved_plt->output_offset
3981 + plt_offset);
3982 unresolved_reloc = FALSE;
3983 }
3984
3985 relocation -= htab->elf.sgotplt->output_section->vma
3986 + htab->elf.sgotplt->output_offset;
3987 break;
3988
3989 case R_X86_64_PLT32:
3990 case R_X86_64_PLT32_BND:
3991 /* Relocation is to the entry for this symbol in the
3992 procedure linkage table. */
3993
3994 /* Resolve a PLT32 reloc against a local symbol directly,
3995 without using the procedure linkage table. */
3996 if (h == NULL)
3997 break;
3998
3999 if ((h->plt.offset == (bfd_vma) -1
4000 && eh->plt_got.offset == (bfd_vma) -1)
4001 || htab->elf.splt == NULL)
4002 {
4003 /* We didn't make a PLT entry for this symbol. This
4004 happens when statically linking PIC code, or when
4005 using -Bsymbolic. */
4006 break;
4007 }
4008
4009 if (h->plt.offset != (bfd_vma) -1)
4010 {
4011 if (htab->plt_bnd != NULL)
4012 {
4013 resolved_plt = htab->plt_bnd;
4014 plt_offset = eh->plt_bnd.offset;
4015 }
4016 else
4017 {
4018 resolved_plt = htab->elf.splt;
4019 plt_offset = h->plt.offset;
4020 }
4021 }
4022 else
4023 {
4024 /* Use the GOT PLT. */
4025 resolved_plt = htab->plt_got;
4026 plt_offset = eh->plt_got.offset;
4027 }
4028
4029 relocation = (resolved_plt->output_section->vma
4030 + resolved_plt->output_offset
4031 + plt_offset);
4032 unresolved_reloc = FALSE;
4033 break;
4034
4035 case R_X86_64_SIZE32:
4036 case R_X86_64_SIZE64:
4037 /* Set to symbol size. */
4038 relocation = st_size;
4039 goto direct;
4040
4041 case R_X86_64_PC8:
4042 case R_X86_64_PC16:
4043 case R_X86_64_PC32:
4044 case R_X86_64_PC32_BND:
4045 if (info->shared
4046 && (input_section->flags & SEC_ALLOC) != 0
4047 && (input_section->flags & SEC_READONLY) != 0
4048 && h != NULL)
4049 {
4050 bfd_boolean fail = FALSE;
4051 bfd_boolean branch
4052 = ((r_type == R_X86_64_PC32
4053 || r_type == R_X86_64_PC32_BND)
4054 && is_32bit_relative_branch (contents, rel->r_offset));
4055
4056 if (SYMBOL_REFERENCES_LOCAL (info, h))
4057 {
4058 /* Symbol is referenced locally. Make sure it is
4059 defined locally, or that this is a branch. */
4060 fail = !h->def_regular && !branch;
4061 }
4062 else if (!(info->executable && h->needs_copy))
4063 {
4064 /* Symbol doesn't need copy reloc and isn't referenced
4065 locally. We only allow branch to symbol with
4066 non-default visibility. */
4067 fail = (!branch
4068 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4069 }
4070
4071 if (fail)
4072 {
4073 const char *fmt;
4074 const char *v;
4075 const char *pic = "";
4076
4077 switch (ELF_ST_VISIBILITY (h->other))
4078 {
4079 case STV_HIDDEN:
4080 v = _("hidden symbol");
4081 break;
4082 case STV_INTERNAL:
4083 v = _("internal symbol");
4084 break;
4085 case STV_PROTECTED:
4086 v = _("protected symbol");
4087 break;
4088 default:
4089 v = _("symbol");
4090 pic = _("; recompile with -fPIC");
4091 break;
4092 }
4093
4094 if (h->def_regular)
4095 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4096 else
4097 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4098
4099 (*_bfd_error_handler) (fmt, input_bfd,
4100 x86_64_elf_howto_table[r_type].name,
4101 v, h->root.root.string, pic);
4102 bfd_set_error (bfd_error_bad_value);
4103 return FALSE;
4104 }
4105 }
4106 /* Fall through. */
4107
4108 case R_X86_64_8:
4109 case R_X86_64_16:
4110 case R_X86_64_32:
4111 case R_X86_64_PC64:
4112 case R_X86_64_64:
4113 /* FIXME: The ABI says the linker should make sure the value is
4114 the same when it's zero-extended to 64 bits. */
4115
4116 direct:
4117 if ((input_section->flags & SEC_ALLOC) == 0)
4118 break;
4119
4120 /* Don't copy a pc-relative relocation into the output file
4121 if the symbol needs copy reloc. */
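/* Broadly: when producing a shared object (or PIE) the relocation is
   copied out for ld.so unless it is PC-relative and the symbol is known
   to resolve locally; when producing an executable, ELIMINATE_COPY_RELOCS
   lets us emit a dynamic relocation against a symbol defined only in a
   shared object instead of forcing a copy reloc.  */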
4122 if ((info->shared
4123 && !(info->executable
4124 && h != NULL
4125 && h->needs_copy
4126 && IS_X86_64_PCREL_TYPE (r_type))
4127 && (h == NULL
4128 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4129 || h->root.type != bfd_link_hash_undefweak)
4130 && ((! IS_X86_64_PCREL_TYPE (r_type)
4131 && r_type != R_X86_64_SIZE32
4132 && r_type != R_X86_64_SIZE64)
4133 || ! SYMBOL_CALLS_LOCAL (info, h)))
4134 || (ELIMINATE_COPY_RELOCS
4135 && !info->shared
4136 && h != NULL
4137 && h->dynindx != -1
4138 && !h->non_got_ref
4139 && ((h->def_dynamic
4140 && !h->def_regular)
4141 || h->root.type == bfd_link_hash_undefweak
4142 || h->root.type == bfd_link_hash_undefined)))
4143 {
4144 Elf_Internal_Rela outrel;
4145 bfd_boolean skip, relocate;
4146 asection *sreloc;
4147
4148 /* When generating a shared object, these relocations
4149 are copied into the output file to be resolved at run
4150 time. */
4151 skip = FALSE;
4152 relocate = FALSE;
4153
4154 outrel.r_offset =
4155 _bfd_elf_section_offset (output_bfd, info, input_section,
4156 rel->r_offset);
4157 if (outrel.r_offset == (bfd_vma) -1)
4158 skip = TRUE;
4159 else if (outrel.r_offset == (bfd_vma) -2)
4160 skip = TRUE, relocate = TRUE;
4161
4162 outrel.r_offset += (input_section->output_section->vma
4163 + input_section->output_offset);
4164
4165 if (skip)
4166 memset (&outrel, 0, sizeof outrel);
4167
4168 /* h->dynindx may be -1 if this symbol was marked to
4169 become local. */
4170 else if (h != NULL
4171 && h->dynindx != -1
4172 && (IS_X86_64_PCREL_TYPE (r_type)
4173 || ! info->shared
4174 || ! SYMBOLIC_BIND (info, h)
4175 || ! h->def_regular))
4176 {
4177 outrel.r_info = htab->r_info (h->dynindx, r_type);
4178 outrel.r_addend = rel->r_addend;
4179 }
4180 else
4181 {
4182 /* This symbol is local, or marked to become local. */
4183 if (r_type == htab->pointer_r_type)
4184 {
4185 relocate = TRUE;
4186 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4187 outrel.r_addend = relocation + rel->r_addend;
4188 }
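/* R_X86_64_RELATIVE makes ld.so add the load base to the addend, so the
   full link-time value (symbol plus addend) goes into r_addend, and with
   relocate set the field in the output image is filled in now as well.  */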
4189 else if (r_type == R_X86_64_64
4190 && !ABI_64_P (output_bfd))
4191 {
4192 relocate = TRUE;
4193 outrel.r_info = htab->r_info (0,
4194 R_X86_64_RELATIVE64);
4195 outrel.r_addend = relocation + rel->r_addend;
4196 /* Check addend overflow. */
4197 if ((outrel.r_addend & 0x80000000)
4198 != (rel->r_addend & 0x80000000))
4199 {
4200 const char *name;
4201 int addend = rel->r_addend;
4202 if (h && h->root.root.string)
4203 name = h->root.root.string;
4204 else
4205 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4206 sym, NULL);
4207 if (addend < 0)
4208 (*_bfd_error_handler)
4209 (_("%B: addend -0x%x in relocation %s against "
4210 "symbol `%s' at 0x%lx in section `%A' is "
4211 "out of range"),
4212 input_bfd, input_section, addend,
4213 x86_64_elf_howto_table[r_type].name,
4214 name, (unsigned long) rel->r_offset);
4215 else
4216 (*_bfd_error_handler)
4217 (_("%B: addend 0x%x in relocation %s against "
4218 "symbol `%s' at 0x%lx in section `%A' is "
4219 "out of range"),
4220 input_bfd, input_section, addend,
4221 x86_64_elf_howto_table[r_type].name,
4222 name, (unsigned long) rel->r_offset);
4223 bfd_set_error (bfd_error_bad_value);
4224 return FALSE;
4225 }
4226 }
4227 else
4228 {
4229 long sindx;
4230
4231 if (bfd_is_abs_section (sec))
4232 sindx = 0;
4233 else if (sec == NULL || sec->owner == NULL)
4234 {
4235 bfd_set_error (bfd_error_bad_value);
4236 return FALSE;
4237 }
4238 else
4239 {
4240 asection *osec;
4241
4242 /* We are turning this relocation into one
4243 against a section symbol. It would be
4244 proper to subtract the symbol's value,
4245 osec->vma, from the emitted reloc addend,
4246 but ld.so expects buggy relocs. */
4247 osec = sec->output_section;
4248 sindx = elf_section_data (osec)->dynindx;
4249 if (sindx == 0)
4250 {
4251 asection *oi = htab->elf.text_index_section;
4252 sindx = elf_section_data (oi)->dynindx;
4253 }
4254 BFD_ASSERT (sindx != 0);
4255 }
4256
4257 outrel.r_info = htab->r_info (sindx, r_type);
4258 outrel.r_addend = relocation + rel->r_addend;
4259 }
4260 }
4261
4262 sreloc = elf_section_data (input_section)->sreloc;
4263
4264 if (sreloc == NULL || sreloc->contents == NULL)
4265 {
4266 r = bfd_reloc_notsupported;
4267 goto check_relocation_error;
4268 }
4269
4270 elf_append_rela (output_bfd, sreloc, &outrel);
4271
4272 /* If this reloc is against an external symbol, we do
4273 not want to fiddle with the addend. Otherwise, we
4274 need to include the symbol value so that it becomes
4275 an addend for the dynamic reloc. */
4276 if (! relocate)
4277 continue;
4278 }
4279
4280 break;
4281
4282 case R_X86_64_TLSGD:
4283 case R_X86_64_GOTPC32_TLSDESC:
4284 case R_X86_64_TLSDESC_CALL:
4285 case R_X86_64_GOTTPOFF:
4286 tls_type = GOT_UNKNOWN;
4287 if (h == NULL && local_got_offsets)
4288 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4289 else if (h != NULL)
4290 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4291
4292 if (! elf_x86_64_tls_transition (info, input_bfd,
4293 input_section, contents,
4294 symtab_hdr, sym_hashes,
4295 &r_type, tls_type, rel,
4296 relend, h, r_symndx))
4297 return FALSE;
4298
4299 if (r_type == R_X86_64_TPOFF32)
4300 {
4301 bfd_vma roff = rel->r_offset;
4302
4303 BFD_ASSERT (! unresolved_reloc);
4304
4305 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4306 {
4307 /* GD->LE transition. For 64bit, change
4308 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4309 .word 0x6666; rex64; call __tls_get_addr
4310 into:
4311 movq %fs:0, %rax
4312 leaq foo@tpoff(%rax), %rax
4313 For 32bit, change
4314 leaq foo@tlsgd(%rip), %rdi
4315 .word 0x6666; rex64; call __tls_get_addr
4316 into:
4317 movl %fs:0, %eax
4318 leaq foo@tpoff(%rax), %rax
4319 For largepic, change:
4320 leaq foo@tlsgd(%rip), %rdi
4321 movabsq $__tls_get_addr@pltoff, %rax
4322 addq %rbx, %rax
4323 call *%rax
4324 into:
4325 movq %fs:0, %rax
4326 leaq foo@tpoff(%rax), %rax
4327 nopw 0x0(%rax,%rax,1) */
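/* For reference, the 16 replacement bytes in the plain 64-bit case below
   decode as 64 48 8b 04 25 00 00 00 00 (movq %fs:0x0, %rax) followed by
   48 8d 80 xx xx xx xx (leaq x@tpoff(%rax), %rax); the bfd_put_32 that
   follows patches the tpoff value into the leaq displacement at
   roff + 8 (+ largepic).  */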
4328 int largepic = 0;
4329 if (ABI_64_P (output_bfd)
4330 && contents[roff + 5] == (bfd_byte) '\xb8')
4331 {
4332 memcpy (contents + roff - 3,
4333 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4334 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4335 largepic = 1;
4336 }
4337 else if (ABI_64_P (output_bfd))
4338 memcpy (contents + roff - 4,
4339 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4340 16);
4341 else
4342 memcpy (contents + roff - 3,
4343 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4344 15);
4345 bfd_put_32 (output_bfd,
4346 elf_x86_64_tpoff (info, relocation),
4347 contents + roff + 8 + largepic);
4348 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4349 rel++;
4350 continue;
4351 }
4352 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4353 {
4354 /* GDesc -> LE transition.
4355 It's originally something like:
4356 leaq x@tlsdesc(%rip), %rax
4357
4358 Change it to:
4359 movq $x@tpoff, %rax. */
4360
4361 unsigned int val, type;
4362
4363 type = bfd_get_8 (input_bfd, contents + roff - 3);
4364 val = bfd_get_8 (input_bfd, contents + roff - 1);
4365 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4366 contents + roff - 3);
4367 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4368 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4369 contents + roff - 1);
4370 bfd_put_32 (output_bfd,
4371 elf_x86_64_tpoff (info, relocation),
4372 contents + roff);
4373 continue;
4374 }
4375 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4376 {
4377 /* GDesc -> LE transition.
4378 It's originally:
4379 call *(%rax)
4380 Turn it into:
4381 xchg %ax,%ax. */
4382 bfd_put_8 (output_bfd, 0x66, contents + roff);
4383 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4384 continue;
4385 }
4386 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4387 {
4388 /* IE->LE transition:
4389 For 64bit, originally it can be one of:
4390 movq foo@gottpoff(%rip), %reg
4391 addq foo@gottpoff(%rip), %reg
4392 We change it into:
4393 movq $foo, %reg
4394 leaq foo(%reg), %reg
4395 addq $foo, %reg.
4396 For 32bit, originally it can be one of:
4397 movq foo@gottpoff(%rip), %reg
4398 addl foo@gottpoff(%rip), %reg
4399 We change it into:
4400 movq $foo, %reg
4401 leal foo(%reg), %reg
4402 addl $foo, %reg. */
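/* For example, 48 8b 05 <disp32> (movq foo@gottpoff(%rip), %rax) becomes
   48 c7 c0 <imm32> (movq $foo@tpoff, %rax): the opcode byte 0x8b is
   rewritten to 0xc7 and the ModRM byte to 0xc0 | reg below; the REX
   prefix only needs adjusting when %r8..%r15 is involved.  */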
4403
4404 unsigned int val, type, reg;
4405
4406 if (roff >= 3)
4407 val = bfd_get_8 (input_bfd, contents + roff - 3);
4408 else
4409 val = 0;
4410 type = bfd_get_8 (input_bfd, contents + roff - 2);
4411 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4412 reg >>= 3;
4413 if (type == 0x8b)
4414 {
4415 /* movq */
4416 if (val == 0x4c)
4417 bfd_put_8 (output_bfd, 0x49,
4418 contents + roff - 3);
4419 else if (!ABI_64_P (output_bfd) && val == 0x44)
4420 bfd_put_8 (output_bfd, 0x41,
4421 contents + roff - 3);
4422 bfd_put_8 (output_bfd, 0xc7,
4423 contents + roff - 2);
4424 bfd_put_8 (output_bfd, 0xc0 | reg,
4425 contents + roff - 1);
4426 }
4427 else if (reg == 4)
4428 {
4429 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4430 is special */
4431 if (val == 0x4c)
4432 bfd_put_8 (output_bfd, 0x49,
4433 contents + roff - 3);
4434 else if (!ABI_64_P (output_bfd) && val == 0x44)
4435 bfd_put_8 (output_bfd, 0x41,
4436 contents + roff - 3);
4437 bfd_put_8 (output_bfd, 0x81,
4438 contents + roff - 2);
4439 bfd_put_8 (output_bfd, 0xc0 | reg,
4440 contents + roff - 1);
4441 }
4442 else
4443 {
4444 /* addq/addl -> leaq/leal */
4445 if (val == 0x4c)
4446 bfd_put_8 (output_bfd, 0x4d,
4447 contents + roff - 3);
4448 else if (!ABI_64_P (output_bfd) && val == 0x44)
4449 bfd_put_8 (output_bfd, 0x45,
4450 contents + roff - 3);
4451 bfd_put_8 (output_bfd, 0x8d,
4452 contents + roff - 2);
4453 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4454 contents + roff - 1);
4455 }
4456 bfd_put_32 (output_bfd,
4457 elf_x86_64_tpoff (info, relocation),
4458 contents + roff);
4459 continue;
4460 }
4461 else
4462 BFD_ASSERT (FALSE);
4463 }
4464
4465 if (htab->elf.sgot == NULL)
4466 abort ();
4467
4468 if (h != NULL)
4469 {
4470 off = h->got.offset;
4471 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4472 }
4473 else
4474 {
4475 if (local_got_offsets == NULL)
4476 abort ();
4477
4478 off = local_got_offsets[r_symndx];
4479 offplt = local_tlsdesc_gotents[r_symndx];
4480 }
4481
4482 if ((off & 1) != 0)
4483 off &= ~1;
4484 else
4485 {
4486 Elf_Internal_Rela outrel;
4487 int dr_type, indx;
4488 asection *sreloc;
4489
4490 if (htab->elf.srelgot == NULL)
4491 abort ();
4492
4493 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4494
4495 if (GOT_TLS_GDESC_P (tls_type))
4496 {
4497 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4498 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4499 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4500 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4501 + htab->elf.sgotplt->output_offset
4502 + offplt
4503 + htab->sgotplt_jump_table_size);
4504 sreloc = htab->elf.srelplt;
4505 if (indx == 0)
4506 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4507 else
4508 outrel.r_addend = 0;
4509 elf_append_rela (output_bfd, sreloc, &outrel);
4510 }
4511
4512 sreloc = htab->elf.srelgot;
4513
4514 outrel.r_offset = (htab->elf.sgot->output_section->vma
4515 + htab->elf.sgot->output_offset + off);
4516
4517 if (GOT_TLS_GD_P (tls_type))
4518 dr_type = R_X86_64_DTPMOD64;
4519 else if (GOT_TLS_GDESC_P (tls_type))
4520 goto dr_done;
4521 else
4522 dr_type = R_X86_64_TPOFF64;
4523
4524 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4525 outrel.r_addend = 0;
4526 if ((dr_type == R_X86_64_TPOFF64
4527 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4528 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4529 outrel.r_info = htab->r_info (indx, dr_type);
4530
4531 elf_append_rela (output_bfd, sreloc, &outrel);
4532
4533 if (GOT_TLS_GD_P (tls_type))
4534 {
4535 if (indx == 0)
4536 {
4537 BFD_ASSERT (! unresolved_reloc);
4538 bfd_put_64 (output_bfd,
4539 relocation - elf_x86_64_dtpoff_base (info),
4540 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4541 }
4542 else
4543 {
4544 bfd_put_64 (output_bfd, 0,
4545 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4546 outrel.r_info = htab->r_info (indx,
4547 R_X86_64_DTPOFF64);
4548 outrel.r_offset += GOT_ENTRY_SIZE;
4549 elf_append_rela (output_bfd, sreloc,
4550 &outrel);
4551 }
4552 }
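/* A general-dynamic GOT entry therefore occupies two consecutive slots:
   the first gets an R_X86_64_DTPMOD64 relocation for the module ID, and
   the second either an R_X86_64_DTPOFF64 relocation or, for a locally
   resolved symbol, the known offset within the module's TLS block.  */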
4553
4554 dr_done:
4555 if (h != NULL)
4556 h->got.offset |= 1;
4557 else
4558 local_got_offsets[r_symndx] |= 1;
4559 }
4560
4561 if (off >= (bfd_vma) -2
4562 && ! GOT_TLS_GDESC_P (tls_type))
4563 abort ();
4564 if (r_type == ELF32_R_TYPE (rel->r_info))
4565 {
4566 if (r_type == R_X86_64_GOTPC32_TLSDESC
4567 || r_type == R_X86_64_TLSDESC_CALL)
4568 relocation = htab->elf.sgotplt->output_section->vma
4569 + htab->elf.sgotplt->output_offset
4570 + offplt + htab->sgotplt_jump_table_size;
4571 else
4572 relocation = htab->elf.sgot->output_section->vma
4573 + htab->elf.sgot->output_offset + off;
4574 unresolved_reloc = FALSE;
4575 }
4576 else
4577 {
4578 bfd_vma roff = rel->r_offset;
4579
4580 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4581 {
4582 /* GD->IE transition. For 64bit, change
4583 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4584 .word 0x6666; rex64; call __tls_get_addr@plt
4585 into:
4586 movq %fs:0, %rax
4587 addq foo@gottpoff(%rip), %rax
4588 For 32bit, change
4589 leaq foo@tlsgd(%rip), %rdi
4590 .word 0x6666; rex64; call __tls_get_addr@plt
4591 into:
4592 movl %fs:0, %eax
4593 addq foo@gottpoff(%rip), %rax
4594 For largepic, change:
4595 leaq foo@tlsgd(%rip), %rdi
4596 movabsq $__tls_get_addr@pltoff, %rax
4597 addq %rbx, %rax
4598 call *%rax
4599 into:
4600 movq %fs:0, %rax
4601 addq foo@gottpoff(%rip), %rax
4602 nopw 0x0(%rax,%rax,1) */
4603 int largepic = 0;
4604 if (ABI_64_P (output_bfd)
4605 && contents[roff + 5] == (bfd_byte) '\xb8')
4606 {
4607 memcpy (contents + roff - 3,
4608 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4609 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4610 largepic = 1;
4611 }
4612 else if (ABI_64_P (output_bfd))
4613 memcpy (contents + roff - 4,
4614 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4615 16);
4616 else
4617 memcpy (contents + roff - 3,
4618 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4619 15);
4620
4621 relocation = (htab->elf.sgot->output_section->vma
4622 + htab->elf.sgot->output_offset + off
4623 - roff
4624 - largepic
4625 - input_section->output_section->vma
4626 - input_section->output_offset
4627 - 12);
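/* The gottpoff displacement is stored below at roff + 8 + largepic; a
   RIP-relative offset is measured from the end of the instruction,
   4 bytes further on, which is where the constant 12 (8 + 4) comes
   from.  */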
4628 bfd_put_32 (output_bfd, relocation,
4629 contents + roff + 8 + largepic);
4630 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4631 rel++;
4632 continue;
4633 }
4634 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4635 {
4636 /* GDesc -> IE transition.
4637 It's originally something like:
4638 leaq x@tlsdesc(%rip), %rax
4639
4640 Change it to:
4641 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4642
4643 /* Now modify the instruction as appropriate. To
4644 turn a leaq into a movq in the form we use it, it
4645 suffices to change the second byte from 0x8d to
4646 0x8b. */
4647 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4648
4649 bfd_put_32 (output_bfd,
4650 htab->elf.sgot->output_section->vma
4651 + htab->elf.sgot->output_offset + off
4652 - rel->r_offset
4653 - input_section->output_section->vma
4654 - input_section->output_offset
4655 - 4,
4656 contents + roff);
4657 continue;
4658 }
4659 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4660 {
4661 /* GDesc -> IE transition.
4662 It's originally:
4663 call *(%rax)
4664
4665 Change it to:
4666 xchg %ax, %ax. */
4667
4668 bfd_put_8 (output_bfd, 0x66, contents + roff);
4669 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4670 continue;
4671 }
4672 else
4673 BFD_ASSERT (FALSE);
4674 }
4675 break;
4676
4677 case R_X86_64_TLSLD:
4678 if (! elf_x86_64_tls_transition (info, input_bfd,
4679 input_section, contents,
4680 symtab_hdr, sym_hashes,
4681 &r_type, GOT_UNKNOWN,
4682 rel, relend, h, r_symndx))
4683 return FALSE;
4684
4685 if (r_type != R_X86_64_TLSLD)
4686 {
4687 /* LD->LE transition:
4688 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4689 For 64bit, we change it into:
4690 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4691 For 32bit, we change it into:
4692 nopl 0x0(%rax); movl %fs:0, %eax.
4693 For largepic, change:
4694 leaq foo@tlsld(%rip), %rdi
4695 movabsq $__tls_get_addr@pltoff, %rax
4696 addq %rbx, %rax
4697 call *%rax
4698 into:
4699 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4700 movq %fs:0, %rax */
4701
4702 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4703 if (ABI_64_P (output_bfd)
4704 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4705 memcpy (contents + rel->r_offset - 3,
4706 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4707 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4708 else if (ABI_64_P (output_bfd))
4709 memcpy (contents + rel->r_offset - 3,
4710 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4711 else
4712 memcpy (contents + rel->r_offset - 3,
4713 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4714 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4715 rel++;
4716 continue;
4717 }
4718
4719 if (htab->elf.sgot == NULL)
4720 abort ();
4721
4722 off = htab->tls_ld_got.offset;
4723 if (off & 1)
4724 off &= ~1;
4725 else
4726 {
4727 Elf_Internal_Rela outrel;
4728
4729 if (htab->elf.srelgot == NULL)
4730 abort ();
4731
4732 outrel.r_offset = (htab->elf.sgot->output_section->vma
4733 + htab->elf.sgot->output_offset + off);
4734
4735 bfd_put_64 (output_bfd, 0,
4736 htab->elf.sgot->contents + off);
4737 bfd_put_64 (output_bfd, 0,
4738 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4739 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4740 outrel.r_addend = 0;
4741 elf_append_rela (output_bfd, htab->elf.srelgot,
4742 &outrel);
4743 htab->tls_ld_got.offset |= 1;
4744 }
4745 relocation = htab->elf.sgot->output_section->vma
4746 + htab->elf.sgot->output_offset + off;
4747 unresolved_reloc = FALSE;
4748 break;
4749
4750 case R_X86_64_DTPOFF32:
4751 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4752 relocation -= elf_x86_64_dtpoff_base (info);
4753 else
4754 relocation = elf_x86_64_tpoff (info, relocation);
4755 break;
4756
4757 case R_X86_64_TPOFF32:
4758 case R_X86_64_TPOFF64:
4759 BFD_ASSERT (info->executable);
4760 relocation = elf_x86_64_tpoff (info, relocation);
4761 break;
4762
4763 case R_X86_64_DTPOFF64:
4764 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4765 relocation -= elf_x86_64_dtpoff_base (info);
4766 break;
4767
4768 default:
4769 break;
4770 }
4771
4772 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4773 because such sections are not SEC_ALLOC and thus ld.so will
4774 not process them. */
4775 if (unresolved_reloc
4776 && !((input_section->flags & SEC_DEBUGGING) != 0
4777 && h->def_dynamic)
4778 && _bfd_elf_section_offset (output_bfd, info, input_section,
4779 rel->r_offset) != (bfd_vma) -1)
4780 {
4781 (*_bfd_error_handler)
4782 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4783 input_bfd,
4784 input_section,
4785 (long) rel->r_offset,
4786 howto->name,
4787 h->root.root.string);
4788 return FALSE;
4789 }
4790
4791 do_relocation:
4792 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4793 contents, rel->r_offset,
4794 relocation, rel->r_addend);
4795
4796 check_relocation_error:
4797 if (r != bfd_reloc_ok)
4798 {
4799 const char *name;
4800
4801 if (h != NULL)
4802 name = h->root.root.string;
4803 else
4804 {
4805 name = bfd_elf_string_from_elf_section (input_bfd,
4806 symtab_hdr->sh_link,
4807 sym->st_name);
4808 if (name == NULL)
4809 return FALSE;
4810 if (*name == '\0')
4811 name = bfd_section_name (input_bfd, sec);
4812 }
4813
4814 if (r == bfd_reloc_overflow)
4815 {
4816 if (! ((*info->callbacks->reloc_overflow)
4817 (info, (h ? &h->root : NULL), name, howto->name,
4818 (bfd_vma) 0, input_bfd, input_section,
4819 rel->r_offset)))
4820 return FALSE;
4821 }
4822 else
4823 {
4824 (*_bfd_error_handler)
4825 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4826 input_bfd, input_section,
4827 (long) rel->r_offset, name, (int) r);
4828 return FALSE;
4829 }
4830 }
4831 }
4832
4833 return TRUE;
4834 }
4835
4836 /* Finish up dynamic symbol handling. We set the contents of various
4837 dynamic sections here. */
4838
4839 static bfd_boolean
4840 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4841 struct bfd_link_info *info,
4842 struct elf_link_hash_entry *h,
4843 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4844 {
4845 struct elf_x86_64_link_hash_table *htab;
4846 const struct elf_x86_64_backend_data *abed;
4847 bfd_boolean use_plt_bnd;
4848 struct elf_x86_64_link_hash_entry *eh;
4849
4850 htab = elf_x86_64_hash_table (info);
4851 if (htab == NULL)
4852 return FALSE;
4853
4854 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4855 section only if there is .plt section. */
4856 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4857 abed = (use_plt_bnd
4858 ? &elf_x86_64_bnd_arch_bed
4859 : get_elf_x86_64_backend_data (output_bfd));
4860
4861 eh = (struct elf_x86_64_link_hash_entry *) h;
4862
4863 if (h->plt.offset != (bfd_vma) -1)
4864 {
4865 bfd_vma plt_index;
4866 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4867 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4868 Elf_Internal_Rela rela;
4869 bfd_byte *loc;
4870 asection *plt, *gotplt, *relplt, *resolved_plt;
4871 const struct elf_backend_data *bed;
4872 bfd_vma plt_got_pcrel_offset;
4873
4874 /* When building a static executable, use .iplt, .igot.plt and
4875 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4876 if (htab->elf.splt != NULL)
4877 {
4878 plt = htab->elf.splt;
4879 gotplt = htab->elf.sgotplt;
4880 relplt = htab->elf.srelplt;
4881 }
4882 else
4883 {
4884 plt = htab->elf.iplt;
4885 gotplt = htab->elf.igotplt;
4886 relplt = htab->elf.irelplt;
4887 }
4888
4889 /* This symbol has an entry in the procedure linkage table. Set
4890 it up. */
4891 if ((h->dynindx == -1
4892 && !((h->forced_local || info->executable)
4893 && h->def_regular
4894 && h->type == STT_GNU_IFUNC))
4895 || plt == NULL
4896 || gotplt == NULL
4897 || relplt == NULL)
4898 abort ();
4899
4900 /* Get the index in the procedure linkage table which
4901 corresponds to this symbol. This is the index of this symbol
4902 in all the symbols for which we are making plt entries. The
4903 first entry in the procedure linkage table is reserved.
4904
4905 Get the offset into the .got table of the entry that
4906 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4907 bytes. The first three are reserved for the dynamic linker.
4908
4909 For static executables, we don't reserve anything. */
4910
4911 if (plt == htab->elf.splt)
4912 {
4913 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4914 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4915 }
4916 else
4917 {
4918 got_offset = h->plt.offset / abed->plt_entry_size;
4919 got_offset = got_offset * GOT_ENTRY_SIZE;
4920 }
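/* E.g. with the usual 16-byte entries the first real .plt slot (after the
   reserved PLT0) has h->plt.offset == 16 and maps to got_offset == 24,
   the fourth .got.plt slot; .iplt has no reserved entries, so there the
   mapping starts at offset 0.  */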
4921
4922 plt_plt_insn_end = abed->plt_plt_insn_end;
4923 plt_plt_offset = abed->plt_plt_offset;
4924 plt_got_insn_size = abed->plt_got_insn_size;
4925 plt_got_offset = abed->plt_got_offset;
4926 if (use_plt_bnd)
4927 {
4928 /* Use the second PLT with BND relocations. */
4929 const bfd_byte *plt_entry, *plt2_entry;
4930
4931 if (eh->has_bnd_reloc)
4932 {
4933 plt_entry = elf_x86_64_bnd_plt_entry;
4934 plt2_entry = elf_x86_64_bnd_plt2_entry;
4935 }
4936 else
4937 {
4938 plt_entry = elf_x86_64_legacy_plt_entry;
4939 plt2_entry = elf_x86_64_legacy_plt2_entry;
4940
4941 /* Subtract 1 since there is no BND prefix. */
4942 plt_plt_insn_end -= 1;
4943 plt_plt_offset -= 1;
4944 plt_got_insn_size -= 1;
4945 plt_got_offset -= 1;
4946 }
4947
4948 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4949 == sizeof (elf_x86_64_legacy_plt_entry));
4950
4951 /* Fill in the entry in the procedure linkage table. */
4952 memcpy (plt->contents + h->plt.offset,
4953 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4954 /* Fill in the entry in the second PLT. */
4955 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4956 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4957
4958 resolved_plt = htab->plt_bnd;
4959 plt_offset = eh->plt_bnd.offset;
4960 }
4961 else
4962 {
4963 /* Fill in the entry in the procedure linkage table. */
4964 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4965 abed->plt_entry_size);
4966
4967 resolved_plt = plt;
4968 plt_offset = h->plt.offset;
4969 }
4970
4971 /* Fill in the variable parts of this PLT entry. */
4972
4973 /* Store the PC-relative offset of the GOT entry in the instruction
4974 referring to it, subtracting the size of that instruction. */
4975 plt_got_pcrel_offset = (gotplt->output_section->vma
4976 + gotplt->output_offset
4977 + got_offset
4978 - resolved_plt->output_section->vma
4979 - resolved_plt->output_offset
4980 - plt_offset
4981 - plt_got_insn_size);
4982
4983 /* Check PC-relative offset overflow in PLT entry. */
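/* (x + 0x80000000) > 0xffffffff is an unsigned test for x lying outside
   the signed 32-bit range [-0x80000000, 0x7fffffff] that the disp32
   field of the PLT instruction can hold.  */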
4984 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4985 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
4986 output_bfd, h->root.root.string);
4987
4988 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4989 resolved_plt->contents + plt_offset + plt_got_offset);
4990
4991 /* Fill in the entry in the global offset table, initially this
4992 points to the second part of the PLT entry. */
4993 bfd_put_64 (output_bfd, (plt->output_section->vma
4994 + plt->output_offset
4995 + h->plt.offset + abed->plt_lazy_offset),
4996 gotplt->contents + got_offset);
4997
4998 /* Fill in the entry in the .rela.plt section. */
4999 rela.r_offset = (gotplt->output_section->vma
5000 + gotplt->output_offset
5001 + got_offset);
5002 if (h->dynindx == -1
5003 || ((info->executable
5004 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5005 && h->def_regular
5006 && h->type == STT_GNU_IFUNC))
5007 {
5008 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5009 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5010 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5011 rela.r_addend = (h->root.u.def.value
5012 + h->root.u.def.section->output_section->vma
5013 + h->root.u.def.section->output_offset);
5014 /* R_X86_64_IRELATIVE comes last. */
5015 plt_index = htab->next_irelative_index--;
5016 }
5017 else
5018 {
5019 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5020 rela.r_addend = 0;
5021 plt_index = htab->next_jump_slot_index++;
5022 }
5023
5024 /* Don't fill PLT entry for static executables. */
5025 if (plt == htab->elf.splt)
5026 {
5027 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5028
5029 /* Put relocation index. */
5030 bfd_put_32 (output_bfd, plt_index,
5031 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5032
5033 /* Put offset for jmp .PLT0 and check for overflow. We don't
5034 check relocation index for overflow since branch displacement
5035 will overflow first. */
5036 if (plt0_offset > 0x80000000)
5037 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5038 output_bfd, h->root.root.string);
5039 bfd_put_32 (output_bfd, - plt0_offset,
5040 plt->contents + h->plt.offset + plt_plt_offset);
5041 }
5042
5043 bed = get_elf_backend_data (output_bfd);
5044 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5045 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5046 }
5047 else if (eh->plt_got.offset != (bfd_vma) -1)
5048 {
5049 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5050 asection *plt, *got;
5051 bfd_boolean got_after_plt;
5052 int32_t got_pcrel_offset;
5053 const bfd_byte *got_plt_entry;
5054
5055 /* Set the entry in the GOT procedure linkage table. */
5056 plt = htab->plt_got;
5057 got = htab->elf.sgot;
5058 got_offset = h->got.offset;
5059
5060 if (got_offset == (bfd_vma) -1
5061 || h->type == STT_GNU_IFUNC
5062 || plt == NULL
5063 || got == NULL)
5064 abort ();
5065
5066 /* Use the second PLT entry template for the GOT PLT since they
5067 are identical. */
5068 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5069 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5070 if (eh->has_bnd_reloc)
5071 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5072 else
5073 {
5074 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5075
5076 /* Subtract 1 since there is no BND prefix. */
5077 plt_got_insn_size -= 1;
5078 plt_got_offset -= 1;
5079 }
5080
5081 /* Fill in the entry in the GOT procedure linkage table. */
5082 plt_offset = eh->plt_got.offset;
5083 memcpy (plt->contents + plt_offset,
5084 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5085
5086 /* Store the PC-relative offset of the GOT entry in the instruction
5087 referring to it, subtracting the size of that instruction. */
5088 got_pcrel_offset = (got->output_section->vma
5089 + got->output_offset
5090 + got_offset
5091 - plt->output_section->vma
5092 - plt->output_offset
5093 - plt_offset
5094 - plt_got_insn_size);
5095
5096 /* Check PC-relative offset overflow in GOT PLT entry. */
5097 got_after_plt = got->output_section->vma > plt->output_section->vma;
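/* got_pcrel_offset is only 32 bits wide, so if the real displacement
   does not fit, its sign will disagree with the relative placement of
   .got and .plt.got, which is what the check below looks for.  */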
5098 if ((got_after_plt && got_pcrel_offset < 0)
5099 || (!got_after_plt && got_pcrel_offset > 0))
5100 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5101 output_bfd, h->root.root.string);
5102
5103 bfd_put_32 (output_bfd, got_pcrel_offset,
5104 plt->contents + plt_offset + plt_got_offset);
5105 }
5106
5107 if (!h->def_regular
5108 && (h->plt.offset != (bfd_vma) -1
5109 || eh->plt_got.offset != (bfd_vma) -1))
5110 {
5111 /* Mark the symbol as undefined, rather than as defined in
5112 the .plt section. Leave the value if there were any
5113 relocations where pointer equality matters (this is a clue
5114 for the dynamic linker, to make function pointer
5115 comparisons work between an application and shared
5116 library), otherwise set it to zero. If a function is only
5117 called from a binary, there is no need to slow down
5118 shared libraries because of that. */
5119 sym->st_shndx = SHN_UNDEF;
5120 if (!h->pointer_equality_needed)
5121 sym->st_value = 0;
5122 }
5123
5124 if (h->got.offset != (bfd_vma) -1
5125 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5126 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5127 {
5128 Elf_Internal_Rela rela;
5129
5130 /* This symbol has an entry in the global offset table. Set it
5131 up. */
5132 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5133 abort ();
5134
5135 rela.r_offset = (htab->elf.sgot->output_section->vma
5136 + htab->elf.sgot->output_offset
5137 + (h->got.offset &~ (bfd_vma) 1));
5138
5139 /* If this is a static link, or it is a -Bsymbolic link and the
5140 symbol is defined locally or was forced to be local because
5141 of a version file, we just want to emit a RELATIVE reloc.
5142 The entry in the global offset table will already have been
5143 initialized in the relocate_section function. */
5144 if (h->def_regular
5145 && h->type == STT_GNU_IFUNC)
5146 {
5147 if (info->shared)
5148 {
5149 /* Generate R_X86_64_GLOB_DAT. */
5150 goto do_glob_dat;
5151 }
5152 else
5153 {
5154 asection *plt;
5155
5156 if (!h->pointer_equality_needed)
5157 abort ();
5158
5159 /* For a non-shared object we can't use .got.plt, which
5160 contains the real function address, if we need pointer
5161 equality. Load the GOT entry with the PLT entry address instead. */
5162 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5163 bfd_put_64 (output_bfd, (plt->output_section->vma
5164 + plt->output_offset
5165 + h->plt.offset),
5166 htab->elf.sgot->contents + h->got.offset);
5167 return TRUE;
5168 }
5169 }
5170 else if (info->shared
5171 && SYMBOL_REFERENCES_LOCAL (info, h))
5172 {
5173 if (!h->def_regular)
5174 return FALSE;
5175 BFD_ASSERT((h->got.offset & 1) != 0);
5176 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5177 rela.r_addend = (h->root.u.def.value
5178 + h->root.u.def.section->output_section->vma
5179 + h->root.u.def.section->output_offset);
5180 }
5181 else
5182 {
5183 BFD_ASSERT((h->got.offset & 1) == 0);
5184 do_glob_dat:
5185 bfd_put_64 (output_bfd, (bfd_vma) 0,
5186 htab->elf.sgot->contents + h->got.offset);
5187 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5188 rela.r_addend = 0;
5189 }
5190
5191 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5192 }
5193
5194 if (h->needs_copy)
5195 {
5196 Elf_Internal_Rela rela;
5197
5198 /* This symbol needs a copy reloc. Set it up. */
5199
5200 if (h->dynindx == -1
5201 || (h->root.type != bfd_link_hash_defined
5202 && h->root.type != bfd_link_hash_defweak)
5203 || htab->srelbss == NULL)
5204 abort ();
5205
5206 rela.r_offset = (h->root.u.def.value
5207 + h->root.u.def.section->output_section->vma
5208 + h->root.u.def.section->output_offset);
5209 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5210 rela.r_addend = 0;
5211 elf_append_rela (output_bfd, htab->srelbss, &rela);
5212 }
5213
5214 return TRUE;
5215 }
5216
5217 /* Finish up local dynamic symbol handling. We set the contents of
5218 various dynamic sections here. */
5219
5220 static bfd_boolean
5221 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5222 {
5223 struct elf_link_hash_entry *h
5224 = (struct elf_link_hash_entry *) *slot;
5225 struct bfd_link_info *info
5226 = (struct bfd_link_info *) inf;
5227
5228 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5229 info, h, NULL);
5230 }
5231
5232 /* Used to decide how to sort relocs in an optimal manner for the
5233 dynamic linker, before writing them out. */
5234
5235 static enum elf_reloc_type_class
5236 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5237 const asection *rel_sec ATTRIBUTE_UNUSED,
5238 const Elf_Internal_Rela *rela)
5239 {
5240 switch ((int) ELF32_R_TYPE (rela->r_info))
5241 {
5242 case R_X86_64_RELATIVE:
5243 case R_X86_64_RELATIVE64:
5244 return reloc_class_relative;
5245 case R_X86_64_JUMP_SLOT:
5246 return reloc_class_plt;
5247 case R_X86_64_COPY:
5248 return reloc_class_copy;
5249 default:
5250 return reloc_class_normal;
5251 }
5252 }
5253
5254 /* Finish up the dynamic sections. */
5255
5256 static bfd_boolean
5257 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5258 struct bfd_link_info *info)
5259 {
5260 struct elf_x86_64_link_hash_table *htab;
5261 bfd *dynobj;
5262 asection *sdyn;
5263 const struct elf_x86_64_backend_data *abed;
5264
5265 htab = elf_x86_64_hash_table (info);
5266 if (htab == NULL)
5267 return FALSE;
5268
5269 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5270 section only if there is .plt section. */
5271 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5272 ? &elf_x86_64_bnd_arch_bed
5273 : get_elf_x86_64_backend_data (output_bfd));
5274
5275 dynobj = htab->elf.dynobj;
5276 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5277
5278 if (htab->elf.dynamic_sections_created)
5279 {
5280 bfd_byte *dyncon, *dynconend;
5281 const struct elf_backend_data *bed;
5282 bfd_size_type sizeof_dyn;
5283
5284 if (sdyn == NULL || htab->elf.sgot == NULL)
5285 abort ();
5286
5287 bed = get_elf_backend_data (dynobj);
5288 sizeof_dyn = bed->s->sizeof_dyn;
5289 dyncon = sdyn->contents;
5290 dynconend = sdyn->contents + sdyn->size;
5291 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5292 {
5293 Elf_Internal_Dyn dyn;
5294 asection *s;
5295
5296 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5297
5298 switch (dyn.d_tag)
5299 {
5300 default:
5301 continue;
5302
5303 case DT_PLTGOT:
5304 s = htab->elf.sgotplt;
5305 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5306 break;
5307
5308 case DT_JMPREL:
5309 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5310 break;
5311
5312 case DT_PLTRELSZ:
5313 s = htab->elf.srelplt->output_section;
5314 dyn.d_un.d_val = s->size;
5315 break;
5316
5317 case DT_RELASZ:
5318 /* The procedure linkage table relocs (DT_JMPREL) should
5319 not be included in the overall relocs (DT_RELA).
5320 Therefore, we override the DT_RELASZ entry here to
5321 make it not include the JMPREL relocs. Since the
5322 linker script arranges for .rela.plt to follow all
5323 other relocation sections, we don't have to worry
5324 about changing the DT_RELA entry. */
5325 if (htab->elf.srelplt != NULL)
5326 {
5327 s = htab->elf.srelplt->output_section;
5328 dyn.d_un.d_val -= s->size;
5329 }
5330 break;
5331
5332 case DT_TLSDESC_PLT:
5333 s = htab->elf.splt;
5334 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5335 + htab->tlsdesc_plt;
5336 break;
5337
5338 case DT_TLSDESC_GOT:
5339 s = htab->elf.sgot;
5340 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5341 + htab->tlsdesc_got;
5342 break;
5343 }
5344
5345 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5346 }
5347
5348 /* Fill in the special first entry in the procedure linkage table. */
5349 if (htab->elf.splt && htab->elf.splt->size > 0)
5350 {
5351 /* Fill in the first entry in the procedure linkage table. */
5352 memcpy (htab->elf.splt->contents,
5353 abed->plt0_entry, abed->plt_entry_size);
5354 /* Add the offset for pushq GOT+8(%rip); since the instruction
5355 uses 6 bytes, subtract this value. */
5356 bfd_put_32 (output_bfd,
5357 (htab->elf.sgotplt->output_section->vma
5358 + htab->elf.sgotplt->output_offset
5359 + 8
5360 - htab->elf.splt->output_section->vma
5361 - htab->elf.splt->output_offset
5362 - 6),
5363 htab->elf.splt->contents + abed->plt0_got1_offset);
5364 /* Add offset for the PC-relative instruction accessing GOT+16,
5365 subtracting the offset to the end of that instruction. */
5366 bfd_put_32 (output_bfd,
5367 (htab->elf.sgotplt->output_section->vma
5368 + htab->elf.sgotplt->output_offset
5369 + 16
5370 - htab->elf.splt->output_section->vma
5371 - htab->elf.splt->output_offset
5372 - abed->plt0_got2_insn_end),
5373 htab->elf.splt->contents + abed->plt0_got2_offset);
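/* With the usual PLT0 layout the first instruction is the 6-byte
   pushq GOT+8(%rip) (ff 35 <disp32>) and the second the indirect
   jmpq *GOT+16(%rip); the two 32-bit stores above patch their
   RIP-relative displacements, which are measured from the end of each
   instruction (hence the -6 and the plt0_got2_insn_end adjustment).  */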
5374
5375 elf_section_data (htab->elf.splt->output_section)
5376 ->this_hdr.sh_entsize = abed->plt_entry_size;
5377
5378 if (htab->tlsdesc_plt)
5379 {
5380 bfd_put_64 (output_bfd, (bfd_vma) 0,
5381 htab->elf.sgot->contents + htab->tlsdesc_got);
5382
5383 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5384 abed->plt0_entry, abed->plt_entry_size);
5385
5386 /* Add the offset for pushq GOT+8(%rip); since the
5387 instruction uses 6 bytes, subtract this value. */
5388 bfd_put_32 (output_bfd,
5389 (htab->elf.sgotplt->output_section->vma
5390 + htab->elf.sgotplt->output_offset
5391 + 8
5392 - htab->elf.splt->output_section->vma
5393 - htab->elf.splt->output_offset
5394 - htab->tlsdesc_plt
5395 - 6),
5396 htab->elf.splt->contents
5397 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5398 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5399 where TDG stands for htab->tlsdesc_got, subtracting the offset
5400 to the end of that instruction. */
5401 bfd_put_32 (output_bfd,
5402 (htab->elf.sgot->output_section->vma
5403 + htab->elf.sgot->output_offset
5404 + htab->tlsdesc_got
5405 - htab->elf.splt->output_section->vma
5406 - htab->elf.splt->output_offset
5407 - htab->tlsdesc_plt
5408 - abed->plt0_got2_insn_end),
5409 htab->elf.splt->contents
5410 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5411 }
5412 }
5413 }
5414
5415 if (htab->plt_bnd != NULL)
5416 elf_section_data (htab->plt_bnd->output_section)
5417 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5418
5419 if (htab->elf.sgotplt)
5420 {
5421 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5422 {
5423 (*_bfd_error_handler)
5424 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5425 return FALSE;
5426 }
5427
5428 /* Fill in the first three entries in the global offset table. */
5429 if (htab->elf.sgotplt->size > 0)
5430 {
5431 /* Set the first entry in the global offset table to the address of
5432 the dynamic section. */
5433 if (sdyn == NULL)
5434 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5435 else
5436 bfd_put_64 (output_bfd,
5437 sdyn->output_section->vma + sdyn->output_offset,
5438 htab->elf.sgotplt->contents);
5439 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5440 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5441 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5442 }
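/* GOT[1] and GOT[2] are left as zero here; at run time the dynamic
   linker normally stores its own data there (typically the link map
   pointer and the address of its lazy-resolution entry point), reached
   through the pushq/jmpq set up in PLT0.  */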
5443
5444 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5445 GOT_ENTRY_SIZE;
5446 }
5447
5448 /* Adjust .eh_frame for .plt section. */
5449 if (htab->plt_eh_frame != NULL
5450 && htab->plt_eh_frame->contents != NULL)
5451 {
5452 if (htab->elf.splt != NULL
5453 && htab->elf.splt->size != 0
5454 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5455 && htab->elf.splt->output_section != NULL
5456 && htab->plt_eh_frame->output_section != NULL)
5457 {
5458 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5459 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5460 + htab->plt_eh_frame->output_offset
5461 + PLT_FDE_START_OFFSET;
5462 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5463 htab->plt_eh_frame->contents
5464 + PLT_FDE_START_OFFSET);
5465 }
5466 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5467 {
5468 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5469 htab->plt_eh_frame,
5470 htab->plt_eh_frame->contents))
5471 return FALSE;
5472 }
5473 }
5474
5475 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5476 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5477 = GOT_ENTRY_SIZE;
5478
5479 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5480 htab_traverse (htab->loc_hash_table,
5481 elf_x86_64_finish_local_dynamic_symbol,
5482 info);
5483
5484 return TRUE;
5485 }
5486
5487 /* Return address in section PLT for the Ith GOTPLT relocation, for
5488 relocation REL, or (bfd_vma) -1 if it should not be included. */
5489
5490 static bfd_vma
5491 elf_x86_64_plt_sym_val (bfd_vma i, const asection *plt,
5492 const arelent *rel)
5493 {
5494 bfd *abfd;
5495 const struct elf_x86_64_backend_data *bed;
5496 bfd_vma plt_offset;
5497
5498 /* Only match R_X86_64_JUMP_SLOT and R_X86_64_IRELATIVE. */
5499 if (rel->howto->type != R_X86_64_JUMP_SLOT
5500 && rel->howto->type != R_X86_64_IRELATIVE)
5501 return (bfd_vma) -1;
5502
5503 abfd = plt->owner;
5504 bed = get_elf_x86_64_backend_data (abfd);
5505 plt_offset = bed->plt_entry_size;
5506
5507 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5508 return plt->vma + (i + 1) * plt_offset;
5509
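/* For GNU OSABI objects the slot order in .plt need not match the order
   of the .rela.plt entries (R_X86_64_IRELATIVE relocations come last),
   so scan for the entry whose embedded relocation index matches I
   instead of assuming a one-to-one layout.  */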
5510 while (plt_offset < plt->size)
5511 {
5512 bfd_vma reloc_index;
5513 bfd_byte reloc_index_raw[4];
5514
5515 if (!bfd_get_section_contents (abfd, (asection *) plt,
5516 reloc_index_raw,
5517 plt_offset + bed->plt_reloc_offset,
5518 sizeof (reloc_index_raw)))
5519 return (bfd_vma) -1;
5520
5521 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5522 if (reloc_index == i)
5523 return plt->vma + plt_offset;
5524 plt_offset += bed->plt_entry_size;
5525 }
5526
5527 abort ();
5528 }
5529
5530 /* Return the offset in the .plt.bnd section for the Ith GOTPLT relocation
5531 in PLT section PLT, or (bfd_vma) -1 if it should not be included. */
5532
5533 static bfd_vma
5534 elf_x86_64_plt_sym_val_offset_plt_bnd (bfd_vma i, const asection *plt)
5535 {
5536 const struct elf_x86_64_backend_data *bed = &elf_x86_64_bnd_arch_bed;
5537 bfd *abfd = plt->owner;
5538 bfd_vma plt_offset = bed->plt_entry_size;
5539
5540 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5541 return i * sizeof (elf_x86_64_legacy_plt2_entry);
5542
5543 while (plt_offset < plt->size)
5544 {
5545 bfd_vma reloc_index;
5546 bfd_byte reloc_index_raw[4];
5547
5548 if (!bfd_get_section_contents (abfd, (asection *) plt,
5549 reloc_index_raw,
5550 plt_offset + bed->plt_reloc_offset,
5551 sizeof (reloc_index_raw)))
5552 return (bfd_vma) -1;
5553
5554 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5555 if (reloc_index == i)
5556 {
5557 /* This is the index in .plt section. */
5558 long plt_index = plt_offset / bed->plt_entry_size;
5559 /* Return the offset in .plt.bnd section. */
5560 return (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry);
5561 }
5562 plt_offset += bed->plt_entry_size;
5563 }
5564
5565 abort ();
5566 }
5567
5568 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5569 support. */
5570
5571 static long
5572 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5573 long symcount,
5574 asymbol **syms,
5575 long dynsymcount,
5576 asymbol **dynsyms,
5577 asymbol **ret)
5578 {
5579 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5580 asection *relplt;
5581 asymbol *s;
5582 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5583 arelent *p;
5584 long count, i, n;
5585 size_t size;
5586 Elf_Internal_Shdr *hdr;
5587 char *names;
5588 asection *plt, *plt_push;
5589
5590 plt_push = bfd_get_section_by_name (abfd, ".plt");
5591 if (plt_push == NULL)
5592 return 0;
5593
5594 plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5595 /* Use the generic ELF version if there is no .plt.bnd section. */
5596 if (plt == NULL)
5597 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
5598 dynsymcount, dynsyms, ret);
5599
5600 *ret = NULL;
5601
5602 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
5603 return 0;
5604
5605 if (dynsymcount <= 0)
5606 return 0;
5607
5608 relplt = bfd_get_section_by_name (abfd, ".rela.plt");
5609 if (relplt == NULL)
5610 return 0;
5611
5612 hdr = &elf_section_data (relplt)->this_hdr;
5613 if (hdr->sh_link != elf_dynsymtab (abfd)
5614 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
5615 return 0;
5616
5617 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5618 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5619 return -1;
5620
5621 count = relplt->size / hdr->sh_entsize;
5622 size = count * sizeof (asymbol);
5623 p = relplt->relocation;
5624 for (i = 0; i < count; i++, p += bed->s->int_rels_per_ext_rel)
5625 {
5626 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
5627 if (p->addend != 0)
5628 size += sizeof ("+0x") - 1 + 8 + 8;
5629 }
5630
5631 s = *ret = (asymbol *) bfd_malloc (size);
5632 if (s == NULL)
5633 return -1;
5634
5635 names = (char *) (s + count);
5636 p = relplt->relocation;
5637 n = 0;
5638 for (i = 0; i < count; i++, p++)
5639 {
5640 bfd_vma offset;
5641 size_t len;
5642
5643 if (p->howto->type != R_X86_64_JUMP_SLOT
5644 && p->howto->type != R_X86_64_IRELATIVE)
5645 continue;
5646
5647 offset = elf_x86_64_plt_sym_val_offset_plt_bnd (i, plt_push);
5648
5649 *s = **p->sym_ptr_ptr;
5650 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
5651 we are defining a symbol, ensure one of them is set. */
5652 if ((s->flags & BSF_LOCAL) == 0)
5653 s->flags |= BSF_GLOBAL;
5654 s->flags |= BSF_SYNTHETIC;
5655 s->section = plt;
5656 s->value = offset;
5657 s->name = names;
5658 s->udata.p = NULL;
5659 len = strlen ((*p->sym_ptr_ptr)->name);
5660 memcpy (names, (*p->sym_ptr_ptr)->name, len);
5661 names += len;
5662 if (p->addend != 0)
5663 {
5664 char buf[30], *a;
5665
5666 memcpy (names, "+0x", sizeof ("+0x") - 1);
5667 names += sizeof ("+0x") - 1;
5668 bfd_sprintf_vma (abfd, buf, p->addend);
5669 for (a = buf; *a == '0'; ++a)
5670 ;
5671 len = strlen (a);
5672 memcpy (names, a, len);
5673 names += len;
5674 }
5675 memcpy (names, "@plt", sizeof ("@plt"));
5676 names += sizeof ("@plt");
5677 ++s, ++n;
5678 }
5679
5680 return n;
5681 }
5682
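/* Usage sketch, not part of the original source: roughly how an
   objdump-style consumer obtains the synthetic "foo@plt" symbols produced
   by elf_x86_64_get_synthetic_symtab above, through the generic BFD entry
   point that dispatches to it.  Gathering SYMS/DYNSYMS and full error
   handling are elided, and the function name is hypothetical.  */

static void
example_print_plt_synthetic_symbols (bfd *abfd, long symcount,
                                     asymbol **syms, long dynsymcount,
                                     asymbol **dynsyms)
{
  asymbol *synthsyms;
  long synthcount, i;

  /* Returns the number of synthetic symbols (negative on error); the
     asymbol array and its names live in one malloc'd block.  */
  synthcount = bfd_get_synthetic_symtab (abfd, symcount, syms,
                                         dynsymcount, dynsyms, &synthsyms);
  for (i = 0; i < synthcount; i++)
    printf ("%s: offset %#lx in %s\n", synthsyms[i].name,
            (unsigned long) synthsyms[i].value,
            synthsyms[i].section->name);
  if (synthcount > 0)
    free (synthsyms);
}
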
5683 /* Handle an x86-64 specific section when reading an object file. This
5684 is called when elfcode.h finds a section with an unknown type. */
5685
5686 static bfd_boolean
5687 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5688 const char *name, int shindex)
5689 {
5690 if (hdr->sh_type != SHT_X86_64_UNWIND)
5691 return FALSE;
5692
5693 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5694 return FALSE;
5695
5696 return TRUE;
5697 }
5698
5699 /* Hook called by the linker routine which adds symbols from an object
5700 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5701 of .bss. */
5702
5703 static bfd_boolean
5704 elf_x86_64_add_symbol_hook (bfd *abfd,
5705 struct bfd_link_info *info,
5706 Elf_Internal_Sym *sym,
5707 const char **namep ATTRIBUTE_UNUSED,
5708 flagword *flagsp ATTRIBUTE_UNUSED,
5709 asection **secp,
5710 bfd_vma *valp)
5711 {
5712 asection *lcomm;
5713
5714 switch (sym->st_shndx)
5715 {
5716 case SHN_X86_64_LCOMMON:
5717 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5718 if (lcomm == NULL)
5719 {
5720 lcomm = bfd_make_section_with_flags (abfd,
5721 "LARGE_COMMON",
5722 (SEC_ALLOC
5723 | SEC_IS_COMMON
5724 | SEC_LINKER_CREATED));
5725 if (lcomm == NULL)
5726 return FALSE;
5727 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5728 }
5729 *secp = lcomm;
5730 *valp = sym->st_size;
5731 return TRUE;
5732 }
5733
5734 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5735 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5736 && (abfd->flags & DYNAMIC) == 0
5737 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5738 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5739
5740 return TRUE;
5741 }
5742
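/* Illustrative example, not part of the original source, of input that
   exercises the SHN_X86_64_LCOMMON path above: a tentative definition
   large enough to exceed the compiler's large-data threshold under the
   medium code model (e.g. built with something like
   "gcc -mcmodel=medium -c big.c") is expected to carry
   st_shndx == SHN_X86_64_LCOMMON, so the hook routes it into the
   LARGE_COMMON section instead of ordinary common.  The exact compiler
   behaviour and flags are an assumption, not something this file
   defines.  */

char example_big_common[16 * 1024 * 1024];
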
5743
5744 /* Given a BFD section, try to locate the corresponding ELF section
5745 index. */
5746
5747 static bfd_boolean
5748 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5749 asection *sec, int *index_return)
5750 {
5751 if (sec == &_bfd_elf_large_com_section)
5752 {
5753 *index_return = SHN_X86_64_LCOMMON;
5754 return TRUE;
5755 }
5756 return FALSE;
5757 }
5758
5759 /* Process a symbol. */
5760
5761 static void
5762 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5763 asymbol *asym)
5764 {
5765 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5766
5767 switch (elfsym->internal_elf_sym.st_shndx)
5768 {
5769 case SHN_X86_64_LCOMMON:
5770 asym->section = &_bfd_elf_large_com_section;
5771 asym->value = elfsym->internal_elf_sym.st_size;
5772 /* Common symbol doesn't set BSF_GLOBAL. */
5773 asym->flags &= ~BSF_GLOBAL;
5774 break;
5775 }
5776 }
5777
5778 static bfd_boolean
5779 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5780 {
5781 return (sym->st_shndx == SHN_COMMON
5782 || sym->st_shndx == SHN_X86_64_LCOMMON);
5783 }
5784
5785 static unsigned int
5786 elf_x86_64_common_section_index (asection *sec)
5787 {
5788 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5789 return SHN_COMMON;
5790 else
5791 return SHN_X86_64_LCOMMON;
5792 }
5793
5794 static asection *
5795 elf_x86_64_common_section (asection *sec)
5796 {
5797 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5798 return bfd_com_section_ptr;
5799 else
5800 return &_bfd_elf_large_com_section;
5801 }
5802
5803 static bfd_boolean
5804 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5805 const Elf_Internal_Sym *sym,
5806 asection **psec,
5807 bfd_boolean newdef,
5808 bfd_boolean olddef,
5809 bfd *oldbfd,
5810 const asection *oldsec)
5811 {
5812 /* A normal common symbol and a large common symbol result in a
5813 normal common symbol. We turn the large common symbol into a
5814 normal one. */
5815 if (!olddef
5816 && h->root.type == bfd_link_hash_common
5817 && !newdef
5818 && bfd_is_com_section (*psec)
5819 && oldsec != *psec)
5820 {
5821 if (sym->st_shndx == SHN_COMMON
5822 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5823 {
5824 h->root.u.c.p->section
5825 = bfd_make_section_old_way (oldbfd, "COMMON");
5826 h->root.u.c.p->section->flags = SEC_ALLOC;
5827 }
5828 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5829 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5830 *psec = bfd_com_section_ptr;
5831 }
5832
5833 return TRUE;
5834 }
5835
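/* Worked example (a sketch, not part of the original source): if one
   object provides "char buf[4096];" as an ordinary common (SHN_COMMON)
   and another provides the same tentative definition as a large common
   (SHN_X86_64_LCOMMON), the hook above resolves the pair to a normal
   common symbol, so "buf" is placed with the regular commons rather than
   in the large-data area.  */
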
5836 static int
5837 elf_x86_64_additional_program_headers (bfd *abfd,
5838 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5839 {
5840 asection *s;
5841 int count = 0;
5842
5843 /* Check to see if we need a large readonly segment. */
5844 s = bfd_get_section_by_name (abfd, ".lrodata");
5845 if (s && (s->flags & SEC_LOAD))
5846 count++;
5847
5848 /* Check to see if we need a large data segment.  Since the .lbss
5849 section is placed right after the .bss section, there should be no
5850 need for a large data segment just because of .lbss.  */
5851 s = bfd_get_section_by_name (abfd, ".ldata");
5852 if (s && (s->flags & SEC_LOAD))
5853 count++;
5854
5855 return count;
5856 }
5857
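/* Illustrative only, not part of the original source: data placed in one
   of the large sections checked above.  If such input ends up in a
   SEC_LOAD ".ldata" (or ".lrodata") output section, the hook requests one
   extra program header for the large segment.  The section attribute is a
   common GCC/clang extension and an assumption here, not something this
   file relies on.  */

static int example_ldata_item __attribute__ ((section (".ldata"))) = 1;
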
5858 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5859
5860 static bfd_boolean
5861 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5862 {
5863 if (h->plt.offset != (bfd_vma) -1
5864 && !h->def_regular
5865 && !h->pointer_equality_needed)
5866 return FALSE;
5867
5868 return _bfd_elf_hash_symbol (h);
5869 }
5870
5871 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5872
5873 static bfd_boolean
5874 elf_x86_64_relocs_compatible (const bfd_target *input,
5875 const bfd_target *output)
5876 {
5877 return ((xvec_get_elf_backend_data (input)->s->elfclass
5878 == xvec_get_elf_backend_data (output)->s->elfclass)
5879 && _bfd_elf_relocs_compatible (input, output));
5880 }
5881
5882 static const struct bfd_elf_special_section
5883 elf_x86_64_special_sections[]=
5884 {
5885 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5886 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5887 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5888 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5889 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5890 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5891 { NULL, 0, 0, 0, 0 }
5892 };
5893
5894 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5895 #define TARGET_LITTLE_NAME "elf64-x86-64"
5896 #define ELF_ARCH bfd_arch_i386
5897 #define ELF_TARGET_ID X86_64_ELF_DATA
5898 #define ELF_MACHINE_CODE EM_X86_64
5899 #define ELF_MAXPAGESIZE 0x200000
5900 #define ELF_MINPAGESIZE 0x1000
5901 #define ELF_COMMONPAGESIZE 0x1000
5902
5903 #define elf_backend_can_gc_sections 1
5904 #define elf_backend_can_refcount 1
5905 #define elf_backend_want_got_plt 1
5906 #define elf_backend_plt_readonly 1
5907 #define elf_backend_want_plt_sym 0
5908 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5909 #define elf_backend_rela_normal 1
5910 #define elf_backend_plt_alignment 4
5911
5912 #define elf_info_to_howto elf_x86_64_info_to_howto
5913
5914 #define bfd_elf64_bfd_link_hash_table_create \
5915 elf_x86_64_link_hash_table_create
5916 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5917 #define bfd_elf64_bfd_reloc_name_lookup \
5918 elf_x86_64_reloc_name_lookup
5919
5920 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5921 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5922 #define elf_backend_check_relocs elf_x86_64_check_relocs
5923 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5924 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5925 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5926 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5927 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5928 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5929 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5930 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5931 #ifdef CORE_HEADER
5932 #define elf_backend_write_core_note elf_x86_64_write_core_note
5933 #endif
5934 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5935 #define elf_backend_relocate_section elf_x86_64_relocate_section
5936 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5937 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5938 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5939 #define elf_backend_plt_sym_val elf_x86_64_plt_sym_val
5940 #define elf_backend_object_p elf64_x86_64_elf_object_p
5941 #define bfd_elf64_mkobject elf_x86_64_mkobject
5942 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5943
5944 #define elf_backend_section_from_shdr \
5945 elf_x86_64_section_from_shdr
5946
5947 #define elf_backend_section_from_bfd_section \
5948 elf_x86_64_elf_section_from_bfd_section
5949 #define elf_backend_add_symbol_hook \
5950 elf_x86_64_add_symbol_hook
5951 #define elf_backend_symbol_processing \
5952 elf_x86_64_symbol_processing
5953 #define elf_backend_common_section_index \
5954 elf_x86_64_common_section_index
5955 #define elf_backend_common_section \
5956 elf_x86_64_common_section
5957 #define elf_backend_common_definition \
5958 elf_x86_64_common_definition
5959 #define elf_backend_merge_symbol \
5960 elf_x86_64_merge_symbol
5961 #define elf_backend_special_sections \
5962 elf_x86_64_special_sections
5963 #define elf_backend_additional_program_headers \
5964 elf_x86_64_additional_program_headers
5965 #define elf_backend_hash_symbol \
5966 elf_x86_64_hash_symbol
5967
5968 #include "elf64-target.h"
5969
5970 /* FreeBSD support. */
5971
5972 #undef TARGET_LITTLE_SYM
5973 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5974 #undef TARGET_LITTLE_NAME
5975 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5976
5977 #undef ELF_OSABI
5978 #define ELF_OSABI ELFOSABI_FREEBSD
5979
5980 #undef elf64_bed
5981 #define elf64_bed elf64_x86_64_fbsd_bed
5982
5983 #include "elf64-target.h"
5984
5985 /* Solaris 2 support. */
5986
5987 #undef TARGET_LITTLE_SYM
5988 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5989 #undef TARGET_LITTLE_NAME
5990 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5991
5992 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5993 objects won't be recognized. */
5994 #undef ELF_OSABI
5995
5996 #undef elf64_bed
5997 #define elf64_bed elf64_x86_64_sol2_bed
5998
5999 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6000 boundary. */
6001 #undef elf_backend_static_tls_alignment
6002 #define elf_backend_static_tls_alignment 16
6003
6004 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6005
6006 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6007 File, p.63. */
6008 #undef elf_backend_want_plt_sym
6009 #define elf_backend_want_plt_sym 1
6010
6011 #include "elf64-target.h"
6012
6013 #undef bfd_elf64_get_synthetic_symtab
6014
6015 /* Native Client support. */
6016
6017 static bfd_boolean
6018 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6019 {
6020 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6021 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6022 return TRUE;
6023 }
6024
6025 #undef TARGET_LITTLE_SYM
6026 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6027 #undef TARGET_LITTLE_NAME
6028 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6029 #undef elf64_bed
6030 #define elf64_bed elf64_x86_64_nacl_bed
6031
6032 #undef ELF_MAXPAGESIZE
6033 #undef ELF_MINPAGESIZE
6034 #undef ELF_COMMONPAGESIZE
6035 #define ELF_MAXPAGESIZE 0x10000
6036 #define ELF_MINPAGESIZE 0x10000
6037 #define ELF_COMMONPAGESIZE 0x10000
6038
6039 /* Restore defaults. */
6040 #undef ELF_OSABI
6041 #undef elf_backend_static_tls_alignment
6042 #undef elf_backend_want_plt_sym
6043 #define elf_backend_want_plt_sym 0
6044
6045 /* NaCl uses substantially different PLT entries for the same effects. */
6046
6047 #undef elf_backend_plt_alignment
6048 #define elf_backend_plt_alignment 5
6049 #define NACL_PLT_ENTRY_SIZE 64
6050 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6051
6052 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6053 {
6054 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6055 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6056 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6057 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6058 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6059
6060 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6061 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6062
6063 /* 32 bytes of nop to pad out to the standard size. */
6064 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6065 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6066 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6067 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6068 0x66, /* excess data32 prefix */
6069 0x90 /* nop */
6070 };
6071
6072 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6073 {
6074 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6075 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6076 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6077 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6078
6079 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6080 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6081 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6082
6083 /* Lazy GOT entries point here (32-byte aligned). */
6084 0x68, /* pushq immediate */
6085 0, 0, 0, 0, /* replaced with index into relocation table. */
6086 0xe9, /* jmp relative */
6087 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6088
6089 /* 22 bytes of nop to pad out to the standard size. */
6090 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6091 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6092 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6093 };
6094
6095 /* .eh_frame covering the .plt section. */
6096
6097 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6098 {
6099 #if (PLT_CIE_LENGTH != 20 \
6100 || PLT_FDE_LENGTH != 36 \
6101 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6102 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6103 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6104 #endif
6105 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6106 0, 0, 0, 0, /* CIE ID */
6107 1, /* CIE version */
6108 'z', 'R', 0, /* Augmentation string */
6109 1, /* Code alignment factor */
6110 0x78, /* Data alignment factor */
6111 16, /* Return address column */
6112 1, /* Augmentation size */
6113 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6114 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6115 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6116 DW_CFA_nop, DW_CFA_nop,
6117
6118 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6119 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6120 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6121 0, 0, 0, 0, /* .plt size goes here */
6122 0, /* Augmentation size */
6123 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6124 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6125 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6126 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6127 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6128 13, /* Block length */
6129 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6130 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6131 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6132 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6133 DW_CFA_nop, DW_CFA_nop
6134 };
6135
6136 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6137 {
6138 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6139 elf_x86_64_nacl_plt_entry, /* plt_entry */
6140 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6141 2, /* plt0_got1_offset */
6142 9, /* plt0_got2_offset */
6143 13, /* plt0_got2_insn_end */
6144 3, /* plt_got_offset */
6145 33, /* plt_reloc_offset */
6146 38, /* plt_plt_offset */
6147 7, /* plt_got_insn_size */
6148 42, /* plt_plt_insn_end */
6149 32, /* plt_lazy_offset */
6150 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6151 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6152 };
6153
6154 #undef elf_backend_arch_data
6155 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6156
6157 #undef elf_backend_object_p
6158 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6159 #undef elf_backend_modify_segment_map
6160 #define elf_backend_modify_segment_map nacl_modify_segment_map
6161 #undef elf_backend_modify_program_headers
6162 #define elf_backend_modify_program_headers nacl_modify_program_headers
6163 #undef elf_backend_final_write_processing
6164 #define elf_backend_final_write_processing nacl_final_write_processing
6165
6166 #include "elf64-target.h"
6167
6168 /* Native Client x32 support. */
6169
6170 static bfd_boolean
6171 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6172 {
6173 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6174 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6175 return TRUE;
6176 }
6177
6178 #undef TARGET_LITTLE_SYM
6179 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6180 #undef TARGET_LITTLE_NAME
6181 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6182 #undef elf32_bed
6183 #define elf32_bed elf32_x86_64_nacl_bed
6184
6185 #define bfd_elf32_bfd_link_hash_table_create \
6186 elf_x86_64_link_hash_table_create
6187 #define bfd_elf32_bfd_reloc_type_lookup \
6188 elf_x86_64_reloc_type_lookup
6189 #define bfd_elf32_bfd_reloc_name_lookup \
6190 elf_x86_64_reloc_name_lookup
6191 #define bfd_elf32_mkobject \
6192 elf_x86_64_mkobject
6193
6194 #undef elf_backend_object_p
6195 #define elf_backend_object_p \
6196 elf32_x86_64_nacl_elf_object_p
6197
6198 #undef elf_backend_bfd_from_remote_memory
6199 #define elf_backend_bfd_from_remote_memory \
6200 _bfd_elf32_bfd_from_remote_memory
6201
6202 #undef elf_backend_size_info
6203 #define elf_backend_size_info \
6204 _bfd_elf32_size_info
6205
6206 #include "elf32-target.h"
6207
6208 /* Restore defaults. */
6209 #undef elf_backend_object_p
6210 #define elf_backend_object_p elf64_x86_64_elf_object_p
6211 #undef elf_backend_bfd_from_remote_memory
6212 #undef elf_backend_size_info
6213 #undef elf_backend_modify_segment_map
6214 #undef elf_backend_modify_program_headers
6215 #undef elf_backend_final_write_processing
6216
6217 /* Intel L1OM support. */
6218
6219 static bfd_boolean
6220 elf64_l1om_elf_object_p (bfd *abfd)
6221 {
6222 /* Set the right machine number for an L1OM elf64 file. */
6223 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6224 return TRUE;
6225 }
6226
6227 #undef TARGET_LITTLE_SYM
6228 #define TARGET_LITTLE_SYM l1om_elf64_vec
6229 #undef TARGET_LITTLE_NAME
6230 #define TARGET_LITTLE_NAME "elf64-l1om"
6231 #undef ELF_ARCH
6232 #define ELF_ARCH bfd_arch_l1om
6233
6234 #undef ELF_MACHINE_CODE
6235 #define ELF_MACHINE_CODE EM_L1OM
6236
6237 #undef ELF_OSABI
6238
6239 #undef elf64_bed
6240 #define elf64_bed elf64_l1om_bed
6241
6242 #undef elf_backend_object_p
6243 #define elf_backend_object_p elf64_l1om_elf_object_p
6244
6245 /* Restore defaults. */
6246 #undef ELF_MAXPAGESIZE
6247 #undef ELF_MINPAGESIZE
6248 #undef ELF_COMMONPAGESIZE
6249 #define ELF_MAXPAGESIZE 0x200000
6250 #define ELF_MINPAGESIZE 0x1000
6251 #define ELF_COMMONPAGESIZE 0x1000
6252 #undef elf_backend_plt_alignment
6253 #define elf_backend_plt_alignment 4
6254 #undef elf_backend_arch_data
6255 #define elf_backend_arch_data &elf_x86_64_arch_bed
6256
6257 #include "elf64-target.h"
6258
6259 /* FreeBSD L1OM support. */
6260
6261 #undef TARGET_LITTLE_SYM
6262 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6263 #undef TARGET_LITTLE_NAME
6264 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6265
6266 #undef ELF_OSABI
6267 #define ELF_OSABI ELFOSABI_FREEBSD
6268
6269 #undef elf64_bed
6270 #define elf64_bed elf64_l1om_fbsd_bed
6271
6272 #include "elf64-target.h"
6273
6274 /* Intel K1OM support. */
6275
6276 static bfd_boolean
6277 elf64_k1om_elf_object_p (bfd *abfd)
6278 {
6279 /* Set the right machine number for a K1OM elf64 file. */
6280 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6281 return TRUE;
6282 }
6283
6284 #undef TARGET_LITTLE_SYM
6285 #define TARGET_LITTLE_SYM k1om_elf64_vec
6286 #undef TARGET_LITTLE_NAME
6287 #define TARGET_LITTLE_NAME "elf64-k1om"
6288 #undef ELF_ARCH
6289 #define ELF_ARCH bfd_arch_k1om
6290
6291 #undef ELF_MACHINE_CODE
6292 #define ELF_MACHINE_CODE EM_K1OM
6293
6294 #undef ELF_OSABI
6295
6296 #undef elf64_bed
6297 #define elf64_bed elf64_k1om_bed
6298
6299 #undef elf_backend_object_p
6300 #define elf_backend_object_p elf64_k1om_elf_object_p
6301
6302 #undef elf_backend_static_tls_alignment
6303
6304 #undef elf_backend_want_plt_sym
6305 #define elf_backend_want_plt_sym 0
6306
6307 #include "elf64-target.h"
6308
6309 /* FreeBSD K1OM support. */
6310
6311 #undef TARGET_LITTLE_SYM
6312 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6313 #undef TARGET_LITTLE_NAME
6314 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6315
6316 #undef ELF_OSABI
6317 #define ELF_OSABI ELFOSABI_FREEBSD
6318
6319 #undef elf64_bed
6320 #define elf64_bed elf64_k1om_fbsd_bed
6321
6322 #include "elf64-target.h"
6323
6324 /* 32bit x86-64 support. */
6325
6326 #undef TARGET_LITTLE_SYM
6327 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6328 #undef TARGET_LITTLE_NAME
6329 #define TARGET_LITTLE_NAME "elf32-x86-64"
6330 #undef elf32_bed
6331
6332 #undef ELF_ARCH
6333 #define ELF_ARCH bfd_arch_i386
6334
6335 #undef ELF_MACHINE_CODE
6336 #define ELF_MACHINE_CODE EM_X86_64
6337
6338 #undef ELF_OSABI
6339
6340 #undef elf_backend_object_p
6341 #define elf_backend_object_p \
6342 elf32_x86_64_elf_object_p
6343
6344 #undef elf_backend_bfd_from_remote_memory
6345 #define elf_backend_bfd_from_remote_memory \
6346 _bfd_elf32_bfd_from_remote_memory
6347
6348 #undef elf_backend_size_info
6349 #define elf_backend_size_info \
6350 _bfd_elf32_size_info
6351
6352 #include "elf32-target.h"