bfd/elf64-x86-64.c (binutils-gdb)
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
 44 /* Since 32-bit and 64-bit x86-64 encode the relocation type in an
 45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
 46 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
 47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
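/* To make the point above concrete: the standard accessors are roughly

     ELF32_R_TYPE (i)  ==  (i) & 0xff
     ELF64_R_TYPE (i)  ==  (i) & 0xffffffff

   and x86-64 keeps the relocation type in the low bits of r_info in both
   layouts, with every R_X86_64_* value fitting in 8 bits, so the narrower
   ELF32_R_TYPE extracts the same type from either an ELF64 or an x32
   relocation.  This is an illustrative summary; elf-bfd.h and elf/x86-64.h
   hold the authoritative definitions.  */
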
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
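/* Reading one row of the table above as a worked example: for
   R_X86_64_PC32 the fields say size 2 (a 4-byte field in this BFD
   encoding), bitsize 32, pc_relative TRUE, complain_overflow_signed and
   src/dst masks of 0xffffffff, so the generic relocation code computes
   roughly

     value = symbol + addend - place

   and complains if the result does not fit a signed 32-bit field.  This
   is only a reading of the entry, not additional semantics.  */
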
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
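/* A worked example of the index arithmetic above: an ELF64 R_X86_64_32
   maps to its own slot, an x32 R_X86_64_32 is redirected to the final
   (bitfield-overflow) entry, and R_X86_64_GNU_VTENTRY is reached by
   subtracting R_X86_64_vt_offset:

     i = R_X86_64_GNU_VTENTRY - (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
       = R_X86_64_standard + 1

   i.e. the second of the two vtable entries appended after the gap.
   Anything at or beyond R_X86_64_standard but outside the vtable range is
   reported and downgraded to R_X86_64_NONE.  */
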
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
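/* These two routines back the generic bfd_reloc_type_lookup and
   bfd_reloc_name_lookup entry points.  A minimal usage sketch follows,
   kept out of the build with #if 0; the example function is made up for
   illustration.  */
#if 0
static void
example_reloc_lookups (bfd *abfd)
{
  /* Dispatches to elf_x86_64_reloc_type_lookup via the target vector.  */
  reloc_howto_type *by_code
    = bfd_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);

  /* Dispatches to elf_x86_64_reloc_name_lookup; matching is
     case-insensitive.  */
  reloc_howto_type *by_name
    = bfd_reloc_name_lookup (abfd, "R_X86_64_PC32");

  /* Both paths should land on the same howto entry.  */
  BFD_ASSERT (by_code != NULL && by_code == by_name);
}
#endif
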
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
 356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
 369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
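/* Spelling out the x86-64 case above: the 336-byte NT_PRSTATUS note
   carries pr_cursig at offset 12, pr_pid at offset 32, and the general
   register block (pr_reg) at offset 112 with a size of 216 bytes, which
   matches 27 eight-byte registers in struct user_regs_struct.  The
   pseudosection created is therefore ".reg/<lwpid>", 216 bytes long,
   starting 112 bytes into the note descriptor.  This is a worked reading
   of the constants above, not an independent definition.  */
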
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
 416 /* Note that for some reason, a spurious space is tacked
 417 onto the end of the args in some implementations (at least
 418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
562
 563 /* The first entry in a procedure linkage table with BND relocations
 564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
 583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
651
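/* The DW_CFA_def_cfa_expression block above encodes, roughly,

     CFA = %rsp + 8 + (((%rip & 15) >= 11) ? 8 : 0)

   i.e. within each 16-byte PLT entry the CFA is %rsp + 8 until the pushq
   of the relocation index has executed (the trailing jmp back to PLT0
   starts at offset 11), and %rsp + 16 afterwards.  This is a hand
   decoding for the reader's benefit; the bytes above are authoritative.  */
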
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
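/* Putting the standard parameters together with elf_x86_64_plt_entry: a
   finished lazy PLT slot is patched at plt_got_offset (byte 2) with the
   PC-relative distance to the symbol's GOT slot, measured from the end of
   the 6-byte jmp (plt_got_insn_size); at plt_reloc_offset (byte 7) with
   the index of the slot's JUMP_SLOT relocation; and at plt_plt_offset
   (byte 12) with the backwards displacement to PLT0, measured from the
   end of the 16-byte entry (plt_plt_insn_end).  Until the first call is
   resolved, the GOT slot itself points back at plt_lazy_offset (byte 6),
   i.e. at the pushq.  This paraphrases the table above; the code that
   fills in PLT entries performs the actual patching.  */
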
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Reference count of C/C++ function pointer relocations in read-write
771 section which can be resolved at run-time. */
772 bfd_signed_vma func_pointer_refcount;
773
774 /* Information about the GOT PLT entry. Filled when there are both
775 GOT and PLT relocations against the same function. */
776 union gotplt_union plt_got;
777
778 /* Information about the second PLT entry. Filled when has_bnd_reloc is
779 set. */
780 union gotplt_union plt_bnd;
781
782 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
783 starting at the end of the jump table. */
784 bfd_vma tlsdesc_got;
785 };
786
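/* A quick check of the tls_type predicates above with concrete values:
   GOT_TLS_GD is 2 and GOT_TLS_GDESC is 4, so a symbol that has seen both
   kinds of access holds tls_type == 6, for which GOT_TLS_GD_BOTH_P,
   GOT_TLS_GD_P, GOT_TLS_GDESC_P and GOT_TLS_GD_ANY_P are all true, while
   a plain GOT_TLS_IE symbol (tls_type == 3) satisfies none of them.  The
   values let GD and GDESC be OR-ed together; GOT_TLS_IE is a plain
   enumerator, not a combinable bit.  */
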
787 #define elf_x86_64_hash_entry(ent) \
788 ((struct elf_x86_64_link_hash_entry *)(ent))
789
790 struct elf_x86_64_obj_tdata
791 {
792 struct elf_obj_tdata root;
793
794 /* tls_type for each local got entry. */
795 char *local_got_tls_type;
796
797 /* GOTPLT entries for TLS descriptors. */
798 bfd_vma *local_tlsdesc_gotent;
799 };
800
801 #define elf_x86_64_tdata(abfd) \
802 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
803
804 #define elf_x86_64_local_got_tls_type(abfd) \
805 (elf_x86_64_tdata (abfd)->local_got_tls_type)
806
807 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
808 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
809
810 #define is_x86_64_elf(bfd) \
811 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
812 && elf_tdata (bfd) != NULL \
813 && elf_object_id (bfd) == X86_64_ELF_DATA)
814
815 static bfd_boolean
816 elf_x86_64_mkobject (bfd *abfd)
817 {
818 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
819 X86_64_ELF_DATA);
820 }
821
822 /* x86-64 ELF linker hash table. */
823
824 struct elf_x86_64_link_hash_table
825 {
826 struct elf_link_hash_table elf;
827
828 /* Short-cuts to get to dynamic linker sections. */
829 asection *sdynbss;
830 asection *srelbss;
831 asection *plt_eh_frame;
832 asection *plt_bnd;
833 asection *plt_got;
834
835 union
836 {
837 bfd_signed_vma refcount;
838 bfd_vma offset;
839 } tls_ld_got;
840
841 /* The amount of space used by the jump slots in the GOT. */
842 bfd_vma sgotplt_jump_table_size;
843
844 /* Small local sym cache. */
845 struct sym_cache sym_cache;
846
847 bfd_vma (*r_info) (bfd_vma, bfd_vma);
848 bfd_vma (*r_sym) (bfd_vma);
849 unsigned int pointer_r_type;
850 const char *dynamic_interpreter;
851 int dynamic_interpreter_size;
852
853 /* _TLS_MODULE_BASE_ symbol. */
854 struct bfd_link_hash_entry *tls_module_base;
855
856 /* Used by local STT_GNU_IFUNC symbols. */
857 htab_t loc_hash_table;
858 void * loc_hash_memory;
859
860 /* The offset into splt of the PLT entry for the TLS descriptor
861 resolver. Special values are 0, if not necessary (or not found
862 to be necessary yet), and -1 if needed but not determined
863 yet. */
864 bfd_vma tlsdesc_plt;
865 /* The offset into sgot of the GOT entry used by the PLT entry
866 above. */
867 bfd_vma tlsdesc_got;
868
869 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
870 bfd_vma next_jump_slot_index;
871 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
872 bfd_vma next_irelative_index;
873 };
874
875 /* Get the x86-64 ELF linker hash table from a link_info structure. */
876
877 #define elf_x86_64_hash_table(p) \
878 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
879 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
880
881 #define elf_x86_64_compute_jump_table_size(htab) \
882 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
883
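/* For example, a link that ends up with three entries in .rela.plt
   reserves 3 * GOT_ENTRY_SIZE = 24 bytes of jump slots in .got.plt, in
   addition to the reserved header entries (the address of _DYNAMIC plus
   the two slots filled in by the dynamic linker).  Illustrative
   arithmetic only.  */
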
884 /* Create an entry in an x86-64 ELF linker hash table. */
885
886 static struct bfd_hash_entry *
887 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
888 struct bfd_hash_table *table,
889 const char *string)
890 {
891 /* Allocate the structure if it has not already been allocated by a
892 subclass. */
893 if (entry == NULL)
894 {
895 entry = (struct bfd_hash_entry *)
896 bfd_hash_allocate (table,
897 sizeof (struct elf_x86_64_link_hash_entry));
898 if (entry == NULL)
899 return entry;
900 }
901
902 /* Call the allocation method of the superclass. */
903 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
904 if (entry != NULL)
905 {
906 struct elf_x86_64_link_hash_entry *eh;
907
908 eh = (struct elf_x86_64_link_hash_entry *) entry;
909 eh->dyn_relocs = NULL;
910 eh->tls_type = GOT_UNKNOWN;
911 eh->needs_copy = 0;
912 eh->has_bnd_reloc = 0;
913 eh->func_pointer_refcount = 0;
914 eh->plt_bnd.offset = (bfd_vma) -1;
915 eh->plt_got.offset = (bfd_vma) -1;
916 eh->tlsdesc_got = (bfd_vma) -1;
917 }
918
919 return entry;
920 }
921
 922 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
 923 for local symbols so that we can handle local STT_GNU_IFUNC symbols
 924 as global symbols. We reuse indx and dynstr_index for the local
 925 symbol hash since they aren't used by global symbols in this backend. */
926
927 static hashval_t
928 elf_x86_64_local_htab_hash (const void *ptr)
929 {
930 struct elf_link_hash_entry *h
931 = (struct elf_link_hash_entry *) ptr;
932 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
933 }
934
935 /* Compare local hash entries. */
936
937 static int
938 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
939 {
940 struct elf_link_hash_entry *h1
941 = (struct elf_link_hash_entry *) ptr1;
942 struct elf_link_hash_entry *h2
943 = (struct elf_link_hash_entry *) ptr2;
944
945 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
946 }
947
 948 /* Find and/or create a hash entry for a local symbol. */
949
950 static struct elf_link_hash_entry *
951 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
952 bfd *abfd, const Elf_Internal_Rela *rel,
953 bfd_boolean create)
954 {
955 struct elf_x86_64_link_hash_entry e, *ret;
956 asection *sec = abfd->sections;
957 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
958 htab->r_sym (rel->r_info));
959 void **slot;
960
961 e.elf.indx = sec->id;
962 e.elf.dynstr_index = htab->r_sym (rel->r_info);
963 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
964 create ? INSERT : NO_INSERT);
965
966 if (!slot)
967 return NULL;
968
969 if (*slot)
970 {
971 ret = (struct elf_x86_64_link_hash_entry *) *slot;
972 return &ret->elf;
973 }
974
975 ret = (struct elf_x86_64_link_hash_entry *)
976 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
977 sizeof (struct elf_x86_64_link_hash_entry));
978 if (ret)
979 {
980 memset (ret, 0, sizeof (*ret));
981 ret->elf.indx = sec->id;
982 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
983 ret->elf.dynindx = -1;
984 ret->func_pointer_refcount = 0;
985 ret->plt_got.offset = (bfd_vma) -1;
986 *slot = ret;
987 }
988 return &ret->elf;
989 }
990
991 /* Destroy an X86-64 ELF linker hash table. */
992
993 static void
994 elf_x86_64_link_hash_table_free (bfd *obfd)
995 {
996 struct elf_x86_64_link_hash_table *htab
997 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
998
999 if (htab->loc_hash_table)
1000 htab_delete (htab->loc_hash_table);
1001 if (htab->loc_hash_memory)
1002 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1003 _bfd_elf_link_hash_table_free (obfd);
1004 }
1005
1006 /* Create an X86-64 ELF linker hash table. */
1007
1008 static struct bfd_link_hash_table *
1009 elf_x86_64_link_hash_table_create (bfd *abfd)
1010 {
1011 struct elf_x86_64_link_hash_table *ret;
1012 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1013
1014 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1015 if (ret == NULL)
1016 return NULL;
1017
1018 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1019 elf_x86_64_link_hash_newfunc,
1020 sizeof (struct elf_x86_64_link_hash_entry),
1021 X86_64_ELF_DATA))
1022 {
1023 free (ret);
1024 return NULL;
1025 }
1026
1027 if (ABI_64_P (abfd))
1028 {
1029 ret->r_info = elf64_r_info;
1030 ret->r_sym = elf64_r_sym;
1031 ret->pointer_r_type = R_X86_64_64;
1032 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1033 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1034 }
1035 else
1036 {
1037 ret->r_info = elf32_r_info;
1038 ret->r_sym = elf32_r_sym;
1039 ret->pointer_r_type = R_X86_64_32;
1040 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1041 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1042 }
1043
1044 ret->loc_hash_table = htab_try_create (1024,
1045 elf_x86_64_local_htab_hash,
1046 elf_x86_64_local_htab_eq,
1047 NULL);
1048 ret->loc_hash_memory = objalloc_create ();
1049 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1050 {
1051 elf_x86_64_link_hash_table_free (abfd);
1052 return NULL;
1053 }
1054 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1055
1056 return &ret->elf.root;
1057 }
1058
1059 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1060 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1061 hash table. */
1062
1063 static bfd_boolean
1064 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1065 struct bfd_link_info *info)
1066 {
1067 struct elf_x86_64_link_hash_table *htab;
1068
1069 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1070 return FALSE;
1071
1072 htab = elf_x86_64_hash_table (info);
1073 if (htab == NULL)
1074 return FALSE;
1075
1076 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1077 if (!htab->sdynbss)
1078 abort ();
1079
1080 if (bfd_link_executable (info))
1081 {
1082 /* Always allow copy relocs for building executables. */
1083 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1084 if (s == NULL)
1085 {
1086 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1087 s = bfd_make_section_anyway_with_flags (dynobj,
1088 ".rela.bss",
1089 (bed->dynamic_sec_flags
1090 | SEC_READONLY));
1091 if (s == NULL
1092 || ! bfd_set_section_alignment (dynobj, s,
1093 bed->s->log_file_align))
1094 return FALSE;
1095 }
1096 htab->srelbss = s;
1097 }
1098
1099 if (!info->no_ld_generated_unwind_info
1100 && htab->plt_eh_frame == NULL
1101 && htab->elf.splt != NULL)
1102 {
1103 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1104 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1105 | SEC_LINKER_CREATED);
1106 htab->plt_eh_frame
1107 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1108 if (htab->plt_eh_frame == NULL
1109 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1110 return FALSE;
1111 }
1112 return TRUE;
1113 }
1114
1115 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1116
1117 static void
1118 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1119 struct elf_link_hash_entry *dir,
1120 struct elf_link_hash_entry *ind)
1121 {
1122 struct elf_x86_64_link_hash_entry *edir, *eind;
1123
1124 edir = (struct elf_x86_64_link_hash_entry *) dir;
1125 eind = (struct elf_x86_64_link_hash_entry *) ind;
1126
1127 if (!edir->has_bnd_reloc)
1128 edir->has_bnd_reloc = eind->has_bnd_reloc;
1129
1130 if (eind->dyn_relocs != NULL)
1131 {
1132 if (edir->dyn_relocs != NULL)
1133 {
1134 struct elf_dyn_relocs **pp;
1135 struct elf_dyn_relocs *p;
1136
1137 /* Add reloc counts against the indirect sym to the direct sym
1138 list. Merge any entries against the same section. */
1139 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1140 {
1141 struct elf_dyn_relocs *q;
1142
1143 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1144 if (q->sec == p->sec)
1145 {
1146 q->pc_count += p->pc_count;
1147 q->count += p->count;
1148 *pp = p->next;
1149 break;
1150 }
1151 if (q == NULL)
1152 pp = &p->next;
1153 }
1154 *pp = edir->dyn_relocs;
1155 }
1156
1157 edir->dyn_relocs = eind->dyn_relocs;
1158 eind->dyn_relocs = NULL;
1159 }
1160
1161 if (ind->root.type == bfd_link_hash_indirect
1162 && dir->got.refcount <= 0)
1163 {
1164 edir->tls_type = eind->tls_type;
1165 eind->tls_type = GOT_UNKNOWN;
1166 }
1167
1168 if (ELIMINATE_COPY_RELOCS
1169 && ind->root.type != bfd_link_hash_indirect
1170 && dir->dynamic_adjusted)
1171 {
1172 /* If called to transfer flags for a weakdef during processing
1173 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1174 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1175 dir->ref_dynamic |= ind->ref_dynamic;
1176 dir->ref_regular |= ind->ref_regular;
1177 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1178 dir->needs_plt |= ind->needs_plt;
1179 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1180 }
1181 else
1182 {
1183 if (eind->func_pointer_refcount > 0)
1184 {
1185 edir->func_pointer_refcount += eind->func_pointer_refcount;
1186 eind->func_pointer_refcount = 0;
1187 }
1188
1189 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1190 }
1191 }
1192
1193 static bfd_boolean
1194 elf64_x86_64_elf_object_p (bfd *abfd)
1195 {
1196 /* Set the right machine number for an x86-64 elf64 file. */
1197 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1198 return TRUE;
1199 }
1200
1201 static bfd_boolean
1202 elf32_x86_64_elf_object_p (bfd *abfd)
1203 {
1204 /* Set the right machine number for an x86-64 elf32 file. */
1205 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1206 return TRUE;
1207 }
1208
 1209 /* Return TRUE if the TLS access code sequence supports transition
 1210 from R_TYPE. */
1211
1212 static bfd_boolean
1213 elf_x86_64_check_tls_transition (bfd *abfd,
1214 struct bfd_link_info *info,
1215 asection *sec,
1216 bfd_byte *contents,
1217 Elf_Internal_Shdr *symtab_hdr,
1218 struct elf_link_hash_entry **sym_hashes,
1219 unsigned int r_type,
1220 const Elf_Internal_Rela *rel,
1221 const Elf_Internal_Rela *relend)
1222 {
1223 unsigned int val;
1224 unsigned long r_symndx;
1225 bfd_boolean largepic = FALSE;
1226 struct elf_link_hash_entry *h;
1227 bfd_vma offset;
1228 struct elf_x86_64_link_hash_table *htab;
1229
1230 /* Get the section contents. */
1231 if (contents == NULL)
1232 {
1233 if (elf_section_data (sec)->this_hdr.contents != NULL)
1234 contents = elf_section_data (sec)->this_hdr.contents;
1235 else
1236 {
1237 /* FIXME: How to better handle error condition? */
1238 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1239 return FALSE;
1240
1241 /* Cache the section contents for elf_link_input_bfd. */
1242 elf_section_data (sec)->this_hdr.contents = contents;
1243 }
1244 }
1245
1246 htab = elf_x86_64_hash_table (info);
1247 offset = rel->r_offset;
1248 switch (r_type)
1249 {
1250 case R_X86_64_TLSGD:
1251 case R_X86_64_TLSLD:
1252 if ((rel + 1) >= relend)
1253 return FALSE;
1254
1255 if (r_type == R_X86_64_TLSGD)
1256 {
 1257 /* Check transition from GD access model. For 64bit, only
 1258 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
 1259 .word 0x6666; rex64; call __tls_get_addr
 1260 can transition to a different access model. For 32bit, only
 1261 leaq foo@tlsgd(%rip), %rdi
 1262 .word 0x6666; rex64; call __tls_get_addr
 1263 can transition to a different access model. For largepic
 1264 we also support:
 1265 leaq foo@tlsgd(%rip), %rdi
 1266 movabsq $__tls_get_addr@pltoff, %rax
 1267 addq %rbx, %rax
 1268 call *%rax. */
1269
1270 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1271 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1272
1273 if ((offset + 12) > sec->size)
1274 return FALSE;
1275
1276 if (memcmp (contents + offset + 4, call, 4) != 0)
1277 {
1278 if (!ABI_64_P (abfd)
1279 || (offset + 19) > sec->size
1280 || offset < 3
1281 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1282 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1283 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1284 != 0)
1285 return FALSE;
1286 largepic = TRUE;
1287 }
1288 else if (ABI_64_P (abfd))
1289 {
1290 if (offset < 4
1291 || memcmp (contents + offset - 4, leaq, 4) != 0)
1292 return FALSE;
1293 }
1294 else
1295 {
1296 if (offset < 3
1297 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1298 return FALSE;
1299 }
1300 }
1301 else
1302 {
 1303 /* Check transition from LD access model. Only
 1304 leaq foo@tlsld(%rip), %rdi;
 1305 call __tls_get_addr
 1306 can transition to a different access model. For largepic
 1307 we also support:
 1308 leaq foo@tlsld(%rip), %rdi
 1309 movabsq $__tls_get_addr@pltoff, %rax
 1310 addq %rbx, %rax
 1311 call *%rax. */
1312
1313 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1314
1315 if (offset < 3 || (offset + 9) > sec->size)
1316 return FALSE;
1317
1318 if (memcmp (contents + offset - 3, lea, 3) != 0)
1319 return FALSE;
1320
1321 if (0xe8 != *(contents + offset + 4))
1322 {
1323 if (!ABI_64_P (abfd)
1324 || (offset + 19) > sec->size
1325 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1326 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1327 != 0)
1328 return FALSE;
1329 largepic = TRUE;
1330 }
1331 }
1332
1333 r_symndx = htab->r_sym (rel[1].r_info);
1334 if (r_symndx < symtab_hdr->sh_info)
1335 return FALSE;
1336
1337 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1338 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1339 may be versioned. */
1340 return (h != NULL
1341 && h->root.root.string != NULL
1342 && (largepic
1343 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1344 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1345 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1346 && (strncmp (h->root.root.string,
1347 "__tls_get_addr", 14) == 0));
1348
1349 case R_X86_64_GOTTPOFF:
1350 /* Check transition from IE access model:
1351 mov foo@gottpoff(%rip), %reg
1352 add foo@gottpoff(%rip), %reg
1353 */
1354
1355 /* Check REX prefix first. */
1356 if (offset >= 3 && (offset + 4) <= sec->size)
1357 {
1358 val = bfd_get_8 (abfd, contents + offset - 3);
1359 if (val != 0x48 && val != 0x4c)
1360 {
1361 /* X32 may have 0x44 REX prefix or no REX prefix. */
1362 if (ABI_64_P (abfd))
1363 return FALSE;
1364 }
1365 }
1366 else
1367 {
1368 /* X32 may not have any REX prefix. */
1369 if (ABI_64_P (abfd))
1370 return FALSE;
1371 if (offset < 2 || (offset + 3) > sec->size)
1372 return FALSE;
1373 }
1374
1375 val = bfd_get_8 (abfd, contents + offset - 2);
1376 if (val != 0x8b && val != 0x03)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 1);
1380 return (val & 0xc7) == 5;
1381
1382 case R_X86_64_GOTPC32_TLSDESC:
1383 /* Check transition from GDesc access model:
1384 leaq x@tlsdesc(%rip), %rax
1385
1386 Make sure it's a leaq adding rip to a 32-bit offset
1387 into any register, although it's probably almost always
1388 going to be rax. */
1389
1390 if (offset < 3 || (offset + 4) > sec->size)
1391 return FALSE;
1392
1393 val = bfd_get_8 (abfd, contents + offset - 3);
1394 if ((val & 0xfb) != 0x48)
1395 return FALSE;
1396
1397 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1398 return FALSE;
1399
1400 val = bfd_get_8 (abfd, contents + offset - 1);
1401 return (val & 0xc7) == 0x05;
1402
1403 case R_X86_64_TLSDESC_CALL:
1404 /* Check transition from GDesc access model:
1405 call *x@tlsdesc(%rax)
1406 */
1407 if (offset + 2 <= sec->size)
1408 {
1409 /* Make sure that it's a call *x@tlsdesc(%rax). */
1410 static const unsigned char call[] = { 0xff, 0x10 };
1411 return memcmp (contents + offset, call, 2) == 0;
1412 }
1413
1414 return FALSE;
1415
1416 default:
1417 abort ();
1418 }
1419 }
1420
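/* Concretely, the IE check above accepts byte patterns such as

     48 8b 05 xx xx xx xx	movq foo@gottpoff(%rip), %rax
     4c 03 25 xx xx xx xx	addq foo@gottpoff(%rip), %r12

   where the REX byte is 0x48 or 0x4c (or, for x32, 0x44 or absent), the
   opcode is 0x8b (mov) or 0x03 (add), and a ModRM byte with
   (modrm & 0xc7) == 5 selects RIP-relative addressing with a 32-bit
   displacement.  Hand-assembled illustrations; the checks above are the
   real definition.  */
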
1421 /* Return TRUE if the TLS access transition is OK or no transition
1422 will be performed. Update R_TYPE if there is a transition. */
1423
1424 static bfd_boolean
1425 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1426 asection *sec, bfd_byte *contents,
1427 Elf_Internal_Shdr *symtab_hdr,
1428 struct elf_link_hash_entry **sym_hashes,
1429 unsigned int *r_type, int tls_type,
1430 const Elf_Internal_Rela *rel,
1431 const Elf_Internal_Rela *relend,
1432 struct elf_link_hash_entry *h,
1433 unsigned long r_symndx)
1434 {
1435 unsigned int from_type = *r_type;
1436 unsigned int to_type = from_type;
1437 bfd_boolean check = TRUE;
1438
1439 /* Skip TLS transition for functions. */
1440 if (h != NULL
1441 && (h->type == STT_FUNC
1442 || h->type == STT_GNU_IFUNC))
1443 return TRUE;
1444
1445 switch (from_type)
1446 {
1447 case R_X86_64_TLSGD:
1448 case R_X86_64_GOTPC32_TLSDESC:
1449 case R_X86_64_TLSDESC_CALL:
1450 case R_X86_64_GOTTPOFF:
1451 if (bfd_link_executable (info))
1452 {
1453 if (h == NULL)
1454 to_type = R_X86_64_TPOFF32;
1455 else
1456 to_type = R_X86_64_GOTTPOFF;
1457 }
1458
1459 /* When we are called from elf_x86_64_relocate_section,
1460 CONTENTS isn't NULL and there may be additional transitions
1461 based on TLS_TYPE. */
1462 if (contents != NULL)
1463 {
1464 unsigned int new_to_type = to_type;
1465
1466 if (bfd_link_executable (info)
1467 && h != NULL
1468 && h->dynindx == -1
1469 && tls_type == GOT_TLS_IE)
1470 new_to_type = R_X86_64_TPOFF32;
1471
1472 if (to_type == R_X86_64_TLSGD
1473 || to_type == R_X86_64_GOTPC32_TLSDESC
1474 || to_type == R_X86_64_TLSDESC_CALL)
1475 {
1476 if (tls_type == GOT_TLS_IE)
1477 new_to_type = R_X86_64_GOTTPOFF;
1478 }
1479
1480 /* We checked the transition before when we were called from
1481 elf_x86_64_check_relocs. We only want to check the new
1482 transition which hasn't been checked before. */
1483 check = new_to_type != to_type && from_type == to_type;
1484 to_type = new_to_type;
1485 }
1486
1487 break;
1488
1489 case R_X86_64_TLSLD:
1490 if (bfd_link_executable (info))
1491 to_type = R_X86_64_TPOFF32;
1492 break;
1493
1494 default:
1495 return TRUE;
1496 }
1497
1498 /* Return TRUE if there is no transition. */
1499 if (from_type == to_type)
1500 return TRUE;
1501
1502 /* Check if the transition can be performed. */
1503 if (check
1504 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1505 symtab_hdr, sym_hashes,
1506 from_type, rel, relend))
1507 {
1508 reloc_howto_type *from, *to;
1509 const char *name;
1510
1511 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1512 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1513
1514 if (h)
1515 name = h->root.root.string;
1516 else
1517 {
1518 struct elf_x86_64_link_hash_table *htab;
1519
1520 htab = elf_x86_64_hash_table (info);
1521 if (htab == NULL)
1522 name = "*unknown*";
1523 else
1524 {
1525 Elf_Internal_Sym *isym;
1526
1527 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1528 abfd, r_symndx);
1529 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1530 }
1531 }
1532
1533 (*_bfd_error_handler)
1534 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1535 "in section `%A' failed"),
1536 abfd, sec, from->name, to->name, name,
1537 (unsigned long) rel->r_offset);
1538 bfd_set_error (bfd_error_bad_value);
1539 return FALSE;
1540 }
1541
1542 *r_type = to_type;
1543 return TRUE;
1544 }
1545
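/* A summary of the transitions the routine above can pick: when linking
   an executable, R_X86_64_TLSGD, the GOTPC32_TLSDESC/TLSDESC_CALL pair
   and R_X86_64_GOTTPOFF relax to R_X86_64_TPOFF32 (local-exec) for a
   symbol that resolves locally, or to R_X86_64_GOTTPOFF (initial-exec)
   otherwise, and R_X86_64_TLSLD always relaxes to R_X86_64_TPOFF32.  When
   not linking an executable, the only change made is GD/GDesc to
   R_X86_64_GOTTPOFF once the symbol's GOT entry is known to be IE-only.
   Relocations against STT_FUNC and STT_GNU_IFUNC symbols are never
   touched.  This restates the logic above rather than adding to it.  */
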
1546 /* Rename some of the generic section flags to better document how they
1547 are used here. */
1548 #define need_convert_mov_to_lea sec_flg0
1549
1550 /* Look through the relocs for a section during the first phase, and
1551 calculate needed space in the global offset table, procedure
1552 linkage table, and dynamic reloc sections. */
1553
1554 static bfd_boolean
1555 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1556 asection *sec,
1557 const Elf_Internal_Rela *relocs)
1558 {
1559 struct elf_x86_64_link_hash_table *htab;
1560 Elf_Internal_Shdr *symtab_hdr;
1561 struct elf_link_hash_entry **sym_hashes;
1562 const Elf_Internal_Rela *rel;
1563 const Elf_Internal_Rela *rel_end;
1564 asection *sreloc;
1565 bfd_boolean use_plt_got;
1566
1567 if (bfd_link_relocatable (info))
1568 return TRUE;
1569
1570 BFD_ASSERT (is_x86_64_elf (abfd));
1571
1572 htab = elf_x86_64_hash_table (info);
1573 if (htab == NULL)
1574 return FALSE;
1575
1576 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1577
1578 symtab_hdr = &elf_symtab_hdr (abfd);
1579 sym_hashes = elf_sym_hashes (abfd);
1580
1581 sreloc = NULL;
1582
1583 rel_end = relocs + sec->reloc_count;
1584 for (rel = relocs; rel < rel_end; rel++)
1585 {
1586 unsigned int r_type;
1587 unsigned long r_symndx;
1588 struct elf_link_hash_entry *h;
1589 Elf_Internal_Sym *isym;
1590 const char *name;
1591 bfd_boolean size_reloc;
1592
1593 r_symndx = htab->r_sym (rel->r_info);
1594 r_type = ELF32_R_TYPE (rel->r_info);
1595
1596 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1597 {
1598 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1599 abfd, r_symndx);
1600 return FALSE;
1601 }
1602
1603 if (r_symndx < symtab_hdr->sh_info)
1604 {
1605 /* A local symbol. */
1606 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1607 abfd, r_symndx);
1608 if (isym == NULL)
1609 return FALSE;
1610
1611 /* Check relocation against local STT_GNU_IFUNC symbol. */
1612 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1613 {
1614 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1615 TRUE);
1616 if (h == NULL)
1617 return FALSE;
1618
1619 /* Fake a STT_GNU_IFUNC symbol. */
1620 h->type = STT_GNU_IFUNC;
1621 h->def_regular = 1;
1622 h->ref_regular = 1;
1623 h->forced_local = 1;
1624 h->root.type = bfd_link_hash_defined;
1625 }
1626 else
1627 h = NULL;
1628 }
1629 else
1630 {
1631 isym = NULL;
1632 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1633 while (h->root.type == bfd_link_hash_indirect
1634 || h->root.type == bfd_link_hash_warning)
1635 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1636 }
1637
1638 /* Check invalid x32 relocations. */
1639 if (!ABI_64_P (abfd))
1640 switch (r_type)
1641 {
1642 default:
1643 break;
1644
1645 case R_X86_64_DTPOFF64:
1646 case R_X86_64_TPOFF64:
1647 case R_X86_64_PC64:
1648 case R_X86_64_GOTOFF64:
1649 case R_X86_64_GOT64:
1650 case R_X86_64_GOTPCREL64:
1651 case R_X86_64_GOTPC64:
1652 case R_X86_64_GOTPLT64:
1653 case R_X86_64_PLTOFF64:
1654 {
1655 if (h)
1656 name = h->root.root.string;
1657 else
1658 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1659 NULL);
1660 (*_bfd_error_handler)
1661 (_("%B: relocation %s against symbol `%s' isn't "
1662 "supported in x32 mode"), abfd,
1663 x86_64_elf_howto_table[r_type].name, name);
1664 bfd_set_error (bfd_error_bad_value);
1665 return FALSE;
1666 }
1667 break;
1668 }
1669
1670 if (h != NULL)
1671 {
 1672 /* Create the ifunc sections for static executables. If we
 1673 never see an indirect function symbol nor are we building
 1674 a static executable, those sections will be empty and
 1675 won't appear in the output. */
1676 switch (r_type)
1677 {
1678 default:
1679 break;
1680
1681 case R_X86_64_PC32_BND:
1682 case R_X86_64_PLT32_BND:
1683 case R_X86_64_PC32:
1684 case R_X86_64_PLT32:
1685 case R_X86_64_32:
1686 case R_X86_64_64:
1687 /* MPX PLT is supported only if elf_x86_64_arch_bed
1688 is used in 64-bit mode. */
1689 if (ABI_64_P (abfd)
1690 && info->bndplt
1691 && (get_elf_x86_64_backend_data (abfd)
1692 == &elf_x86_64_arch_bed))
1693 {
1694 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1695
1696 /* Create the second PLT for Intel MPX support. */
1697 if (htab->plt_bnd == NULL)
1698 {
1699 unsigned int plt_bnd_align;
1700 const struct elf_backend_data *bed;
1701
1702 bed = get_elf_backend_data (info->output_bfd);
1703 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1704 && (sizeof (elf_x86_64_bnd_plt2_entry)
1705 == sizeof (elf_x86_64_legacy_plt2_entry)));
1706 plt_bnd_align = 3;
1707
1708 if (htab->elf.dynobj == NULL)
1709 htab->elf.dynobj = abfd;
1710 htab->plt_bnd
1711 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1712 ".plt.bnd",
1713 (bed->dynamic_sec_flags
1714 | SEC_ALLOC
1715 | SEC_CODE
1716 | SEC_LOAD
1717 | SEC_READONLY));
1718 if (htab->plt_bnd == NULL
1719 || !bfd_set_section_alignment (htab->elf.dynobj,
1720 htab->plt_bnd,
1721 plt_bnd_align))
1722 return FALSE;
1723 }
1724 }
1725
1726 case R_X86_64_32S:
1727 case R_X86_64_PC64:
1728 case R_X86_64_GOTPCREL:
1729 case R_X86_64_GOTPCREL64:
1730 if (htab->elf.dynobj == NULL)
1731 htab->elf.dynobj = abfd;
1732 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1733 return FALSE;
1734 break;
1735 }
1736
1737 /* It is referenced by a non-shared object. */
1738 h->ref_regular = 1;
1739 h->root.non_ir_ref = 1;
1740
1741 if (h->type == STT_GNU_IFUNC)
1742 elf_tdata (info->output_bfd)->has_gnu_symbols
1743 |= elf_gnu_symbol_ifunc;
1744 }
1745
1746 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1747 symtab_hdr, sym_hashes,
1748 &r_type, GOT_UNKNOWN,
1749 rel, rel_end, h, r_symndx))
1750 return FALSE;
1751
1752 switch (r_type)
1753 {
1754 case R_X86_64_TLSLD:
1755 htab->tls_ld_got.refcount += 1;
1756 goto create_got;
1757
1758 case R_X86_64_TPOFF32:
1759 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1760 {
1761 if (h)
1762 name = h->root.root.string;
1763 else
1764 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1765 NULL);
1766 (*_bfd_error_handler)
1767 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1768 abfd,
1769 x86_64_elf_howto_table[r_type].name, name);
1770 bfd_set_error (bfd_error_bad_value);
1771 return FALSE;
1772 }
1773 break;
1774
1775 case R_X86_64_GOTTPOFF:
1776 if (!bfd_link_executable (info))
1777 info->flags |= DF_STATIC_TLS;
1778 /* Fall through */
1779
1780 case R_X86_64_GOT32:
1781 case R_X86_64_GOTPCREL:
1782 case R_X86_64_TLSGD:
1783 case R_X86_64_GOT64:
1784 case R_X86_64_GOTPCREL64:
1785 case R_X86_64_GOTPLT64:
1786 case R_X86_64_GOTPC32_TLSDESC:
1787 case R_X86_64_TLSDESC_CALL:
1788 /* This symbol requires a global offset table entry. */
1789 {
1790 int tls_type, old_tls_type;
1791
1792 switch (r_type)
1793 {
1794 default: tls_type = GOT_NORMAL; break;
1795 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1796 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1797 case R_X86_64_GOTPC32_TLSDESC:
1798 case R_X86_64_TLSDESC_CALL:
1799 tls_type = GOT_TLS_GDESC; break;
1800 }
1801
1802 if (h != NULL)
1803 {
1804 h->got.refcount += 1;
1805 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1806 }
1807 else
1808 {
1809 bfd_signed_vma *local_got_refcounts;
1810
1811 /* This is a global offset table entry for a local symbol. */
1812 local_got_refcounts = elf_local_got_refcounts (abfd);
1813 if (local_got_refcounts == NULL)
1814 {
1815 bfd_size_type size;
1816
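/* A single allocation holds three per-local-symbol arrays laid
   out back to back: GOT reference counts (bfd_signed_vma),
   TLSDESC GOT offsets (bfd_vma) and GOT TLS types (char).  */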
1817 size = symtab_hdr->sh_info;
1818 size *= sizeof (bfd_signed_vma)
1819 + sizeof (bfd_vma) + sizeof (char);
1820 local_got_refcounts = ((bfd_signed_vma *)
1821 bfd_zalloc (abfd, size));
1822 if (local_got_refcounts == NULL)
1823 return FALSE;
1824 elf_local_got_refcounts (abfd) = local_got_refcounts;
1825 elf_x86_64_local_tlsdesc_gotent (abfd)
1826 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1827 elf_x86_64_local_got_tls_type (abfd)
1828 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1829 }
1830 local_got_refcounts[r_symndx] += 1;
1831 old_tls_type
1832 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1833 }
1834
1835 /* If a TLS symbol is accessed using IE at least once,
1836 there is no point in using the dynamic model for it. */
1837 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1838 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1839 || tls_type != GOT_TLS_IE))
1840 {
1841 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1842 tls_type = old_tls_type;
1843 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1844 && GOT_TLS_GD_ANY_P (tls_type))
1845 tls_type |= old_tls_type;
1846 else
1847 {
1848 if (h)
1849 name = h->root.root.string;
1850 else
1851 name = bfd_elf_sym_name (abfd, symtab_hdr,
1852 isym, NULL);
1853 (*_bfd_error_handler)
1854 (_("%B: '%s' accessed both as normal and thread local symbol"),
1855 abfd, name);
1856 bfd_set_error (bfd_error_bad_value);
1857 return FALSE;
1858 }
1859 }
1860
1861 if (old_tls_type != tls_type)
1862 {
1863 if (h != NULL)
1864 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1865 else
1866 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1867 }
1868 }
1869 /* Fall through */
1870
1871 case R_X86_64_GOTOFF64:
1872 case R_X86_64_GOTPC32:
1873 case R_X86_64_GOTPC64:
1874 create_got:
1875 if (htab->elf.sgot == NULL)
1876 {
1877 if (htab->elf.dynobj == NULL)
1878 htab->elf.dynobj = abfd;
1879 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1880 info))
1881 return FALSE;
1882 }
1883 break;
1884
1885 case R_X86_64_PLT32:
1886 case R_X86_64_PLT32_BND:
1887 /* This symbol requires a procedure linkage table entry. We
1888 actually build the entry in adjust_dynamic_symbol,
1889 because this might be a case of linking PIC code which is
1890 never referenced by a dynamic object, in which case we
1891 don't need to generate a procedure linkage table entry
1892 after all. */
1893
1894 /* If this is a local symbol, we resolve it directly without
1895 creating a procedure linkage table entry. */
1896 if (h == NULL)
1897 continue;
1898
1899 h->needs_plt = 1;
1900 h->plt.refcount += 1;
1901 break;
1902
1903 case R_X86_64_PLTOFF64:
1904 /* This tries to form the 'address' of a function relative
1905 to the GOT. For global symbols we need a PLT entry. */
1906 if (h != NULL)
1907 {
1908 h->needs_plt = 1;
1909 h->plt.refcount += 1;
1910 }
1911 goto create_got;
1912
1913 case R_X86_64_SIZE32:
1914 case R_X86_64_SIZE64:
1915 size_reloc = TRUE;
1916 goto do_size;
1917
1918 case R_X86_64_32:
1919 if (!ABI_64_P (abfd))
1920 goto pointer;
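/* Fall through. */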
1921 case R_X86_64_8:
1922 case R_X86_64_16:
1923 case R_X86_64_32S:
1924 /* Let's help debug shared library creation. These relocs
1925 cannot be used in shared libs. Don't error out for
1926 sections we don't care about, such as debug sections or
1927 non-constant sections. */
1928 if (bfd_link_pic (info)
1929 && (sec->flags & SEC_ALLOC) != 0
1930 && (sec->flags & SEC_READONLY) != 0)
1931 {
1932 if (h)
1933 name = h->root.root.string;
1934 else
1935 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1936 (*_bfd_error_handler)
1937 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1938 abfd, x86_64_elf_howto_table[r_type].name, name);
1939 bfd_set_error (bfd_error_bad_value);
1940 return FALSE;
1941 }
1942 /* Fall through. */
1943
1944 case R_X86_64_PC8:
1945 case R_X86_64_PC16:
1946 case R_X86_64_PC32:
1947 case R_X86_64_PC32_BND:
1948 case R_X86_64_PC64:
1949 case R_X86_64_64:
1950 pointer:
1951 if (h != NULL && bfd_link_executable (info))
1952 {
1953 /* If this reloc is in a read-only section, we might
1954 need a copy reloc. We can't check reliably at this
1955 stage whether the section is read-only, as input
1956 sections have not yet been mapped to output sections.
1957 Tentatively set the flag for now, and correct in
1958 adjust_dynamic_symbol. */
1959 h->non_got_ref = 1;
1960
1961 /* We may need a .plt entry if the function this reloc
1962 refers to is in a shared lib. */
1963 h->plt.refcount += 1;
1964 if (r_type == R_X86_64_PC32)
1965 {
1966 /* Since something like ".long foo - ." may be used
1967 as pointer, make sure that PLT is used if foo is
1968 a function defined in a shared library. */
1969 if ((sec->flags & SEC_CODE) == 0)
1970 h->pointer_equality_needed = 1;
1971 }
1972 else if (r_type != R_X86_64_PC32_BND
1973 && r_type != R_X86_64_PC64)
1974 {
1975 h->pointer_equality_needed = 1;
1976 /* At run-time, R_X86_64_64 can be resolved for both
1977 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
1978 can only be resolved for x32. */
1979 if ((sec->flags & SEC_READONLY) == 0
1980 && (r_type == R_X86_64_64
1981 || (!ABI_64_P (abfd)
1982 && (r_type == R_X86_64_32
1983 || r_type == R_X86_64_32S))))
1984 {
1985 struct elf_x86_64_link_hash_entry *eh
1986 = (struct elf_x86_64_link_hash_entry *) h;
1987 eh->func_pointer_refcount += 1;
1988 }
1989 }
1990 }
1991
1992 size_reloc = FALSE;
1993 do_size:
1994 /* If we are creating a shared library, and this is a reloc
1995 against a global symbol, or a non PC relative reloc
1996 against a local symbol, then we need to copy the reloc
1997 into the shared library. However, if we are linking with
1998 -Bsymbolic, we do not need to copy a reloc against a
1999 global symbol which is defined in an object we are
2000 including in the link (i.e., DEF_REGULAR is set). At
2001 this point we have not seen all the input files, so it is
2002 possible that DEF_REGULAR is not set now but will be set
2003 later (it is never cleared). In case of a weak definition,
2004 DEF_REGULAR may be cleared later by a strong definition in
2005 a shared library. We account for that possibility below by
2006 storing information in the relocs_copied field of the hash
2007 table entry. A similar situation occurs when creating
2008 shared libraries and symbol visibility changes render the
2009 symbol local.
2010
2011 If on the other hand, we are creating an executable, we
2012 may need to keep relocations for symbols satisfied by a
2013 dynamic library if we manage to avoid copy relocs for the
2014 symbol. */
2015 if ((bfd_link_pic (info)
2016 && (sec->flags & SEC_ALLOC) != 0
2017 && (! IS_X86_64_PCREL_TYPE (r_type)
2018 || (h != NULL
2019 && (! SYMBOLIC_BIND (info, h)
2020 || h->root.type == bfd_link_hash_defweak
2021 || !h->def_regular))))
2022 || (ELIMINATE_COPY_RELOCS
2023 && !bfd_link_pic (info)
2024 && (sec->flags & SEC_ALLOC) != 0
2025 && h != NULL
2026 && (h->root.type == bfd_link_hash_defweak
2027 || !h->def_regular)))
2028 {
2029 struct elf_dyn_relocs *p;
2030 struct elf_dyn_relocs **head;
2031
2032 /* We must copy these reloc types into the output file.
2033 Create a reloc section in dynobj and make room for
2034 this reloc. */
2035 if (sreloc == NULL)
2036 {
2037 if (htab->elf.dynobj == NULL)
2038 htab->elf.dynobj = abfd;
2039
2040 sreloc = _bfd_elf_make_dynamic_reloc_section
2041 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2042 abfd, /*rela?*/ TRUE);
2043
2044 if (sreloc == NULL)
2045 return FALSE;
2046 }
2047
2048 /* If this is a global symbol, we count the number of
2049 relocations we need for this symbol. */
2050 if (h != NULL)
2051 {
2052 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2053 }
2054 else
2055 {
2056 /* Track dynamic relocs needed for local syms too.
2057 We really need local syms available to do this
2058 easily. Oh well. */
2059 asection *s;
2060 void **vpp;
2061
2062 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2063 abfd, r_symndx);
2064 if (isym == NULL)
2065 return FALSE;
2066
2067 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2068 if (s == NULL)
2069 s = sec;
2070
2071 /* Beware of type punned pointers vs strict aliasing
2072 rules. */
2073 vpp = &(elf_section_data (s)->local_dynrel);
2074 head = (struct elf_dyn_relocs **)vpp;
2075 }
2076
2077 p = *head;
2078 if (p == NULL || p->sec != sec)
2079 {
2080 bfd_size_type amt = sizeof *p;
2081
2082 p = ((struct elf_dyn_relocs *)
2083 bfd_alloc (htab->elf.dynobj, amt));
2084 if (p == NULL)
2085 return FALSE;
2086 p->next = *head;
2087 *head = p;
2088 p->sec = sec;
2089 p->count = 0;
2090 p->pc_count = 0;
2091 }
2092
2093 p->count += 1;
2094 /* Count size relocation as PC-relative relocation. */
2095 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2096 p->pc_count += 1;
2097 }
2098 break;
2099
2100 /* This relocation describes the C++ object vtable hierarchy.
2101 Reconstruct it for later use during GC. */
2102 case R_X86_64_GNU_VTINHERIT:
2103 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2104 return FALSE;
2105 break;
2106
2107 /* This relocation describes which C++ vtable entries are actually
2108 used. Record for later use during GC. */
2109 case R_X86_64_GNU_VTENTRY:
2110 BFD_ASSERT (h != NULL);
2111 if (h != NULL
2112 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2113 return FALSE;
2114 break;
2115
2116 default:
2117 break;
2118 }
2119
2120 if (use_plt_got
2121 && h != NULL
2122 && h->plt.refcount > 0
2123 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2124 || h->got.refcount > 0)
2125 && htab->plt_got == NULL)
2126 {
2127 /* Create the GOT procedure linkage table. */
2128 unsigned int plt_got_align;
2129 const struct elf_backend_data *bed;
2130
2131 bed = get_elf_backend_data (info->output_bfd);
2132 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2133 && (sizeof (elf_x86_64_bnd_plt2_entry)
2134 == sizeof (elf_x86_64_legacy_plt2_entry)));
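/* Align .plt.got to 2**3 = 8 bytes, the size of its entries.  */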
2135 plt_got_align = 3;
2136
2137 if (htab->elf.dynobj == NULL)
2138 htab->elf.dynobj = abfd;
2139 htab->plt_got
2140 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2141 ".plt.got",
2142 (bed->dynamic_sec_flags
2143 | SEC_ALLOC
2144 | SEC_CODE
2145 | SEC_LOAD
2146 | SEC_READONLY));
2147 if (htab->plt_got == NULL
2148 || !bfd_set_section_alignment (htab->elf.dynobj,
2149 htab->plt_got,
2150 plt_got_align))
2151 return FALSE;
2152 }
2153
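/* Remember that this section has a GOTPCREL relocation that
   elf_x86_64_convert_mov_to_lea may later turn from a GOT load
   into an lea.  */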
2154 if (r_type == R_X86_64_GOTPCREL
2155 && (h == NULL || h->type != STT_GNU_IFUNC))
2156 sec->need_convert_mov_to_lea = 1;
2157 }
2158
2159 return TRUE;
2160 }
2161
2162 /* Return the section that should be marked against GC for a given
2163 relocation. */
2164
2165 static asection *
2166 elf_x86_64_gc_mark_hook (asection *sec,
2167 struct bfd_link_info *info,
2168 Elf_Internal_Rela *rel,
2169 struct elf_link_hash_entry *h,
2170 Elf_Internal_Sym *sym)
2171 {
2172 if (h != NULL)
2173 switch (ELF32_R_TYPE (rel->r_info))
2174 {
2175 case R_X86_64_GNU_VTINHERIT:
2176 case R_X86_64_GNU_VTENTRY:
2177 return NULL;
2178 }
2179
2180 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2181 }
2182
2183 /* Update the got entry reference counts for the section being removed. */
2184
2185 static bfd_boolean
2186 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2187 asection *sec,
2188 const Elf_Internal_Rela *relocs)
2189 {
2190 struct elf_x86_64_link_hash_table *htab;
2191 Elf_Internal_Shdr *symtab_hdr;
2192 struct elf_link_hash_entry **sym_hashes;
2193 bfd_signed_vma *local_got_refcounts;
2194 const Elf_Internal_Rela *rel, *relend;
2195
2196 if (bfd_link_relocatable (info))
2197 return TRUE;
2198
2199 htab = elf_x86_64_hash_table (info);
2200 if (htab == NULL)
2201 return FALSE;
2202
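/* The dynamic relocs recorded against local symbols in this
   section are no longer needed once the section is swept.  */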
2203 elf_section_data (sec)->local_dynrel = NULL;
2204
2205 symtab_hdr = &elf_symtab_hdr (abfd);
2206 sym_hashes = elf_sym_hashes (abfd);
2207 local_got_refcounts = elf_local_got_refcounts (abfd);
2208
2209 htab = elf_x86_64_hash_table (info);
2210 relend = relocs + sec->reloc_count;
2211 for (rel = relocs; rel < relend; rel++)
2212 {
2213 unsigned long r_symndx;
2214 unsigned int r_type;
2215 struct elf_link_hash_entry *h = NULL;
2216 bfd_boolean pointer_reloc;
2217
2218 r_symndx = htab->r_sym (rel->r_info);
2219 if (r_symndx >= symtab_hdr->sh_info)
2220 {
2221 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2222 while (h->root.type == bfd_link_hash_indirect
2223 || h->root.type == bfd_link_hash_warning)
2224 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2225 }
2226 else
2227 {
2228 /* A local symbol. */
2229 Elf_Internal_Sym *isym;
2230
2231 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2232 abfd, r_symndx);
2233
2234 /* Check relocation against local STT_GNU_IFUNC symbol. */
2235 if (isym != NULL
2236 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2237 {
2238 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2239 if (h == NULL)
2240 abort ();
2241 }
2242 }
2243
2244 if (h)
2245 {
2246 struct elf_x86_64_link_hash_entry *eh;
2247 struct elf_dyn_relocs **pp;
2248 struct elf_dyn_relocs *p;
2249
2250 eh = (struct elf_x86_64_link_hash_entry *) h;
2251
2252 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2253 if (p->sec == sec)
2254 {
2255 /* Everything must go for SEC. */
2256 *pp = p->next;
2257 break;
2258 }
2259 }
2260
2261 r_type = ELF32_R_TYPE (rel->r_info);
2262 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2263 symtab_hdr, sym_hashes,
2264 &r_type, GOT_UNKNOWN,
2265 rel, relend, h, r_symndx))
2266 return FALSE;
2267
2268 pointer_reloc = FALSE;
2269 switch (r_type)
2270 {
2271 case R_X86_64_TLSLD:
2272 if (htab->tls_ld_got.refcount > 0)
2273 htab->tls_ld_got.refcount -= 1;
2274 break;
2275
2276 case R_X86_64_TLSGD:
2277 case R_X86_64_GOTPC32_TLSDESC:
2278 case R_X86_64_TLSDESC_CALL:
2279 case R_X86_64_GOTTPOFF:
2280 case R_X86_64_GOT32:
2281 case R_X86_64_GOTPCREL:
2282 case R_X86_64_GOT64:
2283 case R_X86_64_GOTPCREL64:
2284 case R_X86_64_GOTPLT64:
2285 if (h != NULL)
2286 {
2287 if (h->got.refcount > 0)
2288 h->got.refcount -= 1;
2289 if (h->type == STT_GNU_IFUNC)
2290 {
2291 if (h->plt.refcount > 0)
2292 h->plt.refcount -= 1;
2293 }
2294 }
2295 else if (local_got_refcounts != NULL)
2296 {
2297 if (local_got_refcounts[r_symndx] > 0)
2298 local_got_refcounts[r_symndx] -= 1;
2299 }
2300 break;
2301
2302 case R_X86_64_32:
2303 case R_X86_64_32S:
2304 pointer_reloc = !ABI_64_P (abfd);
2305 goto pointer;
2306
2307 case R_X86_64_64:
2308 pointer_reloc = TRUE;
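/* Fall through. */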
2309 case R_X86_64_8:
2310 case R_X86_64_16:
2311 case R_X86_64_PC8:
2312 case R_X86_64_PC16:
2313 case R_X86_64_PC32:
2314 case R_X86_64_PC32_BND:
2315 case R_X86_64_PC64:
2316 case R_X86_64_SIZE32:
2317 case R_X86_64_SIZE64:
2318 pointer:
2319 if (bfd_link_pic (info)
2320 && (h == NULL || h->type != STT_GNU_IFUNC))
2321 break;
2322 /* Fall through. */
2323
2324 case R_X86_64_PLT32:
2325 case R_X86_64_PLT32_BND:
2326 case R_X86_64_PLTOFF64:
2327 if (h != NULL)
2328 {
2329 if (h->plt.refcount > 0)
2330 h->plt.refcount -= 1;
2331 if (pointer_reloc && (sec->flags & SEC_READONLY) == 0)
2332 {
2333 struct elf_x86_64_link_hash_entry *eh
2334 = (struct elf_x86_64_link_hash_entry *) h;
2335 if (eh->func_pointer_refcount > 0)
2336 eh->func_pointer_refcount -= 1;
2337 }
2338 }
2339 break;
2340
2341 default:
2342 break;
2343 }
2344 }
2345
2346 return TRUE;
2347 }
2348
2349 /* Adjust a symbol defined by a dynamic object and referenced by a
2350 regular object. The current definition is in some section of the
2351 dynamic object, but we're not including those sections. We have to
2352 change the definition to something the rest of the link can
2353 understand. */
2354
2355 static bfd_boolean
2356 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2357 struct elf_link_hash_entry *h)
2358 {
2359 struct elf_x86_64_link_hash_table *htab;
2360 asection *s;
2361 struct elf_x86_64_link_hash_entry *eh;
2362 struct elf_dyn_relocs *p;
2363
2364 /* An STT_GNU_IFUNC symbol must go through the PLT. */
2365 if (h->type == STT_GNU_IFUNC)
2366 {
2367 /* All local STT_GNU_IFUNC references must be treated as local
2368 calls via the local PLT. */
2369 if (h->ref_regular
2370 && SYMBOL_CALLS_LOCAL (info, h))
2371 {
2372 bfd_size_type pc_count = 0, count = 0;
2373 struct elf_dyn_relocs **pp;
2374
2375 eh = (struct elf_x86_64_link_hash_entry *) h;
2376 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2377 {
2378 pc_count += p->pc_count;
2379 p->count -= p->pc_count;
2380 p->pc_count = 0;
2381 count += p->count;
2382 if (p->count == 0)
2383 *pp = p->next;
2384 else
2385 pp = &p->next;
2386 }
2387
2388 if (pc_count || count)
2389 {
2390 h->needs_plt = 1;
2391 h->non_got_ref = 1;
2392 if (h->plt.refcount <= 0)
2393 h->plt.refcount = 1;
2394 else
2395 h->plt.refcount += 1;
2396 }
2397 }
2398
2399 if (h->plt.refcount <= 0)
2400 {
2401 h->plt.offset = (bfd_vma) -1;
2402 h->needs_plt = 0;
2403 }
2404 return TRUE;
2405 }
2406
2407 /* If this is a function, put it in the procedure linkage table. We
2408 will fill in the contents of the procedure linkage table later,
2409 when we know the address of the .got section. */
2410 if (h->type == STT_FUNC
2411 || h->needs_plt)
2412 {
2413 if (h->plt.refcount <= 0
2414 || SYMBOL_CALLS_LOCAL (info, h)
2415 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2416 && h->root.type == bfd_link_hash_undefweak))
2417 {
2418 /* This case can occur if we saw a PLT32 reloc in an input
2419 file, but the symbol was never referred to by a dynamic
2420 object, or if all references were garbage collected. In
2421 such a case, we don't actually need to build a procedure
2422 linkage table, and we can just do a PC32 reloc instead. */
2423 h->plt.offset = (bfd_vma) -1;
2424 h->needs_plt = 0;
2425 }
2426
2427 return TRUE;
2428 }
2429 else
2430 /* It's possible that we incorrectly decided a .plt reloc was
2431 needed for an R_X86_64_PC32 reloc to a non-function sym in
2432 check_relocs. We can't decide accurately between function and
2433 non-function syms in check_relocs; objects loaded later in
2434 the link may change h->type. So fix it now. */
2435 h->plt.offset = (bfd_vma) -1;
2436
2437 /* If this is a weak symbol, and there is a real definition, the
2438 processor independent code will have arranged for us to see the
2439 real definition first, and we can just use the same value. */
2440 if (h->u.weakdef != NULL)
2441 {
2442 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2443 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2444 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2445 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2446 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2447 {
2448 eh = (struct elf_x86_64_link_hash_entry *) h;
2449 h->non_got_ref = h->u.weakdef->non_got_ref;
2450 eh->needs_copy = h->u.weakdef->needs_copy;
2451 }
2452 return TRUE;
2453 }
2454
2455 /* This is a reference to a symbol defined by a dynamic object which
2456 is not a function. */
2457
2458 /* If we are creating a shared library, we must presume that the
2459 only references to the symbol are via the global offset table.
2460 For such cases we need not do anything here; the relocations will
2461 be handled correctly by relocate_section. */
2462 if (!bfd_link_executable (info))
2463 return TRUE;
2464
2465 /* If there are no references to this symbol that do not use the
2466 GOT, we don't need to generate a copy reloc. */
2467 if (!h->non_got_ref)
2468 return TRUE;
2469
2470 /* If -z nocopyreloc was given, we won't generate them either. */
2471 if (info->nocopyreloc)
2472 {
2473 h->non_got_ref = 0;
2474 return TRUE;
2475 }
2476
2477 if (ELIMINATE_COPY_RELOCS)
2478 {
2479 eh = (struct elf_x86_64_link_hash_entry *) h;
2480 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2481 {
2482 s = p->sec->output_section;
2483 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2484 break;
2485 }
2486
2487 /* If we didn't find any dynamic relocs in read-only sections, then
2488 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2489 if (p == NULL)
2490 {
2491 h->non_got_ref = 0;
2492 return TRUE;
2493 }
2494 }
2495
2496 /* We must allocate the symbol in our .dynbss section, which will
2497 become part of the .bss section of the executable. There will be
2498 an entry for this symbol in the .dynsym section. The dynamic
2499 object will contain position independent code, so all references
2500 from the dynamic object to this symbol will go through the global
2501 offset table. The dynamic linker will use the .dynsym entry to
2502 determine the address it must put in the global offset table, so
2503 both the dynamic object and the regular object will refer to the
2504 same memory location for the variable. */
2505
2506 htab = elf_x86_64_hash_table (info);
2507 if (htab == NULL)
2508 return FALSE;
2509
2510 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2511 to copy the initial value out of the dynamic object and into the
2512 runtime process image. */
2513 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2514 {
2515 const struct elf_backend_data *bed;
2516 bed = get_elf_backend_data (info->output_bfd);
2517 htab->srelbss->size += bed->s->sizeof_rela;
2518 h->needs_copy = 1;
2519 }
2520
2521 s = htab->sdynbss;
2522
2523 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2524 }
2525
2526 /* Allocate space in .plt, .got and associated reloc sections for
2527 dynamic relocs. */
2528
2529 static bfd_boolean
2530 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2531 {
2532 struct bfd_link_info *info;
2533 struct elf_x86_64_link_hash_table *htab;
2534 struct elf_x86_64_link_hash_entry *eh;
2535 struct elf_dyn_relocs *p;
2536 const struct elf_backend_data *bed;
2537 unsigned int plt_entry_size;
2538
2539 if (h->root.type == bfd_link_hash_indirect)
2540 return TRUE;
2541
2542 eh = (struct elf_x86_64_link_hash_entry *) h;
2543
2544 info = (struct bfd_link_info *) inf;
2545 htab = elf_x86_64_hash_table (info);
2546 if (htab == NULL)
2547 return FALSE;
2548 bed = get_elf_backend_data (info->output_bfd);
2549 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2550
2551 /* We can't use the GOT PLT if pointer equality is needed since
2552 finish_dynamic_symbol won't clear symbol value and the dynamic
2553 linker won't update the GOT slot. We will get into an infinite
2554 loop at run-time. */
2555 if (htab->plt_got != NULL
2556 && h->type != STT_GNU_IFUNC
2557 && !h->pointer_equality_needed
2558 && h->plt.refcount > 0
2559 && h->got.refcount > 0)
2560 {
2561 /* Don't use the regular PLT if there are both GOT and GOTPLT
2562 relocations. */
2563 h->plt.offset = (bfd_vma) -1;
2564
2565 /* Use the GOT PLT. */
2566 eh->plt_got.refcount = 1;
2567 }
2568
2569 /* Clear the reference count of function pointer relocations if
2570 symbol isn't a normal function. */
2571 if (h->type != STT_FUNC)
2572 eh->func_pointer_refcount = 0;
2573
2574 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
2575 here if it is defined and referenced in a non-shared object. */
2576 if (h->type == STT_GNU_IFUNC
2577 && h->def_regular)
2578 {
2579 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2580 &eh->dyn_relocs,
2581 plt_entry_size,
2582 plt_entry_size,
2583 GOT_ENTRY_SIZE))
2584 {
2585 asection *s = htab->plt_bnd;
2586 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2587 {
2588 /* Use the .plt.bnd section if it is created. */
2589 eh->plt_bnd.offset = s->size;
2590
2591 /* Make room for this entry in the .plt.bnd section. */
2592 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2593 }
2594
2595 return TRUE;
2596 }
2597 else
2598 return FALSE;
2599 }
2600 /* Don't create the PLT entry if there are only function pointer
2601 relocations which can be resolved at run-time. */
2602 else if (htab->elf.dynamic_sections_created
2603 && (h->plt.refcount > eh->func_pointer_refcount
2604 || eh->plt_got.refcount > 0))
2605 {
2606 bfd_boolean use_plt_got;
2607
2608 /* Clear the reference count of function pointer relocations
2609 if PLT is used. */
2610 eh->func_pointer_refcount = 0;
2611
2612 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2613 {
2614 /* Don't use the regular PLT for DF_BIND_NOW. */
2615 h->plt.offset = (bfd_vma) -1;
2616
2617 /* Use the GOT PLT. */
2618 h->got.refcount = 1;
2619 eh->plt_got.refcount = 1;
2620 }
2621
2622 use_plt_got = eh->plt_got.refcount > 0;
2623
2624 /* Make sure this symbol is output as a dynamic symbol.
2625 Undefined weak syms won't yet be marked as dynamic. */
2626 if (h->dynindx == -1
2627 && !h->forced_local)
2628 {
2629 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2630 return FALSE;
2631 }
2632
2633 if (bfd_link_pic (info)
2634 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2635 {
2636 asection *s = htab->elf.splt;
2637 asection *bnd_s = htab->plt_bnd;
2638 asection *got_s = htab->plt_got;
2639
2640 /* If this is the first .plt entry, make room for the special
2641 first entry. The .plt section is used by prelink to undo
2642 prelinking for dynamic relocations. */
2643 if (s->size == 0)
2644 s->size = plt_entry_size;
2645
2646 if (use_plt_got)
2647 eh->plt_got.offset = got_s->size;
2648 else
2649 {
2650 h->plt.offset = s->size;
2651 if (bnd_s)
2652 eh->plt_bnd.offset = bnd_s->size;
2653 }
2654
2655 /* If this symbol is not defined in a regular file, and we are
2656 not generating a shared library, then set the symbol to this
2657 location in the .plt. This is required to make function
2658 pointers compare as equal between the normal executable and
2659 the shared library. */
2660 if (! bfd_link_pic (info)
2661 && !h->def_regular)
2662 {
2663 if (use_plt_got)
2664 {
2665 /* We need to make a call to the entry of the GOT PLT
2666 instead of regular PLT entry. */
2667 h->root.u.def.section = got_s;
2668 h->root.u.def.value = eh->plt_got.offset;
2669 }
2670 else
2671 {
2672 if (bnd_s)
2673 {
2674 /* We need to make a call to the entry of the second
2675 PLT instead of regular PLT entry. */
2676 h->root.u.def.section = bnd_s;
2677 h->root.u.def.value = eh->plt_bnd.offset;
2678 }
2679 else
2680 {
2681 h->root.u.def.section = s;
2682 h->root.u.def.value = h->plt.offset;
2683 }
2684 }
2685 }
2686
2687 /* Make room for this entry. */
2688 if (use_plt_got)
2689 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2690 else
2691 {
2692 s->size += plt_entry_size;
2693 if (bnd_s)
2694 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2695
2696 /* We also need to make an entry in the .got.plt section,
2697 which will be placed in the .got section by the linker
2698 script. */
2699 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2700
2701 /* We also need to make an entry in the .rela.plt
2702 section. */
2703 htab->elf.srelplt->size += bed->s->sizeof_rela;
2704 htab->elf.srelplt->reloc_count++;
2705 }
2706 }
2707 else
2708 {
2709 h->plt.offset = (bfd_vma) -1;
2710 h->needs_plt = 0;
2711 }
2712 }
2713 else
2714 {
2715 h->plt.offset = (bfd_vma) -1;
2716 h->needs_plt = 0;
2717 }
2718
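/* Assume no TLS descriptor GOT slot; one is allocated below if
   this symbol uses the GDESC model.  */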
2719 eh->tlsdesc_got = (bfd_vma) -1;
2720
2721 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2722 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2723 if (h->got.refcount > 0
2724 && bfd_link_executable (info)
2725 && h->dynindx == -1
2726 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2727 {
2728 h->got.offset = (bfd_vma) -1;
2729 }
2730 else if (h->got.refcount > 0)
2731 {
2732 asection *s;
2733 bfd_boolean dyn;
2734 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2735
2736 /* Make sure this symbol is output as a dynamic symbol.
2737 Undefined weak syms won't yet be marked as dynamic. */
2738 if (h->dynindx == -1
2739 && !h->forced_local)
2740 {
2741 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2742 return FALSE;
2743 }
2744
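/* A TLS descriptor needs two GOT entries, carved out of .got.plt.  */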
2745 if (GOT_TLS_GDESC_P (tls_type))
2746 {
2747 eh->tlsdesc_got = htab->elf.sgotplt->size
2748 - elf_x86_64_compute_jump_table_size (htab);
2749 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2750 h->got.offset = (bfd_vma) -2;
2751 }
2752 if (! GOT_TLS_GDESC_P (tls_type)
2753 || GOT_TLS_GD_P (tls_type))
2754 {
2755 s = htab->elf.sgot;
2756 h->got.offset = s->size;
2757 s->size += GOT_ENTRY_SIZE;
2758 if (GOT_TLS_GD_P (tls_type))
2759 s->size += GOT_ENTRY_SIZE;
2760 }
2761 dyn = htab->elf.dynamic_sections_created;
2762 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2763 and two if global.
2764 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2765 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2766 || tls_type == GOT_TLS_IE)
2767 htab->elf.srelgot->size += bed->s->sizeof_rela;
2768 else if (GOT_TLS_GD_P (tls_type))
2769 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2770 else if (! GOT_TLS_GDESC_P (tls_type)
2771 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2772 || h->root.type != bfd_link_hash_undefweak)
2773 && (bfd_link_pic (info)
2774 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2775 htab->elf.srelgot->size += bed->s->sizeof_rela;
2776 if (GOT_TLS_GDESC_P (tls_type))
2777 {
2778 htab->elf.srelplt->size += bed->s->sizeof_rela;
2779 htab->tlsdesc_plt = (bfd_vma) -1;
2780 }
2781 }
2782 else
2783 h->got.offset = (bfd_vma) -1;
2784
2785 if (eh->dyn_relocs == NULL)
2786 return TRUE;
2787
2788 /* In the shared -Bsymbolic case, discard space allocated for
2789 dynamic pc-relative relocs against symbols which turn out to be
2790 defined in regular objects. For the normal shared case, discard
2791 space for pc-relative relocs that have become local due to symbol
2792 visibility changes. */
2793
2794 if (bfd_link_pic (info))
2795 {
2796 /* Relocs that use pc_count are those that appear on a call
2797 insn, or certain REL relocs that can be generated via assembly.
2798 We want calls to protected symbols to resolve directly to the
2799 function rather than going via the plt. If people want
2800 function pointer comparisons to work as expected then they
2801 should avoid writing weird assembly. */
2802 if (SYMBOL_CALLS_LOCAL (info, h))
2803 {
2804 struct elf_dyn_relocs **pp;
2805
2806 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2807 {
2808 p->count -= p->pc_count;
2809 p->pc_count = 0;
2810 if (p->count == 0)
2811 *pp = p->next;
2812 else
2813 pp = &p->next;
2814 }
2815 }
2816
2817 /* Also discard relocs on undefined weak syms with non-default
2818 visibility. */
2819 if (eh->dyn_relocs != NULL)
2820 {
2821 if (h->root.type == bfd_link_hash_undefweak)
2822 {
2823 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2824 eh->dyn_relocs = NULL;
2825
2826 /* Make sure undefined weak symbols are output as dynamic
2827 symbols in PIEs. */
2828 else if (h->dynindx == -1
2829 && ! h->forced_local
2830 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2831 return FALSE;
2832 }
2833 /* For PIE, discard space for pc-relative relocs against
2834 symbols which turn out to need copy relocs. */
2835 else if (bfd_link_executable (info)
2836 && (h->needs_copy || eh->needs_copy)
2837 && h->def_dynamic
2838 && !h->def_regular)
2839 {
2840 struct elf_dyn_relocs **pp;
2841
2842 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2843 {
2844 if (p->pc_count != 0)
2845 *pp = p->next;
2846 else
2847 pp = &p->next;
2848 }
2849 }
2850 }
2851 }
2852 else if (ELIMINATE_COPY_RELOCS)
2853 {
2854 /* For the non-shared case, discard space for relocs against
2855 symbols which turn out to need copy relocs or are not
2856 dynamic. Keep dynamic relocations for run-time function
2857 pointer initialization. */
2858
2859 if ((!h->non_got_ref || eh->func_pointer_refcount > 0)
2860 && ((h->def_dynamic
2861 && !h->def_regular)
2862 || (htab->elf.dynamic_sections_created
2863 && (h->root.type == bfd_link_hash_undefweak
2864 || h->root.type == bfd_link_hash_undefined))))
2865 {
2866 /* Make sure this symbol is output as a dynamic symbol.
2867 Undefined weak syms won't yet be marked as dynamic. */
2868 if (h->dynindx == -1
2869 && ! h->forced_local
2870 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2871 return FALSE;
2872
2873 /* If that succeeded, we know we'll be keeping all the
2874 relocs. */
2875 if (h->dynindx != -1)
2876 goto keep;
2877 }
2878
2879 eh->dyn_relocs = NULL;
2880 eh->func_pointer_refcount = 0;
2881
2882 keep: ;
2883 }
2884
2885 /* Finally, allocate space. */
2886 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2887 {
2888 asection * sreloc;
2889
2890 sreloc = elf_section_data (p->sec)->sreloc;
2891
2892 BFD_ASSERT (sreloc != NULL);
2893
2894 sreloc->size += p->count * bed->s->sizeof_rela;
2895 }
2896
2897 return TRUE;
2898 }
2899
2900 /* Allocate space in .plt, .got and associated reloc sections for
2901 local dynamic relocs. */
2902
2903 static bfd_boolean
2904 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2905 {
2906 struct elf_link_hash_entry *h
2907 = (struct elf_link_hash_entry *) *slot;
2908
2909 if (h->type != STT_GNU_IFUNC
2910 || !h->def_regular
2911 || !h->ref_regular
2912 || !h->forced_local
2913 || h->root.type != bfd_link_hash_defined)
2914 abort ();
2915
2916 return elf_x86_64_allocate_dynrelocs (h, inf);
2917 }
2918
2919 /* Find any dynamic relocs that apply to read-only sections. */
2920
2921 static bfd_boolean
2922 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2923 void * inf)
2924 {
2925 struct elf_x86_64_link_hash_entry *eh;
2926 struct elf_dyn_relocs *p;
2927
2928 /* Skip local IFUNC symbols. */
2929 if (h->forced_local && h->type == STT_GNU_IFUNC)
2930 return TRUE;
2931
2932 eh = (struct elf_x86_64_link_hash_entry *) h;
2933 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2934 {
2935 asection *s = p->sec->output_section;
2936
2937 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2938 {
2939 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2940
2941 info->flags |= DF_TEXTREL;
2942
2943 if ((info->warn_shared_textrel && bfd_link_pic (info))
2944 || info->error_textrel)
2945 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2946 p->sec->owner, h->root.root.string,
2947 p->sec);
2948
2949 /* Not an error, just cut short the traversal. */
2950 return FALSE;
2951 }
2952 }
2953 return TRUE;
2954 }
2955
2956 /* Convert
2957 mov foo@GOTPCREL(%rip), %reg
2958 to
2959 lea foo(%rip), %reg
2960 with the local symbol, foo. */
2961
2962 static bfd_boolean
2963 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2964 struct bfd_link_info *link_info)
2965 {
2966 Elf_Internal_Shdr *symtab_hdr;
2967 Elf_Internal_Rela *internal_relocs;
2968 Elf_Internal_Rela *irel, *irelend;
2969 bfd_byte *contents;
2970 struct elf_x86_64_link_hash_table *htab;
2971 bfd_boolean changed_contents;
2972 bfd_boolean changed_relocs;
2973 bfd_signed_vma *local_got_refcounts;
2974 bfd_vma maxpagesize;
2975
2976 /* Don't even try to convert non-ELF outputs. */
2977 if (!is_elf_hash_table (link_info->hash))
2978 return FALSE;
2979
2980 /* Nothing to do if there is no need or no output. */
2981 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2982 || sec->need_convert_mov_to_lea == 0
2983 || bfd_is_abs_section (sec->output_section))
2984 return TRUE;
2985
2986 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2987
2988 /* Load the relocations for this section. */
2989 internal_relocs = (_bfd_elf_link_read_relocs
2990 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2991 link_info->keep_memory));
2992 if (internal_relocs == NULL)
2993 return FALSE;
2994
2995 htab = elf_x86_64_hash_table (link_info);
2996 changed_contents = FALSE;
2997 changed_relocs = FALSE;
2998 local_got_refcounts = elf_local_got_refcounts (abfd);
2999 maxpagesize = get_elf_backend_data (abfd)->maxpagesize;
3000
3001 /* Get the section contents. */
3002 if (elf_section_data (sec)->this_hdr.contents != NULL)
3003 contents = elf_section_data (sec)->this_hdr.contents;
3004 else
3005 {
3006 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3007 goto error_return;
3008 }
3009
3010 irelend = internal_relocs + sec->reloc_count;
3011 for (irel = internal_relocs; irel < irelend; irel++)
3012 {
3013 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3014 unsigned int r_symndx = htab->r_sym (irel->r_info);
3015 unsigned int indx;
3016 struct elf_link_hash_entry *h;
3017 asection *tsec;
3018 char symtype;
3019 bfd_vma toff, roff;
3020 unsigned int opcode;
3021
3022 if (r_type != R_X86_64_GOTPCREL)
3023 continue;
3024
3025 roff = irel->r_offset;
3026
3027 if (roff < 2)
3028 continue;
3029
3030 opcode = bfd_get_8 (abfd, contents + roff - 2);
3031
3032 /* PR ld/18591: Don't convert R_X86_64_GOTPCREL relocation if it
3033 isn't for a mov instruction. */
3034 if (opcode != 0x8b)
3035 continue;
3036
3037 /* Get the symbol referred to by the reloc. */
3038 if (r_symndx < symtab_hdr->sh_info)
3039 {
3040 Elf_Internal_Sym *isym;
3041
3042 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3043 abfd, r_symndx);
3044
3045 symtype = ELF_ST_TYPE (isym->st_info);
3046
3047 /* STT_GNU_IFUNC symbols must keep their R_X86_64_GOTPCREL
3048 relocations; also skip relocations against undefined symbols. */
3049 if (symtype == STT_GNU_IFUNC || isym->st_shndx == SHN_UNDEF)
3050 continue;
3051
3052 if (isym->st_shndx == SHN_ABS)
3053 tsec = bfd_abs_section_ptr;
3054 else if (isym->st_shndx == SHN_COMMON)
3055 tsec = bfd_com_section_ptr;
3056 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
3057 tsec = &_bfd_elf_large_com_section;
3058 else
3059 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3060
3061 h = NULL;
3062 toff = isym->st_value;
3063 }
3064 else
3065 {
3066 indx = r_symndx - symtab_hdr->sh_info;
3067 h = elf_sym_hashes (abfd)[indx];
3068 BFD_ASSERT (h != NULL);
3069
3070 while (h->root.type == bfd_link_hash_indirect
3071 || h->root.type == bfd_link_hash_warning)
3072 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3073
3074 /* STT_GNU_IFUNC symbols must keep their R_X86_64_GOTPCREL
3075 relocations. We also avoid optimizing _DYNAMIC since ld.so
3076 may use its link-time address. */
3077 if (h->def_regular
3078 && h->type != STT_GNU_IFUNC
3079 && h != htab->elf.hdynamic
3080 && SYMBOL_REFERENCES_LOCAL (link_info, h))
3081 {
3082 tsec = h->root.u.def.section;
3083 toff = h->root.u.def.value;
3084 symtype = h->type;
3085 }
3086 else
3087 continue;
3088 }
3089
3090 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
3091 {
3092 /* At this stage in linking, no SEC_MERGE symbol has been
3093 adjusted, so all references to such symbols need to be
3094 passed through _bfd_merged_section_offset. (Later, in
3095 relocate_section, all SEC_MERGE symbols *except* for
3096 section symbols have been adjusted.)
3097
3098 gas may reduce relocations against symbols in SEC_MERGE
3099 sections to a relocation against the section symbol when
3100 the original addend was zero. When the reloc is against
3101 a section symbol we should include the addend in the
3102 offset passed to _bfd_merged_section_offset, since the
3103 location of interest is the original symbol. On the
3104 other hand, an access to "sym+addend" where "sym" is not
3105 a section symbol should not include the addend; such an
3106 access is presumed to be an offset from "sym"; the
3107 location of interest is just "sym". */
3108 if (symtype == STT_SECTION)
3109 toff += irel->r_addend;
3110
3111 toff = _bfd_merged_section_offset (abfd, &tsec,
3112 elf_section_data (tsec)->sec_info,
3113 toff);
3114
3115 if (symtype != STT_SECTION)
3116 toff += irel->r_addend;
3117 }
3118 else
3119 toff += irel->r_addend;
3120
3121 /* Don't convert if R_X86_64_PC32 relocation overflows. */
3122 if (tsec->output_section == sec->output_section)
3123 {
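/* The resulting R_X86_64_PC32 displacement must fit in a signed
   32-bit field; adding 0x80000000 maps the valid range onto
   [0, 0xffffffff].  */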
3124 if ((toff - roff + 0x80000000) > 0xffffffff)
3125 continue;
3126 }
3127 else
3128 {
3129 asection *asect;
3130 bfd_size_type size;
3131
3132 /* At this point, we don't know the load addresses of the TSEC
3133 and SEC sections. We estimate the distance between
3134 SEC and TSEC. */
3135 size = 0;
3136 for (asect = sec->output_section;
3137 asect != NULL && asect != tsec->output_section;
3138 asect = asect->next)
3139 {
3140 asection *i;
3141 for (i = asect->output_section->map_head.s;
3142 i != NULL;
3143 i = i->map_head.s)
3144 {
3145 size = align_power (size, i->alignment_power);
3146 size += i->size;
3147 }
3148 }
3149
3150 /* Don't convert R_X86_64_GOTPCREL if TSEC isn't placed after
3151 SEC. */
3152 if (asect == NULL)
3153 continue;
3154
3155 /* Take PT_GNU_RELRO segment into account by adding
3156 maxpagesize. */
3157 if ((toff + size + maxpagesize - roff + 0x80000000)
3158 > 0xffffffff)
3159 continue;
3160 }
3161
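/* Rewrite the opcode from 0x8b (mov) to 0x8d (lea) and convert
   the relocation to R_X86_64_PC32.  */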
3162 bfd_put_8 (abfd, 0x8d, contents + roff - 2);
3163 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
3164 changed_contents = TRUE;
3165 changed_relocs = TRUE;
3166
3167 if (h)
3168 {
3169 if (h->got.refcount > 0)
3170 h->got.refcount -= 1;
3171 }
3172 else
3173 {
3174 if (local_got_refcounts != NULL
3175 && local_got_refcounts[r_symndx] > 0)
3176 local_got_refcounts[r_symndx] -= 1;
3177 }
3178 }
3179
3180 if (contents != NULL
3181 && elf_section_data (sec)->this_hdr.contents != contents)
3182 {
3183 if (!changed_contents && !link_info->keep_memory)
3184 free (contents);
3185 else
3186 {
3187 /* Cache the section contents for elf_link_input_bfd. */
3188 elf_section_data (sec)->this_hdr.contents = contents;
3189 }
3190 }
3191
3192 if (elf_section_data (sec)->relocs != internal_relocs)
3193 {
3194 if (!changed_relocs)
3195 free (internal_relocs);
3196 else
3197 elf_section_data (sec)->relocs = internal_relocs;
3198 }
3199
3200 return TRUE;
3201
3202 error_return:
3203 if (contents != NULL
3204 && elf_section_data (sec)->this_hdr.contents != contents)
3205 free (contents);
3206 if (internal_relocs != NULL
3207 && elf_section_data (sec)->relocs != internal_relocs)
3208 free (internal_relocs);
3209 return FALSE;
3210 }
3211
3212 /* Set the sizes of the dynamic sections. */
3213
3214 static bfd_boolean
3215 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3216 struct bfd_link_info *info)
3217 {
3218 struct elf_x86_64_link_hash_table *htab;
3219 bfd *dynobj;
3220 asection *s;
3221 bfd_boolean relocs;
3222 bfd *ibfd;
3223 const struct elf_backend_data *bed;
3224
3225 htab = elf_x86_64_hash_table (info);
3226 if (htab == NULL)
3227 return FALSE;
3228 bed = get_elf_backend_data (output_bfd);
3229
3230 dynobj = htab->elf.dynobj;
3231 if (dynobj == NULL)
3232 abort ();
3233
3234 if (htab->elf.dynamic_sections_created)
3235 {
3236 /* Set the contents of the .interp section to the interpreter. */
3237 if (bfd_link_executable (info) && !info->nointerp)
3238 {
3239 s = bfd_get_linker_section (dynobj, ".interp");
3240 if (s == NULL)
3241 abort ();
3242 s->size = htab->dynamic_interpreter_size;
3243 s->contents = (unsigned char *) htab->dynamic_interpreter;
3244 }
3245 }
3246
3247 /* Set up .got offsets for local syms, and space for local dynamic
3248 relocs. */
3249 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3250 {
3251 bfd_signed_vma *local_got;
3252 bfd_signed_vma *end_local_got;
3253 char *local_tls_type;
3254 bfd_vma *local_tlsdesc_gotent;
3255 bfd_size_type locsymcount;
3256 Elf_Internal_Shdr *symtab_hdr;
3257 asection *srel;
3258
3259 if (! is_x86_64_elf (ibfd))
3260 continue;
3261
3262 for (s = ibfd->sections; s != NULL; s = s->next)
3263 {
3264 struct elf_dyn_relocs *p;
3265
3266 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3267 return FALSE;
3268
3269 for (p = (struct elf_dyn_relocs *)
3270 (elf_section_data (s)->local_dynrel);
3271 p != NULL;
3272 p = p->next)
3273 {
3274 if (!bfd_is_abs_section (p->sec)
3275 && bfd_is_abs_section (p->sec->output_section))
3276 {
3277 /* Input section has been discarded, either because
3278 it is a copy of a linkonce section or due to
3279 linker script /DISCARD/, so we'll be discarding
3280 the relocs too. */
3281 }
3282 else if (p->count != 0)
3283 {
3284 srel = elf_section_data (p->sec)->sreloc;
3285 srel->size += p->count * bed->s->sizeof_rela;
3286 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3287 && (info->flags & DF_TEXTREL) == 0)
3288 {
3289 info->flags |= DF_TEXTREL;
3290 if ((info->warn_shared_textrel && bfd_link_pic (info))
3291 || info->error_textrel)
3292 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3293 p->sec->owner, p->sec);
3294 }
3295 }
3296 }
3297 }
3298
3299 local_got = elf_local_got_refcounts (ibfd);
3300 if (!local_got)
3301 continue;
3302
3303 symtab_hdr = &elf_symtab_hdr (ibfd);
3304 locsymcount = symtab_hdr->sh_info;
3305 end_local_got = local_got + locsymcount;
3306 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3307 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3308 s = htab->elf.sgot;
3309 srel = htab->elf.srelgot;
3310 for (; local_got < end_local_got;
3311 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3312 {
3313 *local_tlsdesc_gotent = (bfd_vma) -1;
3314 if (*local_got > 0)
3315 {
3316 if (GOT_TLS_GDESC_P (*local_tls_type))
3317 {
3318 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3319 - elf_x86_64_compute_jump_table_size (htab);
3320 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3321 *local_got = (bfd_vma) -2;
3322 }
3323 if (! GOT_TLS_GDESC_P (*local_tls_type)
3324 || GOT_TLS_GD_P (*local_tls_type))
3325 {
3326 *local_got = s->size;
3327 s->size += GOT_ENTRY_SIZE;
3328 if (GOT_TLS_GD_P (*local_tls_type))
3329 s->size += GOT_ENTRY_SIZE;
3330 }
3331 if (bfd_link_pic (info)
3332 || GOT_TLS_GD_ANY_P (*local_tls_type)
3333 || *local_tls_type == GOT_TLS_IE)
3334 {
3335 if (GOT_TLS_GDESC_P (*local_tls_type))
3336 {
3337 htab->elf.srelplt->size
3338 += bed->s->sizeof_rela;
3339 htab->tlsdesc_plt = (bfd_vma) -1;
3340 }
3341 if (! GOT_TLS_GDESC_P (*local_tls_type)
3342 || GOT_TLS_GD_P (*local_tls_type))
3343 srel->size += bed->s->sizeof_rela;
3344 }
3345 }
3346 else
3347 *local_got = (bfd_vma) -1;
3348 }
3349 }
3350
3351 if (htab->tls_ld_got.refcount > 0)
3352 {
3353 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3354 relocs. */
3355 htab->tls_ld_got.offset = htab->elf.sgot->size;
3356 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3357 htab->elf.srelgot->size += bed->s->sizeof_rela;
3358 }
3359 else
3360 htab->tls_ld_got.offset = -1;
3361
3362 /* Allocate global sym .plt and .got entries, and space for global
3363 sym dynamic relocs. */
3364 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3365 info);
3366
3367 /* Allocate .plt and .got entries, and space for local symbols. */
3368 htab_traverse (htab->loc_hash_table,
3369 elf_x86_64_allocate_local_dynrelocs,
3370 info);
3371
3372 /* For every jump slot reserved in the sgotplt, reloc_count is
3373 incremented. However, when we reserve space for TLS descriptors,
3374 it's not incremented, so in order to compute the space reserved
3375 for them, it suffices to multiply the reloc count by the jump
3376 slot size.
3377
3378 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3379 so that R_X86_64_IRELATIVE entries come last. */
3380 if (htab->elf.srelplt)
3381 {
3382 htab->sgotplt_jump_table_size
3383 = elf_x86_64_compute_jump_table_size (htab);
3384 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3385 }
3386 else if (htab->elf.irelplt)
3387 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3388
3389 if (htab->tlsdesc_plt)
3390 {
3391 /* If we're not using lazy TLS relocations, don't generate the
3392 PLT and GOT entries they require. */
3393 if ((info->flags & DF_BIND_NOW))
3394 htab->tlsdesc_plt = 0;
3395 else
3396 {
3397 htab->tlsdesc_got = htab->elf.sgot->size;
3398 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3399 /* Reserve room for the initial entry.
3400 FIXME: we could probably do away with it in this case. */
3401 if (htab->elf.splt->size == 0)
3402 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3403 htab->tlsdesc_plt = htab->elf.splt->size;
3404 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3405 }
3406 }
3407
3408 if (htab->elf.sgotplt)
3409 {
3410 /* Don't allocate .got.plt section if there are no GOT nor PLT
3411 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3412 if ((htab->elf.hgot == NULL
3413 || !htab->elf.hgot->ref_regular_nonweak)
3414 && (htab->elf.sgotplt->size
3415 == get_elf_backend_data (output_bfd)->got_header_size)
3416 && (htab->elf.splt == NULL
3417 || htab->elf.splt->size == 0)
3418 && (htab->elf.sgot == NULL
3419 || htab->elf.sgot->size == 0)
3420 && (htab->elf.iplt == NULL
3421 || htab->elf.iplt->size == 0)
3422 && (htab->elf.igotplt == NULL
3423 || htab->elf.igotplt->size == 0))
3424 htab->elf.sgotplt->size = 0;
3425 }
3426
3427 if (htab->plt_eh_frame != NULL
3428 && htab->elf.splt != NULL
3429 && htab->elf.splt->size != 0
3430 && !bfd_is_abs_section (htab->elf.splt->output_section)
3431 && _bfd_elf_eh_frame_present (info))
3432 {
3433 const struct elf_x86_64_backend_data *arch_data
3434 = get_elf_x86_64_arch_data (bed);
3435 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3436 }
3437
3438 /* We now have determined the sizes of the various dynamic sections.
3439 Allocate memory for them. */
3440 relocs = FALSE;
3441 for (s = dynobj->sections; s != NULL; s = s->next)
3442 {
3443 if ((s->flags & SEC_LINKER_CREATED) == 0)
3444 continue;
3445
3446 if (s == htab->elf.splt
3447 || s == htab->elf.sgot
3448 || s == htab->elf.sgotplt
3449 || s == htab->elf.iplt
3450 || s == htab->elf.igotplt
3451 || s == htab->plt_bnd
3452 || s == htab->plt_got
3453 || s == htab->plt_eh_frame
3454 || s == htab->sdynbss)
3455 {
3456 /* Strip this section if we don't need it; see the
3457 comment below. */
3458 }
3459 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3460 {
3461 if (s->size != 0 && s != htab->elf.srelplt)
3462 relocs = TRUE;
3463
3464 /* We use the reloc_count field as a counter if we need
3465 to copy relocs into the output file. */
3466 if (s != htab->elf.srelplt)
3467 s->reloc_count = 0;
3468 }
3469 else
3470 {
3471 /* It's not one of our sections, so don't allocate space. */
3472 continue;
3473 }
3474
3475 if (s->size == 0)
3476 {
3477 /* If we don't need this section, strip it from the
3478 output file. This is mostly to handle .rela.bss and
3479 .rela.plt. We must create both sections in
3480 create_dynamic_sections, because they must be created
3481 before the linker maps input sections to output
3482 sections. The linker does that before
3483 adjust_dynamic_symbol is called, and it is that
3484 function which decides whether anything needs to go
3485 into these sections. */
3486
3487 s->flags |= SEC_EXCLUDE;
3488 continue;
3489 }
3490
3491 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3492 continue;
3493
3494 /* Allocate memory for the section contents. We use bfd_zalloc
3495 here in case unused entries are not reclaimed before the
3496 section's contents are written out. This should not happen,
3497 but this way if it does, we get a R_X86_64_NONE reloc instead
3498 of garbage. */
3499 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3500 if (s->contents == NULL)
3501 return FALSE;
3502 }
3503
3504 if (htab->plt_eh_frame != NULL
3505 && htab->plt_eh_frame->contents != NULL)
3506 {
3507 const struct elf_x86_64_backend_data *arch_data
3508 = get_elf_x86_64_arch_data (bed);
3509
3510 memcpy (htab->plt_eh_frame->contents,
3511 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3512 bfd_put_32 (dynobj, htab->elf.splt->size,
3513 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3514 }
3515
3516 if (htab->elf.dynamic_sections_created)
3517 {
3518 /* Add some entries to the .dynamic section. We fill in the
3519 values later, in elf_x86_64_finish_dynamic_sections, but we
3520 must add the entries now so that we get the correct size for
3521 the .dynamic section. The DT_DEBUG entry is filled in by the
3522 dynamic linker and used by the debugger. */
3523 #define add_dynamic_entry(TAG, VAL) \
3524 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3525
3526 if (bfd_link_executable (info))
3527 {
3528 if (!add_dynamic_entry (DT_DEBUG, 0))
3529 return FALSE;
3530 }
3531
3532 if (htab->elf.splt->size != 0)
3533 {
3534 /* DT_PLTGOT is used by prelink even if there is no PLT
3535 relocation. */
3536 if (!add_dynamic_entry (DT_PLTGOT, 0))
3537 return FALSE;
3538
3539 if (htab->elf.srelplt->size != 0)
3540 {
3541 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3542 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3543 || !add_dynamic_entry (DT_JMPREL, 0))
3544 return FALSE;
3545 }
3546
3547 if (htab->tlsdesc_plt
3548 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3549 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3550 return FALSE;
3551 }
3552
3553 if (relocs)
3554 {
3555 if (!add_dynamic_entry (DT_RELA, 0)
3556 || !add_dynamic_entry (DT_RELASZ, 0)
3557 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3558 return FALSE;
3559
3560 /* If any dynamic relocs apply to a read-only section,
3561 then we need a DT_TEXTREL entry. */
3562 if ((info->flags & DF_TEXTREL) == 0)
3563 elf_link_hash_traverse (&htab->elf,
3564 elf_x86_64_readonly_dynrelocs,
3565 info);
3566
3567 if ((info->flags & DF_TEXTREL) != 0)
3568 {
3569 if ((elf_tdata (output_bfd)->has_gnu_symbols
3570 & elf_gnu_symbol_ifunc) == elf_gnu_symbol_ifunc)
3571 {
3572 info->callbacks->einfo
3573 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3574 bfd_set_error (bfd_error_bad_value);
3575 return FALSE;
3576 }
3577
3578 if (!add_dynamic_entry (DT_TEXTREL, 0))
3579 return FALSE;
3580 }
3581 }
3582 }
3583 #undef add_dynamic_entry
3584
3585 return TRUE;
3586 }
3587
3588 static bfd_boolean
3589 elf_x86_64_always_size_sections (bfd *output_bfd,
3590 struct bfd_link_info *info)
3591 {
3592 asection *tls_sec = elf_hash_table (info)->tls_sec;
3593
3594 if (tls_sec)
3595 {
3596 struct elf_link_hash_entry *tlsbase;
3597
3598 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3599 "_TLS_MODULE_BASE_",
3600 FALSE, FALSE, FALSE);
3601
3602 if (tlsbase && tlsbase->type == STT_TLS)
3603 {
3604 struct elf_x86_64_link_hash_table *htab;
3605 struct bfd_link_hash_entry *bh = NULL;
3606 const struct elf_backend_data *bed
3607 = get_elf_backend_data (output_bfd);
3608
3609 htab = elf_x86_64_hash_table (info);
3610 if (htab == NULL)
3611 return FALSE;
3612
3613 if (!(_bfd_generic_link_add_one_symbol
3614 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3615 tls_sec, 0, NULL, FALSE,
3616 bed->collect, &bh)))
3617 return FALSE;
3618
3619 htab->tls_module_base = bh;
3620
3621 tlsbase = (struct elf_link_hash_entry *)bh;
3622 tlsbase->def_regular = 1;
3623 tlsbase->other = STV_HIDDEN;
3624 tlsbase->root.linker_def = 1;
3625 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3626 }
3627 }
3628
3629 return TRUE;
3630 }
3631
3632 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3633 executables. Rather than setting it to the beginning of the TLS
3634 section, we have to set it to the end. This function may be called
3635 multiple times; it is idempotent. */
3636
3637 static void
3638 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3639 {
3640 struct elf_x86_64_link_hash_table *htab;
3641 struct bfd_link_hash_entry *base;
3642
3643 if (!bfd_link_executable (info))
3644 return;
3645
3646 htab = elf_x86_64_hash_table (info);
3647 if (htab == NULL)
3648 return;
3649
3650 base = htab->tls_module_base;
3651 if (base == NULL)
3652 return;
3653
3654 base->u.def.value = htab->elf.tls_size;
3655 }
3656
3657 /* Return the base VMA address which should be subtracted from real addresses
3658 when resolving @dtpoff relocation.
3659 This is PT_TLS segment p_vaddr. */
3660
3661 static bfd_vma
3662 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3663 {
3664 /* If tls_sec is NULL, we should have signalled an error already. */
3665 if (elf_hash_table (info)->tls_sec == NULL)
3666 return 0;
3667 return elf_hash_table (info)->tls_sec->vma;
3668 }
3669
3670 /* Return the relocation value for a @tpoff relocation
3671 if the STT_TLS virtual address is ADDRESS. */
3672
3673 static bfd_vma
3674 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3675 {
3676 struct elf_link_hash_table *htab = elf_hash_table (info);
3677 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3678 bfd_vma static_tls_size;
3679
3680 /* If tls_sec is NULL, we should have signalled an error already. */
3681 if (htab->tls_sec == NULL)
3682 return 0;
3683
3684 /* Consider special static TLS alignment requirements. */
3685 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3686 return address - static_tls_size - htab->tls_sec->vma;
3687 }
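/* A worked example with illustrative numbers: with tls_sec->vma at
   0x600000, a 0x10-byte TLS image and no extra static TLS alignment,
   a thread variable at address 0x600008 yields
   0x600008 - 0x10 - 0x600000 = -8, a negative offset from the thread
   pointer, as expected for the x86-64 TLS layout where the TLS block
   sits just below %fs:0.  */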
3688
3689 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3690 branch? */
3691
3692 static bfd_boolean
3693 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3694 {
3695 /* Opcode Instruction
3696 0xe8 call
3697 0xe9 jump
3698 0x0f 0x8x conditional jump */
3699 return ((offset > 0
3700 && (contents [offset - 1] == 0xe8
3701 || contents [offset - 1] == 0xe9))
3702 || (offset > 1
3703 && contents [offset - 2] == 0x0f
3704 && (contents [offset - 1] & 0xf0) == 0x80));
3705 }
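/* For example, "call foo" is encoded as e8 <rel32> and "je foo" as
   0f 84 <rel32>, so the bytes inspected above are the opcode bytes
   immediately before the relocated rel32 field at OFFSET.  */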
3706
3707 /* Relocate an x86_64 ELF section. */
3708
3709 static bfd_boolean
3710 elf_x86_64_relocate_section (bfd *output_bfd,
3711 struct bfd_link_info *info,
3712 bfd *input_bfd,
3713 asection *input_section,
3714 bfd_byte *contents,
3715 Elf_Internal_Rela *relocs,
3716 Elf_Internal_Sym *local_syms,
3717 asection **local_sections)
3718 {
3719 struct elf_x86_64_link_hash_table *htab;
3720 Elf_Internal_Shdr *symtab_hdr;
3721 struct elf_link_hash_entry **sym_hashes;
3722 bfd_vma *local_got_offsets;
3723 bfd_vma *local_tlsdesc_gotents;
3724 Elf_Internal_Rela *rel;
3725 Elf_Internal_Rela *relend;
3726 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3727
3728 BFD_ASSERT (is_x86_64_elf (input_bfd));
3729
3730 htab = elf_x86_64_hash_table (info);
3731 if (htab == NULL)
3732 return FALSE;
3733 symtab_hdr = &elf_symtab_hdr (input_bfd);
3734 sym_hashes = elf_sym_hashes (input_bfd);
3735 local_got_offsets = elf_local_got_offsets (input_bfd);
3736 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3737
3738 elf_x86_64_set_tls_module_base (info);
3739
3740 rel = relocs;
3741 relend = relocs + input_section->reloc_count;
3742 for (; rel < relend; rel++)
3743 {
3744 unsigned int r_type;
3745 reloc_howto_type *howto;
3746 unsigned long r_symndx;
3747 struct elf_link_hash_entry *h;
3748 struct elf_x86_64_link_hash_entry *eh;
3749 Elf_Internal_Sym *sym;
3750 asection *sec;
3751 bfd_vma off, offplt, plt_offset;
3752 bfd_vma relocation;
3753 bfd_boolean unresolved_reloc;
3754 bfd_reloc_status_type r;
3755 int tls_type;
3756 asection *base_got, *resolved_plt;
3757 bfd_vma st_size;
3758
3759 r_type = ELF32_R_TYPE (rel->r_info);
3760 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3761 || r_type == (int) R_X86_64_GNU_VTENTRY)
3762 continue;
3763
3764 if (r_type >= (int) R_X86_64_standard)
3765 {
3766 (*_bfd_error_handler)
3767 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3768 input_bfd, input_section, r_type);
3769 bfd_set_error (bfd_error_bad_value);
3770 return FALSE;
3771 }
3772
3773 if (r_type != (int) R_X86_64_32
3774 || ABI_64_P (output_bfd))
3775 howto = x86_64_elf_howto_table + r_type;
3776 else
3777 howto = (x86_64_elf_howto_table
3778 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3779 r_symndx = htab->r_sym (rel->r_info);
3780 h = NULL;
3781 sym = NULL;
3782 sec = NULL;
3783 unresolved_reloc = FALSE;
3784 if (r_symndx < symtab_hdr->sh_info)
3785 {
3786 sym = local_syms + r_symndx;
3787 sec = local_sections[r_symndx];
3788
3789 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3790 &sec, rel);
3791 st_size = sym->st_size;
3792
3793 /* Relocate against local STT_GNU_IFUNC symbol. */
3794 if (!bfd_link_relocatable (info)
3795 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3796 {
3797 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3798 rel, FALSE);
3799 if (h == NULL)
3800 abort ();
3801
3802 /* Set STT_GNU_IFUNC symbol value. */
3803 h->root.u.def.value = sym->st_value;
3804 h->root.u.def.section = sec;
3805 }
3806 }
3807 else
3808 {
3809 bfd_boolean warned ATTRIBUTE_UNUSED;
3810 bfd_boolean ignored ATTRIBUTE_UNUSED;
3811
3812 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3813 r_symndx, symtab_hdr, sym_hashes,
3814 h, sec, relocation,
3815 unresolved_reloc, warned, ignored);
3816 st_size = h->size;
3817 }
3818
3819 if (sec != NULL && discarded_section (sec))
3820 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3821 rel, 1, relend, howto, 0, contents);
3822
3823 if (bfd_link_relocatable (info))
3824 continue;
3825
3826 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3827 {
3828 if (r_type == R_X86_64_64)
3829 {
3830 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3831 zero-extend it to 64bit if addend is zero. */
3832 r_type = R_X86_64_32;
3833 memset (contents + rel->r_offset + 4, 0, 4);
3834 }
3835 else if (r_type == R_X86_64_SIZE64)
3836 {
3837 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3838 zero-extend it to 64bit if addend is zero. */
3839 r_type = R_X86_64_SIZE32;
3840 memset (contents + rel->r_offset + 4, 0, 4);
3841 }
3842 }
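	  /* Why this is sufficient (little-endian layout): the 64-bit
	     field at r_offset stores its low 32 bits first, so clearing
	     the 4 bytes at r_offset + 4 and then applying a 32-bit
	     relocation at r_offset leaves exactly the 32-bit result
	     zero-extended to 64 bits.  */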
3843
3844 eh = (struct elf_x86_64_link_hash_entry *) h;
3845
3846 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3847 it here if it is defined in a non-shared object. */
3848 if (h != NULL
3849 && h->type == STT_GNU_IFUNC
3850 && h->def_regular)
3851 {
3852 bfd_vma plt_index;
3853 const char *name;
3854
3855 if ((input_section->flags & SEC_ALLOC) == 0)
3856 {
3857 /* Dynamic relocs are not propagated for SEC_DEBUGGING
3858 sections because such sections are not SEC_ALLOC and
3859 thus ld.so will not process them. */
3860 if ((input_section->flags & SEC_DEBUGGING) != 0)
3861 continue;
3862 abort ();
3863 }
3864 else if (h->plt.offset == (bfd_vma) -1)
3865 abort ();
3866
3867 /* STT_GNU_IFUNC symbol must go through PLT. */
3868 if (htab->elf.splt != NULL)
3869 {
3870 if (htab->plt_bnd != NULL)
3871 {
3872 resolved_plt = htab->plt_bnd;
3873 plt_offset = eh->plt_bnd.offset;
3874 }
3875 else
3876 {
3877 resolved_plt = htab->elf.splt;
3878 plt_offset = h->plt.offset;
3879 }
3880 }
3881 else
3882 {
3883 resolved_plt = htab->elf.iplt;
3884 plt_offset = h->plt.offset;
3885 }
3886
3887 relocation = (resolved_plt->output_section->vma
3888 + resolved_plt->output_offset + plt_offset);
3889
3890 switch (r_type)
3891 {
3892 default:
3893 if (h->root.root.string)
3894 name = h->root.root.string;
3895 else
3896 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3897 NULL);
3898 (*_bfd_error_handler)
3899 (_("%B: relocation %s against STT_GNU_IFUNC "
3900 "symbol `%s' isn't handled by %s"), input_bfd,
3901 x86_64_elf_howto_table[r_type].name,
3902 name, __FUNCTION__);
3903 bfd_set_error (bfd_error_bad_value);
3904 return FALSE;
3905
3906 case R_X86_64_32S:
3907 if (bfd_link_pic (info))
3908 abort ();
3909 goto do_relocation;
3910
3911 case R_X86_64_32:
3912 if (ABI_64_P (output_bfd))
3913 goto do_relocation;
3914 /* FALLTHROUGH */
3915 case R_X86_64_64:
3916 if (rel->r_addend != 0)
3917 {
3918 if (h->root.root.string)
3919 name = h->root.root.string;
3920 else
3921 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3922 sym, NULL);
3923 (*_bfd_error_handler)
3924 (_("%B: relocation %s against STT_GNU_IFUNC "
3925 "symbol `%s' has non-zero addend: %d"),
3926 input_bfd, x86_64_elf_howto_table[r_type].name,
3927 name, rel->r_addend);
3928 bfd_set_error (bfd_error_bad_value);
3929 return FALSE;
3930 }
3931
3932 /* Generate a dynamic relocation only when there is a
3933 non-GOT reference in a shared object. */
3934 if (bfd_link_pic (info) && h->non_got_ref)
3935 {
3936 Elf_Internal_Rela outrel;
3937 asection *sreloc;
3938
3939 /* Need a dynamic relocation to get the real function
3940 address. */
3941 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3942 info,
3943 input_section,
3944 rel->r_offset);
3945 if (outrel.r_offset == (bfd_vma) -1
3946 || outrel.r_offset == (bfd_vma) -2)
3947 abort ();
3948
3949 outrel.r_offset += (input_section->output_section->vma
3950 + input_section->output_offset);
3951
3952 if (h->dynindx == -1
3953 || h->forced_local
3954 || bfd_link_executable (info))
3955 {
3956 /* This symbol is resolved locally. */
3957 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3958 outrel.r_addend = (h->root.u.def.value
3959 + h->root.u.def.section->output_section->vma
3960 + h->root.u.def.section->output_offset);
3961 }
3962 else
3963 {
3964 outrel.r_info = htab->r_info (h->dynindx, r_type);
3965 outrel.r_addend = 0;
3966 }
3967
3968 sreloc = htab->elf.irelifunc;
3969 elf_append_rela (output_bfd, sreloc, &outrel);
3970
3971 /* If this reloc is against an external symbol, we
3972 do not want to fiddle with the addend. Otherwise,
3973 we need to include the symbol value so that it
3974 becomes an addend for the dynamic reloc. For an
3975 internal symbol, we have already updated the addend. */
3976 continue;
3977 }
3978 /* FALLTHROUGH */
3979 case R_X86_64_PC32:
3980 case R_X86_64_PC32_BND:
3981 case R_X86_64_PC64:
3982 case R_X86_64_PLT32:
3983 case R_X86_64_PLT32_BND:
3984 goto do_relocation;
3985
3986 case R_X86_64_GOTPCREL:
3987 case R_X86_64_GOTPCREL64:
3988 base_got = htab->elf.sgot;
3989 off = h->got.offset;
3990
3991 if (base_got == NULL)
3992 abort ();
3993
3994 if (off == (bfd_vma) -1)
3995 {
3996 /* We can't use h->got.offset here to save state, or
3997 even just remember the offset, as finish_dynamic_symbol
3998 would use that as offset into .got. */
3999
4000 if (htab->elf.splt != NULL)
4001 {
4002 plt_index = h->plt.offset / plt_entry_size - 1;
4003 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4004 base_got = htab->elf.sgotplt;
4005 }
4006 else
4007 {
4008 plt_index = h->plt.offset / plt_entry_size;
4009 off = plt_index * GOT_ENTRY_SIZE;
4010 base_got = htab->elf.igotplt;
4011 }
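	      /* A sketch of the slot arithmetic above, assuming the
		 default 16-byte PLT entries: PLT entry 0 is reserved and
		 the first three .got.plt slots belong to the dynamic
		 linker, so h->plt.offset == 0x20 is PLT index 1 and maps
		 to .got.plt offset (1 + 3) * 8 = 0x20.  Nothing is
		 reserved in .iplt/.igot.plt, hence no "- 1" and "+ 3" in
		 the static IFUNC case.  */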
4012
4013 if (h->dynindx == -1
4014 || h->forced_local
4015 || info->symbolic)
4016 {
4017 /* This references the local definition. We must
4018 initialize this entry in the global offset table.
4019 Since the offset must always be a multiple of 8,
4020 we use the least significant bit to record
4021 whether we have initialized it already.
4022
4023 When doing a dynamic link, we create a .rela.got
4024 relocation entry to initialize the value. This
4025 is done in the finish_dynamic_symbol routine. */
4026 if ((off & 1) != 0)
4027 off &= ~1;
4028 else
4029 {
4030 bfd_put_64 (output_bfd, relocation,
4031 base_got->contents + off);
4032 /* Note that this is harmless for the GOTPLT64
4033 case, as -1 | 1 still is -1. */
4034 h->got.offset |= 1;
4035 }
4036 }
4037 }
4038
4039 relocation = (base_got->output_section->vma
4040 + base_got->output_offset + off);
4041
4042 goto do_relocation;
4043 }
4044 }
4045
4046 /* When generating a shared object, the relocations handled here are
4047 copied into the output file to be resolved at run time. */
4048 switch (r_type)
4049 {
4050 case R_X86_64_GOT32:
4051 case R_X86_64_GOT64:
4052 /* Relocation is to the entry for this symbol in the global
4053 offset table. */
4054 case R_X86_64_GOTPCREL:
4055 case R_X86_64_GOTPCREL64:
4056 /* Use global offset table entry as symbol value. */
4057 case R_X86_64_GOTPLT64:
4058 /* This is obsolete and treated the same as GOT64. */
4059 base_got = htab->elf.sgot;
4060
4061 if (htab->elf.sgot == NULL)
4062 abort ();
4063
4064 if (h != NULL)
4065 {
4066 bfd_boolean dyn;
4067
4068 off = h->got.offset;
4069 if (h->needs_plt
4070 && h->plt.offset != (bfd_vma)-1
4071 && off == (bfd_vma)-1)
4072 {
4073 /* We can't use h->got.offset here to save
4074 state, or even just remember the offset, as
4075 finish_dynamic_symbol would use that as offset into
4076 .got. */
4077 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4078 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4079 base_got = htab->elf.sgotplt;
4080 }
4081
4082 dyn = htab->elf.dynamic_sections_created;
4083
4084 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4085 || (bfd_link_pic (info)
4086 && SYMBOL_REFERENCES_LOCAL (info, h))
4087 || (ELF_ST_VISIBILITY (h->other)
4088 && h->root.type == bfd_link_hash_undefweak))
4089 {
4090 /* This is actually a static link, or it is a -Bsymbolic
4091 link and the symbol is defined locally, or the symbol
4092 was forced to be local because of a version file. We
4093 must initialize this entry in the global offset table.
4094 Since the offset must always be a multiple of 8, we
4095 use the least significant bit to record whether we
4096 have initialized it already.
4097
4098 When doing a dynamic link, we create a .rela.got
4099 relocation entry to initialize the value. This is
4100 done in the finish_dynamic_symbol routine. */
4101 if ((off & 1) != 0)
4102 off &= ~1;
4103 else
4104 {
4105 bfd_put_64 (output_bfd, relocation,
4106 base_got->contents + off);
4107 /* Note that this is harmless for the GOTPLT64 case,
4108 as -1 | 1 still is -1. */
4109 h->got.offset |= 1;
4110 }
4111 }
4112 else
4113 unresolved_reloc = FALSE;
4114 }
4115 else
4116 {
4117 if (local_got_offsets == NULL)
4118 abort ();
4119
4120 off = local_got_offsets[r_symndx];
4121
4122 /* The offset must always be a multiple of 8. We use
4123 the least significant bit to record whether we have
4124 already generated the necessary reloc. */
4125 if ((off & 1) != 0)
4126 off &= ~1;
4127 else
4128 {
4129 bfd_put_64 (output_bfd, relocation,
4130 base_got->contents + off);
4131
4132 if (bfd_link_pic (info))
4133 {
4134 asection *s;
4135 Elf_Internal_Rela outrel;
4136
4137 /* We need to generate a R_X86_64_RELATIVE reloc
4138 for the dynamic linker. */
4139 s = htab->elf.srelgot;
4140 if (s == NULL)
4141 abort ();
4142
4143 outrel.r_offset = (base_got->output_section->vma
4144 + base_got->output_offset
4145 + off);
4146 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4147 outrel.r_addend = relocation;
4148 elf_append_rela (output_bfd, s, &outrel);
4149 }
4150
4151 local_got_offsets[r_symndx] |= 1;
4152 }
4153 }
4154
4155 if (off >= (bfd_vma) -2)
4156 abort ();
4157
4158 relocation = base_got->output_section->vma
4159 + base_got->output_offset + off;
4160 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
4161 relocation -= htab->elf.sgotplt->output_section->vma
4162 - htab->elf.sgotplt->output_offset;
4163
4164 break;
4165
4166 case R_X86_64_GOTOFF64:
4167 /* Relocation is relative to the start of the global offset
4168 table. */
4169
4170 /* Check to make sure it isn't a protected function or data
4171 symbol for a shared library, since it may not be local when
4172 used as a function address or with a copy relocation. We also
4173 need to make sure that the symbol is referenced locally. */
4174 if (bfd_link_pic (info) && h)
4175 {
4176 if (!h->def_regular)
4177 {
4178 const char *v;
4179
4180 switch (ELF_ST_VISIBILITY (h->other))
4181 {
4182 case STV_HIDDEN:
4183 v = _("hidden symbol");
4184 break;
4185 case STV_INTERNAL:
4186 v = _("internal symbol");
4187 break;
4188 case STV_PROTECTED:
4189 v = _("protected symbol");
4190 break;
4191 default:
4192 v = _("symbol");
4193 break;
4194 }
4195
4196 (*_bfd_error_handler)
4197 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4198 input_bfd, v, h->root.root.string);
4199 bfd_set_error (bfd_error_bad_value);
4200 return FALSE;
4201 }
4202 else if (!bfd_link_executable (info)
4203 && !SYMBOL_REFERENCES_LOCAL (info, h)
4204 && (h->type == STT_FUNC
4205 || h->type == STT_OBJECT)
4206 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4207 {
4208 (*_bfd_error_handler)
4209 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4210 input_bfd,
4211 h->type == STT_FUNC ? "function" : "data",
4212 h->root.root.string);
4213 bfd_set_error (bfd_error_bad_value);
4214 return FALSE;
4215 }
4216 }
4217
4218 /* Note that sgot is not involved in this
4219 calculation. We always want the start of .got.plt. If we
4220 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4221 permitted by the ABI, we might have to change this
4222 calculation. */
4223 relocation -= htab->elf.sgotplt->output_section->vma
4224 + htab->elf.sgotplt->output_offset;
4225 break;
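	  /* The GOTOFF64 value stored above is therefore roughly
	     S + A - GOT, where GOT is the address of .got.plt (the
	     _GLOBAL_OFFSET_TABLE_ base) and the addend A is applied
	     later by _bfd_final_link_relocate.  */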
4226
4227 case R_X86_64_GOTPC32:
4228 case R_X86_64_GOTPC64:
4229 /* Use global offset table as symbol value. */
4230 relocation = htab->elf.sgotplt->output_section->vma
4231 + htab->elf.sgotplt->output_offset;
4232 unresolved_reloc = FALSE;
4233 break;
4234
4235 case R_X86_64_PLTOFF64:
4236 /* Relocation is PLT entry relative to GOT. For local
4237 symbols it's the symbol itself relative to GOT. */
4238 if (h != NULL
4239 /* See PLT32 handling. */
4240 && h->plt.offset != (bfd_vma) -1
4241 && htab->elf.splt != NULL)
4242 {
4243 if (htab->plt_bnd != NULL)
4244 {
4245 resolved_plt = htab->plt_bnd;
4246 plt_offset = eh->plt_bnd.offset;
4247 }
4248 else
4249 {
4250 resolved_plt = htab->elf.splt;
4251 plt_offset = h->plt.offset;
4252 }
4253
4254 relocation = (resolved_plt->output_section->vma
4255 + resolved_plt->output_offset
4256 + plt_offset);
4257 unresolved_reloc = FALSE;
4258 }
4259
4260 relocation -= htab->elf.sgotplt->output_section->vma
4261 + htab->elf.sgotplt->output_offset;
4262 break;
4263
4264 case R_X86_64_PLT32:
4265 case R_X86_64_PLT32_BND:
4266 /* Relocation is to the entry for this symbol in the
4267 procedure linkage table. */
4268
4269 /* Resolve a PLT32 reloc against a local symbol directly,
4270 without using the procedure linkage table. */
4271 if (h == NULL)
4272 break;
4273
4274 if ((h->plt.offset == (bfd_vma) -1
4275 && eh->plt_got.offset == (bfd_vma) -1)
4276 || htab->elf.splt == NULL)
4277 {
4278 /* We didn't make a PLT entry for this symbol. This
4279 happens when statically linking PIC code, or when
4280 using -Bsymbolic. */
4281 break;
4282 }
4283
4284 if (h->plt.offset != (bfd_vma) -1)
4285 {
4286 if (htab->plt_bnd != NULL)
4287 {
4288 resolved_plt = htab->plt_bnd;
4289 plt_offset = eh->plt_bnd.offset;
4290 }
4291 else
4292 {
4293 resolved_plt = htab->elf.splt;
4294 plt_offset = h->plt.offset;
4295 }
4296 }
4297 else
4298 {
4299 /* Use the GOT PLT. */
4300 resolved_plt = htab->plt_got;
4301 plt_offset = eh->plt_got.offset;
4302 }
4303
4304 relocation = (resolved_plt->output_section->vma
4305 + resolved_plt->output_offset
4306 + plt_offset);
4307 unresolved_reloc = FALSE;
4308 break;
4309
4310 case R_X86_64_SIZE32:
4311 case R_X86_64_SIZE64:
4312 /* Set to symbol size. */
4313 relocation = st_size;
4314 goto direct;
4315
4316 case R_X86_64_PC8:
4317 case R_X86_64_PC16:
4318 case R_X86_64_PC32:
4319 case R_X86_64_PC32_BND:
4320 /* Don't complain about -fPIC if the symbol is undefined when
4321 building an executable. */
4322 if (bfd_link_pic (info)
4323 && (input_section->flags & SEC_ALLOC) != 0
4324 && (input_section->flags & SEC_READONLY) != 0
4325 && h != NULL
4326 && !(bfd_link_executable (info)
4327 && h->root.type == bfd_link_hash_undefined))
4328 {
4329 bfd_boolean fail = FALSE;
4330 bfd_boolean branch
4331 = ((r_type == R_X86_64_PC32
4332 || r_type == R_X86_64_PC32_BND)
4333 && is_32bit_relative_branch (contents, rel->r_offset));
4334
4335 if (SYMBOL_REFERENCES_LOCAL (info, h))
4336 {
4337 /* Symbol is referenced locally. Make sure it is
4338 defined locally, or that this is a branch. */
4339 fail = !h->def_regular && !branch;
4340 }
4341 else if (!(bfd_link_executable (info)
4342 && (h->needs_copy || eh->needs_copy)))
4343 {
4344 /* Symbol doesn't need a copy reloc and isn't referenced
4345 locally. We only allow a branch to a symbol with
4346 non-default visibility. */
4347 fail = (!branch
4348 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4349 }
4350
4351 if (fail)
4352 {
4353 const char *fmt;
4354 const char *v;
4355 const char *pic = "";
4356
4357 switch (ELF_ST_VISIBILITY (h->other))
4358 {
4359 case STV_HIDDEN:
4360 v = _("hidden symbol");
4361 break;
4362 case STV_INTERNAL:
4363 v = _("internal symbol");
4364 break;
4365 case STV_PROTECTED:
4366 v = _("protected symbol");
4367 break;
4368 default:
4369 v = _("symbol");
4370 pic = _("; recompile with -fPIC");
4371 break;
4372 }
4373
4374 if (h->def_regular)
4375 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4376 else
4377 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4378
4379 (*_bfd_error_handler) (fmt, input_bfd,
4380 x86_64_elf_howto_table[r_type].name,
4381 v, h->root.root.string, pic);
4382 bfd_set_error (bfd_error_bad_value);
4383 return FALSE;
4384 }
4385 }
4386 /* Fall through. */
4387
4388 case R_X86_64_8:
4389 case R_X86_64_16:
4390 case R_X86_64_32:
4391 case R_X86_64_PC64:
4392 case R_X86_64_64:
4393 /* FIXME: The ABI says the linker should make sure the value is
4394 the same when it's zero-extended to 64 bits. */
4395
4396 direct:
4397 if ((input_section->flags & SEC_ALLOC) == 0)
4398 break;
4399
4400 /* Don't copy a pc-relative relocation into the output file
4401 if the symbol needs a copy reloc or the symbol is undefined
4402 when building an executable. Copy dynamic function pointer
4403 relocations. */
4404 if ((bfd_link_pic (info)
4405 && !(bfd_link_executable (info)
4406 && h != NULL
4407 && (h->needs_copy
4408 || eh->needs_copy
4409 || h->root.type == bfd_link_hash_undefined)
4410 && IS_X86_64_PCREL_TYPE (r_type))
4411 && (h == NULL
4412 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4413 || h->root.type != bfd_link_hash_undefweak)
4414 && ((! IS_X86_64_PCREL_TYPE (r_type)
4415 && r_type != R_X86_64_SIZE32
4416 && r_type != R_X86_64_SIZE64)
4417 || ! SYMBOL_CALLS_LOCAL (info, h)))
4418 || (ELIMINATE_COPY_RELOCS
4419 && !bfd_link_pic (info)
4420 && h != NULL
4421 && h->dynindx != -1
4422 && (!h->non_got_ref || eh->func_pointer_refcount > 0)
4423 && ((h->def_dynamic
4424 && !h->def_regular)
4425 || h->root.type == bfd_link_hash_undefweak
4426 || h->root.type == bfd_link_hash_undefined)))
4427 {
4428 Elf_Internal_Rela outrel;
4429 bfd_boolean skip, relocate;
4430 asection *sreloc;
4431
4432 /* When generating a shared object, these relocations
4433 are copied into the output file to be resolved at run
4434 time. */
4435 skip = FALSE;
4436 relocate = FALSE;
4437
4438 outrel.r_offset =
4439 _bfd_elf_section_offset (output_bfd, info, input_section,
4440 rel->r_offset);
4441 if (outrel.r_offset == (bfd_vma) -1)
4442 skip = TRUE;
4443 else if (outrel.r_offset == (bfd_vma) -2)
4444 skip = TRUE, relocate = TRUE;
4445
4446 outrel.r_offset += (input_section->output_section->vma
4447 + input_section->output_offset);
4448
4449 if (skip)
4450 memset (&outrel, 0, sizeof outrel);
4451
4452 /* h->dynindx may be -1 if this symbol was marked to
4453 become local. */
4454 else if (h != NULL
4455 && h->dynindx != -1
4456 && (IS_X86_64_PCREL_TYPE (r_type)
4457 || ! bfd_link_pic (info)
4458 || ! SYMBOLIC_BIND (info, h)
4459 || ! h->def_regular))
4460 {
4461 outrel.r_info = htab->r_info (h->dynindx, r_type);
4462 outrel.r_addend = rel->r_addend;
4463 }
4464 else
4465 {
4466 /* This symbol is local, or marked to become local. */
4467 if (r_type == htab->pointer_r_type)
4468 {
4469 relocate = TRUE;
4470 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4471 outrel.r_addend = relocation + rel->r_addend;
4472 }
4473 else if (r_type == R_X86_64_64
4474 && !ABI_64_P (output_bfd))
4475 {
4476 relocate = TRUE;
4477 outrel.r_info = htab->r_info (0,
4478 R_X86_64_RELATIVE64);
4479 outrel.r_addend = relocation + rel->r_addend;
4480 /* Check addend overflow. */
4481 if ((outrel.r_addend & 0x80000000)
4482 != (rel->r_addend & 0x80000000))
4483 {
4484 const char *name;
4485 int addend = rel->r_addend;
4486 if (h && h->root.root.string)
4487 name = h->root.root.string;
4488 else
4489 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4490 sym, NULL);
4491 if (addend < 0)
4492 (*_bfd_error_handler)
4493 (_("%B: addend -0x%x in relocation %s against "
4494 "symbol `%s' at 0x%lx in section `%A' is "
4495 "out of range"),
4496 input_bfd, input_section, addend,
4497 x86_64_elf_howto_table[r_type].name,
4498 name, (unsigned long) rel->r_offset);
4499 else
4500 (*_bfd_error_handler)
4501 (_("%B: addend 0x%x in relocation %s against "
4502 "symbol `%s' at 0x%lx in section `%A' is "
4503 "out of range"),
4504 input_bfd, input_section, addend,
4505 x86_64_elf_howto_table[r_type].name,
4506 name, (unsigned long) rel->r_offset);
4507 bfd_set_error (bfd_error_bad_value);
4508 return FALSE;
4509 }
4510 }
4511 else
4512 {
4513 long sindx;
4514
4515 if (bfd_is_abs_section (sec))
4516 sindx = 0;
4517 else if (sec == NULL || sec->owner == NULL)
4518 {
4519 bfd_set_error (bfd_error_bad_value);
4520 return FALSE;
4521 }
4522 else
4523 {
4524 asection *osec;
4525
4526 /* We are turning this relocation into one
4527 against a section symbol. It would be
4528 proper to subtract the symbol's value,
4529 osec->vma, from the emitted reloc addend,
4530 but ld.so expects buggy relocs. */
4531 osec = sec->output_section;
4532 sindx = elf_section_data (osec)->dynindx;
4533 if (sindx == 0)
4534 {
4535 asection *oi = htab->elf.text_index_section;
4536 sindx = elf_section_data (oi)->dynindx;
4537 }
4538 BFD_ASSERT (sindx != 0);
4539 }
4540
4541 outrel.r_info = htab->r_info (sindx, r_type);
4542 outrel.r_addend = relocation + rel->r_addend;
4543 }
4544 }
4545
4546 sreloc = elf_section_data (input_section)->sreloc;
4547
4548 if (sreloc == NULL || sreloc->contents == NULL)
4549 {
4550 r = bfd_reloc_notsupported;
4551 goto check_relocation_error;
4552 }
4553
4554 elf_append_rela (output_bfd, sreloc, &outrel);
4555
4556 /* If this reloc is against an external symbol, we do
4557 not want to fiddle with the addend. Otherwise, we
4558 need to include the symbol value so that it becomes
4559 an addend for the dynamic reloc. */
4560 if (! relocate)
4561 continue;
4562 }
4563
4564 break;
4565
4566 case R_X86_64_TLSGD:
4567 case R_X86_64_GOTPC32_TLSDESC:
4568 case R_X86_64_TLSDESC_CALL:
4569 case R_X86_64_GOTTPOFF:
4570 tls_type = GOT_UNKNOWN;
4571 if (h == NULL && local_got_offsets)
4572 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4573 else if (h != NULL)
4574 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4575
4576 if (! elf_x86_64_tls_transition (info, input_bfd,
4577 input_section, contents,
4578 symtab_hdr, sym_hashes,
4579 &r_type, tls_type, rel,
4580 relend, h, r_symndx))
4581 return FALSE;
4582
4583 if (r_type == R_X86_64_TPOFF32)
4584 {
4585 bfd_vma roff = rel->r_offset;
4586
4587 BFD_ASSERT (! unresolved_reloc);
4588
4589 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4590 {
4591 /* GD->LE transition. For 64bit, change
4592 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4593 .word 0x6666; rex64; call __tls_get_addr
4594 into:
4595 movq %fs:0, %rax
4596 leaq foo@tpoff(%rax), %rax
4597 For 32bit, change
4598 leaq foo@tlsgd(%rip), %rdi
4599 .word 0x6666; rex64; call __tls_get_addr
4600 into:
4601 movl %fs:0, %eax
4602 leaq foo@tpoff(%rax), %rax
4603 For largepic, change:
4604 leaq foo@tlsgd(%rip), %rdi
4605 movabsq $__tls_get_addr@pltoff, %rax
4606 addq %rbx, %rax
4607 call *%rax
4608 into:
4609 movq %fs:0, %rax
4610 leaq foo@tpoff(%rax), %rax
4611 nopw 0x0(%rax,%rax,1) */
4612 int largepic = 0;
4613 if (ABI_64_P (output_bfd)
4614 && contents[roff + 5] == (bfd_byte) '\xb8')
4615 {
4616 memcpy (contents + roff - 3,
4617 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4618 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4619 largepic = 1;
4620 }
4621 else if (ABI_64_P (output_bfd))
4622 memcpy (contents + roff - 4,
4623 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4624 16);
4625 else
4626 memcpy (contents + roff - 3,
4627 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4628 15);
4629 bfd_put_32 (output_bfd,
4630 elf_x86_64_tpoff (info, relocation),
4631 contents + roff + 8 + largepic);
4632 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4633 rel++;
4634 continue;
4635 }
4636 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4637 {
4638 /* GDesc -> LE transition.
4639 It's originally something like:
4640 leaq x@tlsdesc(%rip), %rax
4641
4642 Change it to:
4643 movl $x@tpoff, %rax. */
4644
4645 unsigned int val, type;
4646
4647 type = bfd_get_8 (input_bfd, contents + roff - 3);
4648 val = bfd_get_8 (input_bfd, contents + roff - 1);
4649 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4650 contents + roff - 3);
4651 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4652 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4653 contents + roff - 1);
4654 bfd_put_32 (output_bfd,
4655 elf_x86_64_tpoff (info, relocation),
4656 contents + roff);
4657 continue;
4658 }
4659 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4660 {
4661 /* GDesc -> LE transition.
4662 It's originally:
4663 call *(%rax)
4664 Turn it into:
4665 xchg %ax,%ax. */
4666 bfd_put_8 (output_bfd, 0x66, contents + roff);
4667 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4668 continue;
4669 }
4670 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4671 {
4672 /* IE->LE transition:
4673 For 64bit, originally it can be one of:
4674 movq foo@gottpoff(%rip), %reg
4675 addq foo@gottpoff(%rip), %reg
4676 We change it into:
4677 movq $foo, %reg
4678 leaq foo(%reg), %reg
4679 addq $foo, %reg.
4680 For 32bit, originally it can be one of:
4681 movq foo@gottpoff(%rip), %reg
4682 addl foo@gottpoff(%rip), %reg
4683 We change it into:
4684 movq $foo, %reg
4685 leal foo(%reg), %reg
4686 addl $foo, %reg. */
4687
4688 unsigned int val, type, reg;
4689
4690 if (roff >= 3)
4691 val = bfd_get_8 (input_bfd, contents + roff - 3);
4692 else
4693 val = 0;
4694 type = bfd_get_8 (input_bfd, contents + roff - 2);
4695 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4696 reg >>= 3;
4697 if (type == 0x8b)
4698 {
4699 /* movq */
4700 if (val == 0x4c)
4701 bfd_put_8 (output_bfd, 0x49,
4702 contents + roff - 3);
4703 else if (!ABI_64_P (output_bfd) && val == 0x44)
4704 bfd_put_8 (output_bfd, 0x41,
4705 contents + roff - 3);
4706 bfd_put_8 (output_bfd, 0xc7,
4707 contents + roff - 2);
4708 bfd_put_8 (output_bfd, 0xc0 | reg,
4709 contents + roff - 1);
4710 }
4711 else if (reg == 4)
4712 {
4713 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4714 is special */
4715 if (val == 0x4c)
4716 bfd_put_8 (output_bfd, 0x49,
4717 contents + roff - 3);
4718 else if (!ABI_64_P (output_bfd) && val == 0x44)
4719 bfd_put_8 (output_bfd, 0x41,
4720 contents + roff - 3);
4721 bfd_put_8 (output_bfd, 0x81,
4722 contents + roff - 2);
4723 bfd_put_8 (output_bfd, 0xc0 | reg,
4724 contents + roff - 1);
4725 }
4726 else
4727 {
4728 /* addq/addl -> leaq/leal */
4729 if (val == 0x4c)
4730 bfd_put_8 (output_bfd, 0x4d,
4731 contents + roff - 3);
4732 else if (!ABI_64_P (output_bfd) && val == 0x44)
4733 bfd_put_8 (output_bfd, 0x45,
4734 contents + roff - 3);
4735 bfd_put_8 (output_bfd, 0x8d,
4736 contents + roff - 2);
4737 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4738 contents + roff - 1);
4739 }
4740 bfd_put_32 (output_bfd,
4741 elf_x86_64_tpoff (info, relocation),
4742 contents + roff);
4743 continue;
4744 }
4745 else
4746 BFD_ASSERT (FALSE);
4747 }
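	      /* A concrete byte-level example of the IE->LE rewrite above:
		 "movq foo@gottpoff(%rip), %rax" is 48 8b 05 <rel32>;
		 keeping the REX prefix and rewriting the opcode and ModRM
		 bytes gives 48 c7 c0 <imm32>, i.e.
		 "movq $foo@tpoff, %rax", with the 32-bit tpoff value
		 written at roff.  */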
4748
4749 if (htab->elf.sgot == NULL)
4750 abort ();
4751
4752 if (h != NULL)
4753 {
4754 off = h->got.offset;
4755 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4756 }
4757 else
4758 {
4759 if (local_got_offsets == NULL)
4760 abort ();
4761
4762 off = local_got_offsets[r_symndx];
4763 offplt = local_tlsdesc_gotents[r_symndx];
4764 }
4765
4766 if ((off & 1) != 0)
4767 off &= ~1;
4768 else
4769 {
4770 Elf_Internal_Rela outrel;
4771 int dr_type, indx;
4772 asection *sreloc;
4773
4774 if (htab->elf.srelgot == NULL)
4775 abort ();
4776
4777 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4778
4779 if (GOT_TLS_GDESC_P (tls_type))
4780 {
4781 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4782 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4783 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4784 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4785 + htab->elf.sgotplt->output_offset
4786 + offplt
4787 + htab->sgotplt_jump_table_size);
4788 sreloc = htab->elf.srelplt;
4789 if (indx == 0)
4790 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4791 else
4792 outrel.r_addend = 0;
4793 elf_append_rela (output_bfd, sreloc, &outrel);
4794 }
4795
4796 sreloc = htab->elf.srelgot;
4797
4798 outrel.r_offset = (htab->elf.sgot->output_section->vma
4799 + htab->elf.sgot->output_offset + off);
4800
4801 if (GOT_TLS_GD_P (tls_type))
4802 dr_type = R_X86_64_DTPMOD64;
4803 else if (GOT_TLS_GDESC_P (tls_type))
4804 goto dr_done;
4805 else
4806 dr_type = R_X86_64_TPOFF64;
4807
4808 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4809 outrel.r_addend = 0;
4810 if ((dr_type == R_X86_64_TPOFF64
4811 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4812 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4813 outrel.r_info = htab->r_info (indx, dr_type);
4814
4815 elf_append_rela (output_bfd, sreloc, &outrel);
4816
4817 if (GOT_TLS_GD_P (tls_type))
4818 {
4819 if (indx == 0)
4820 {
4821 BFD_ASSERT (! unresolved_reloc);
4822 bfd_put_64 (output_bfd,
4823 relocation - elf_x86_64_dtpoff_base (info),
4824 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4825 }
4826 else
4827 {
4828 bfd_put_64 (output_bfd, 0,
4829 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4830 outrel.r_info = htab->r_info (indx,
4831 R_X86_64_DTPOFF64);
4832 outrel.r_offset += GOT_ENTRY_SIZE;
4833 elf_append_rela (output_bfd, sreloc,
4834 &outrel);
4835 }
4836 }
4837
4838 dr_done:
4839 if (h != NULL)
4840 h->got.offset |= 1;
4841 else
4842 local_got_offsets[r_symndx] |= 1;
4843 }
4844
4845 if (off >= (bfd_vma) -2
4846 && ! GOT_TLS_GDESC_P (tls_type))
4847 abort ();
4848 if (r_type == ELF32_R_TYPE (rel->r_info))
4849 {
4850 if (r_type == R_X86_64_GOTPC32_TLSDESC
4851 || r_type == R_X86_64_TLSDESC_CALL)
4852 relocation = htab->elf.sgotplt->output_section->vma
4853 + htab->elf.sgotplt->output_offset
4854 + offplt + htab->sgotplt_jump_table_size;
4855 else
4856 relocation = htab->elf.sgot->output_section->vma
4857 + htab->elf.sgot->output_offset + off;
4858 unresolved_reloc = FALSE;
4859 }
4860 else
4861 {
4862 bfd_vma roff = rel->r_offset;
4863
4864 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4865 {
4866 /* GD->IE transition. For 64bit, change
4867 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4868 .word 0x6666; rex64; call __tls_get_addr@plt
4869 into:
4870 movq %fs:0, %rax
4871 addq foo@gottpoff(%rip), %rax
4872 For 32bit, change
4873 leaq foo@tlsgd(%rip), %rdi
4874 .word 0x6666; rex64; call __tls_get_addr@plt
4875 into:
4876 movl %fs:0, %eax
4877 addq foo@gottpoff(%rip), %rax
4878 For largepic, change:
4879 leaq foo@tlsgd(%rip), %rdi
4880 movabsq $__tls_get_addr@pltoff, %rax
4881 addq %rbx, %rax
4882 call *%rax
4883 into:
4884 movq %fs:0, %rax
4885 addq foo@gottpoff(%rax), %rax
4886 nopw 0x0(%rax,%rax,1) */
4887 int largepic = 0;
4888 if (ABI_64_P (output_bfd)
4889 && contents[roff + 5] == (bfd_byte) '\xb8')
4890 {
4891 memcpy (contents + roff - 3,
4892 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4893 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4894 largepic = 1;
4895 }
4896 else if (ABI_64_P (output_bfd))
4897 memcpy (contents + roff - 4,
4898 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4899 16);
4900 else
4901 memcpy (contents + roff - 3,
4902 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4903 15);
4904
4905 relocation = (htab->elf.sgot->output_section->vma
4906 + htab->elf.sgot->output_offset + off
4907 - roff
4908 - largepic
4909 - input_section->output_section->vma
4910 - input_section->output_offset
4911 - 12);
4912 bfd_put_32 (output_bfd, relocation,
4913 contents + roff + 8 + largepic);
4914 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4915 rel++;
4916 continue;
4917 }
4918 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4919 {
4920 /* GDesc -> IE transition.
4921 It's originally something like:
4922 leaq x@tlsdesc(%rip), %rax
4923
4924 Change it to:
4925 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4926
4927 /* Now modify the instruction as appropriate. To
4928 turn a leaq into a movq in the form we use it, it
4929 suffices to change the second byte from 0x8d to
4930 0x8b. */
4931 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4932
4933 bfd_put_32 (output_bfd,
4934 htab->elf.sgot->output_section->vma
4935 + htab->elf.sgot->output_offset + off
4936 - rel->r_offset
4937 - input_section->output_section->vma
4938 - input_section->output_offset
4939 - 4,
4940 contents + roff);
4941 continue;
4942 }
4943 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4944 {
4945 /* GDesc -> IE transition.
4946 It's originally:
4947 call *(%rax)
4948
4949 Change it to:
4950 xchg %ax, %ax. */
4951
4952 bfd_put_8 (output_bfd, 0x66, contents + roff);
4953 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4954 continue;
4955 }
4956 else
4957 BFD_ASSERT (FALSE);
4958 }
4959 break;
4960
4961 case R_X86_64_TLSLD:
4962 if (! elf_x86_64_tls_transition (info, input_bfd,
4963 input_section, contents,
4964 symtab_hdr, sym_hashes,
4965 &r_type, GOT_UNKNOWN,
4966 rel, relend, h, r_symndx))
4967 return FALSE;
4968
4969 if (r_type != R_X86_64_TLSLD)
4970 {
4971 /* LD->LE transition:
4972 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4973 For 64bit, we change it into:
4974 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4975 For 32bit, we change it into:
4976 nopl 0x0(%rax); movl %fs:0, %eax.
4977 For largepic, change:
4978 leaq foo@tlsgd(%rip), %rdi
4979 movabsq $__tls_get_addr@pltoff, %rax
4980 addq %rbx, %rax
4981 call *%rax
4982 into:
4983 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4984 movq %fs:0, %eax */
4985
4986 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4987 if (ABI_64_P (output_bfd)
4988 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4989 memcpy (contents + rel->r_offset - 3,
4990 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4991 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4992 else if (ABI_64_P (output_bfd))
4993 memcpy (contents + rel->r_offset - 3,
4994 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4995 else
4996 memcpy (contents + rel->r_offset - 3,
4997 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4998 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4999 rel++;
5000 continue;
5001 }
5002
5003 if (htab->elf.sgot == NULL)
5004 abort ();
5005
5006 off = htab->tls_ld_got.offset;
5007 if (off & 1)
5008 off &= ~1;
5009 else
5010 {
5011 Elf_Internal_Rela outrel;
5012
5013 if (htab->elf.srelgot == NULL)
5014 abort ();
5015
5016 outrel.r_offset = (htab->elf.sgot->output_section->vma
5017 + htab->elf.sgot->output_offset + off);
5018
5019 bfd_put_64 (output_bfd, 0,
5020 htab->elf.sgot->contents + off);
5021 bfd_put_64 (output_bfd, 0,
5022 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5023 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5024 outrel.r_addend = 0;
5025 elf_append_rela (output_bfd, htab->elf.srelgot,
5026 &outrel);
5027 htab->tls_ld_got.offset |= 1;
5028 }
5029 relocation = htab->elf.sgot->output_section->vma
5030 + htab->elf.sgot->output_offset + off;
5031 unresolved_reloc = FALSE;
5032 break;
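	  /* Note that htab->tls_ld_got describes a single module-wide GOT
	     entry pair shared by every R_X86_64_TLSLD use: the first word
	     gets the module ID through the R_X86_64_DTPMOD64 relocation
	     emitted above and the second word stays zero.  */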
5033
5034 case R_X86_64_DTPOFF32:
5035 if (!bfd_link_executable (info)
5036 || (input_section->flags & SEC_CODE) == 0)
5037 relocation -= elf_x86_64_dtpoff_base (info);
5038 else
5039 relocation = elf_x86_64_tpoff (info, relocation);
5040 break;
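	  /* In other words: when linking an executable and the reloc is
	     in a code section (typically the leftover of an LD->LE
	     transition), the @dtpoff value is resolved as an offset from
	     the thread pointer; otherwise it stays DTP-relative, i.e.
	     relative to the PT_TLS segment start.  */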
5041
5042 case R_X86_64_TPOFF32:
5043 case R_X86_64_TPOFF64:
5044 BFD_ASSERT (bfd_link_executable (info));
5045 relocation = elf_x86_64_tpoff (info, relocation);
5046 break;
5047
5048 case R_X86_64_DTPOFF64:
5049 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5050 relocation -= elf_x86_64_dtpoff_base (info);
5051 break;
5052
5053 default:
5054 break;
5055 }
5056
5057 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5058 because such sections are not SEC_ALLOC and thus ld.so will
5059 not process them. */
5060 if (unresolved_reloc
5061 && !((input_section->flags & SEC_DEBUGGING) != 0
5062 && h->def_dynamic)
5063 && _bfd_elf_section_offset (output_bfd, info, input_section,
5064 rel->r_offset) != (bfd_vma) -1)
5065 {
5066 (*_bfd_error_handler)
5067 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5068 input_bfd,
5069 input_section,
5070 (long) rel->r_offset,
5071 howto->name,
5072 h->root.root.string);
5073 return FALSE;
5074 }
5075
5076 do_relocation:
5077 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5078 contents, rel->r_offset,
5079 relocation, rel->r_addend);
5080
5081 check_relocation_error:
5082 if (r != bfd_reloc_ok)
5083 {
5084 const char *name;
5085
5086 if (h != NULL)
5087 name = h->root.root.string;
5088 else
5089 {
5090 name = bfd_elf_string_from_elf_section (input_bfd,
5091 symtab_hdr->sh_link,
5092 sym->st_name);
5093 if (name == NULL)
5094 return FALSE;
5095 if (*name == '\0')
5096 name = bfd_section_name (input_bfd, sec);
5097 }
5098
5099 if (r == bfd_reloc_overflow)
5100 {
5101 if (! ((*info->callbacks->reloc_overflow)
5102 (info, (h ? &h->root : NULL), name, howto->name,
5103 (bfd_vma) 0, input_bfd, input_section,
5104 rel->r_offset)))
5105 return FALSE;
5106 }
5107 else
5108 {
5109 (*_bfd_error_handler)
5110 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5111 input_bfd, input_section,
5112 (long) rel->r_offset, name, (int) r);
5113 return FALSE;
5114 }
5115 }
5116 }
5117
5118 return TRUE;
5119 }
5120
5121 /* Finish up dynamic symbol handling. We set the contents of various
5122 dynamic sections here. */
5123
5124 static bfd_boolean
5125 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5126 struct bfd_link_info *info,
5127 struct elf_link_hash_entry *h,
5128 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
5129 {
5130 struct elf_x86_64_link_hash_table *htab;
5131 const struct elf_x86_64_backend_data *abed;
5132 bfd_boolean use_plt_bnd;
5133 struct elf_x86_64_link_hash_entry *eh;
5134
5135 htab = elf_x86_64_hash_table (info);
5136 if (htab == NULL)
5137 return FALSE;
5138
5139 /* Use the MPX backend data in case of BND relocations. Use the
5140 .plt_bnd section only if there is a .plt section. */
5141 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5142 abed = (use_plt_bnd
5143 ? &elf_x86_64_bnd_arch_bed
5144 : get_elf_x86_64_backend_data (output_bfd));
5145
5146 eh = (struct elf_x86_64_link_hash_entry *) h;
5147
5148 if (h->plt.offset != (bfd_vma) -1)
5149 {
5150 bfd_vma plt_index;
5151 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5152 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5153 Elf_Internal_Rela rela;
5154 bfd_byte *loc;
5155 asection *plt, *gotplt, *relplt, *resolved_plt;
5156 const struct elf_backend_data *bed;
5157 bfd_vma plt_got_pcrel_offset;
5158
5159 /* When building a static executable, use .iplt, .igot.plt and
5160 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5161 if (htab->elf.splt != NULL)
5162 {
5163 plt = htab->elf.splt;
5164 gotplt = htab->elf.sgotplt;
5165 relplt = htab->elf.srelplt;
5166 }
5167 else
5168 {
5169 plt = htab->elf.iplt;
5170 gotplt = htab->elf.igotplt;
5171 relplt = htab->elf.irelplt;
5172 }
5173
5174 /* This symbol has an entry in the procedure linkage table. Set
5175 it up. */
5176 if ((h->dynindx == -1
5177 && !((h->forced_local || bfd_link_executable (info))
5178 && h->def_regular
5179 && h->type == STT_GNU_IFUNC))
5180 || plt == NULL
5181 || gotplt == NULL
5182 || relplt == NULL)
5183 abort ();
5184
5185 /* Get the index in the procedure linkage table which
5186 corresponds to this symbol. This is the index of this symbol
5187 in all the symbols for which we are making plt entries. The
5188 first entry in the procedure linkage table is reserved.
5189
5190 Get the offset into the .got table of the entry that
5191 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5192 bytes. The first three are reserved for the dynamic linker.
5193
5194 For static executables, we don't reserve anything. */
5195
5196 if (plt == htab->elf.splt)
5197 {
5198 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5199 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5200 }
5201 else
5202 {
5203 got_offset = h->plt.offset / abed->plt_entry_size;
5204 got_offset = got_offset * GOT_ENTRY_SIZE;
5205 }
5206
5207 plt_plt_insn_end = abed->plt_plt_insn_end;
5208 plt_plt_offset = abed->plt_plt_offset;
5209 plt_got_insn_size = abed->plt_got_insn_size;
5210 plt_got_offset = abed->plt_got_offset;
5211 if (use_plt_bnd)
5212 {
5213 /* Use the second PLT with BND relocations. */
5214 const bfd_byte *plt_entry, *plt2_entry;
5215
5216 if (eh->has_bnd_reloc)
5217 {
5218 plt_entry = elf_x86_64_bnd_plt_entry;
5219 plt2_entry = elf_x86_64_bnd_plt2_entry;
5220 }
5221 else
5222 {
5223 plt_entry = elf_x86_64_legacy_plt_entry;
5224 plt2_entry = elf_x86_64_legacy_plt2_entry;
5225
5226 /* Subtract 1 since there is no BND prefix. */
5227 plt_plt_insn_end -= 1;
5228 plt_plt_offset -= 1;
5229 plt_got_insn_size -= 1;
5230 plt_got_offset -= 1;
5231 }
5232
5233 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5234 == sizeof (elf_x86_64_legacy_plt_entry));
5235
5236 /* Fill in the entry in the procedure linkage table. */
5237 memcpy (plt->contents + h->plt.offset,
5238 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5239 /* Fill in the entry in the second PLT. */
5240 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5241 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5242
5243 resolved_plt = htab->plt_bnd;
5244 plt_offset = eh->plt_bnd.offset;
5245 }
5246 else
5247 {
5248 /* Fill in the entry in the procedure linkage table. */
5249 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5250 abed->plt_entry_size);
5251
5252 resolved_plt = plt;
5253 plt_offset = h->plt.offset;
5254 }
5255
5256 /* Insert the relocation positions of the plt section. */
5257
5258 /* Put in the offset to the GOT entry for the PC-relative instruction
5259 referring to it, subtracting the size of that instruction. */
5260 plt_got_pcrel_offset = (gotplt->output_section->vma
5261 + gotplt->output_offset
5262 + got_offset
5263 - resolved_plt->output_section->vma
5264 - resolved_plt->output_offset
5265 - plt_offset
5266 - plt_got_insn_size);
5267
5268 /* Check PC-relative offset overflow in PLT entry. */
5269 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5270 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5271 output_bfd, h->root.root.string);
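      /* The check above relies on unsigned arithmetic: adding 0x80000000
	 maps every displacement that fits in a signed 32-bit field onto
	 0 .. 0xffffffff, so any larger result cannot be encoded in the
	 4-byte PC-relative field of the PLT entry.  */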
5272
5273 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5274 resolved_plt->contents + plt_offset + plt_got_offset);
5275
5276 /* Fill in the entry in the global offset table; initially this
5277 points to the second part of the PLT entry. */
5278 bfd_put_64 (output_bfd, (plt->output_section->vma
5279 + plt->output_offset
5280 + h->plt.offset + abed->plt_lazy_offset),
5281 gotplt->contents + got_offset);
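      /* With lazy binding this .got.plt word initially points back into
	 the PLT entry itself, at abed->plt_lazy_offset (the pushq of the
	 relocation index), so the first call executes that pushq, jumps
	 to PLT0 and enters the dynamic linker, which then replaces the
	 word with the real function address.  */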
5282
5283 /* Fill in the entry in the .rela.plt section. */
5284 rela.r_offset = (gotplt->output_section->vma
5285 + gotplt->output_offset
5286 + got_offset);
5287 if (h->dynindx == -1
5288 || ((bfd_link_executable (info)
5289 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5290 && h->def_regular
5291 && h->type == STT_GNU_IFUNC))
5292 {
5293 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5294 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5295 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5296 rela.r_addend = (h->root.u.def.value
5297 + h->root.u.def.section->output_section->vma
5298 + h->root.u.def.section->output_offset);
5299 /* R_X86_64_IRELATIVE comes last. */
5300 plt_index = htab->next_irelative_index--;
5301 }
5302 else
5303 {
5304 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5305 rela.r_addend = 0;
5306 plt_index = htab->next_jump_slot_index++;
5307 }
5308
5309 /* Don't fill PLT entry for static executables. */
5310 if (plt == htab->elf.splt)
5311 {
5312 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5313
5314 /* Put relocation index. */
5315 bfd_put_32 (output_bfd, plt_index,
5316 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5317
5318 /* Put offset for jmp .PLT0 and check for overflow. We don't
5319 check relocation index for overflow since branch displacement
5320 will overflow first. */
5321 if (plt0_offset > 0x80000000)
5322 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5323 output_bfd, h->root.root.string);
5324 bfd_put_32 (output_bfd, - plt0_offset,
5325 plt->contents + h->plt.offset + plt_plt_offset);
5326 }
5327
5328 bed = get_elf_backend_data (output_bfd);
5329 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5330 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5331 }
5332 else if (eh->plt_got.offset != (bfd_vma) -1)
5333 {
5334 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5335 asection *plt, *got;
5336 bfd_boolean got_after_plt;
5337 int32_t got_pcrel_offset;
5338 const bfd_byte *got_plt_entry;
5339
5340 /* Set the entry in the GOT procedure linkage table. */
5341 plt = htab->plt_got;
5342 got = htab->elf.sgot;
5343 got_offset = h->got.offset;
5344
5345 if (got_offset == (bfd_vma) -1
5346 || h->type == STT_GNU_IFUNC
5347 || plt == NULL
5348 || got == NULL)
5349 abort ();
5350
5351 /* Use the second PLT entry template for the GOT PLT since they
5352 are identical. */
5353 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5354 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5355 if (eh->has_bnd_reloc)
5356 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5357 else
5358 {
5359 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5360
5361 /* Subtract 1 since there is no BND prefix. */
5362 plt_got_insn_size -= 1;
5363 plt_got_offset -= 1;
5364 }
5365
5366 /* Fill in the entry in the GOT procedure linkage table. */
5367 plt_offset = eh->plt_got.offset;
5368 memcpy (plt->contents + plt_offset,
5369 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5370
5371 /* Put in the offset to the GOT entry for the PC-relative
5372 instruction referring to it, subtracting the size of that instruction. */
5373 got_pcrel_offset = (got->output_section->vma
5374 + got->output_offset
5375 + got_offset
5376 - plt->output_section->vma
5377 - plt->output_offset
5378 - plt_offset
5379 - plt_got_insn_size);
5380
5381 /* Check PC-relative offset overflow in GOT PLT entry. */
5382 got_after_plt = got->output_section->vma > plt->output_section->vma;
5383 if ((got_after_plt && got_pcrel_offset < 0)
5384 || (!got_after_plt && got_pcrel_offset > 0))
5385 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5386 output_bfd, h->root.root.string);
5387
5388 bfd_put_32 (output_bfd, got_pcrel_offset,
5389 plt->contents + plt_offset + plt_got_offset);
5390 }
5391
5392 if (!h->def_regular
5393 && (h->plt.offset != (bfd_vma) -1
5394 || eh->plt_got.offset != (bfd_vma) -1))
5395 {
5396 /* Mark the symbol as undefined, rather than as defined in
5397 the .plt section. Leave the value if there were any
5398 relocations where pointer equality matters (this is a clue
5399 for the dynamic linker, to make function pointer
5400 comparisons work between an application and shared
5401 library), otherwise set it to zero. If a function is only
5402 called from a binary, there is no need to slow down
5403 shared libraries because of that. */
5404 sym->st_shndx = SHN_UNDEF;
5405 if (!h->pointer_equality_needed)
5406 sym->st_value = 0;
5407 }
5408
5409 if (h->got.offset != (bfd_vma) -1
5410 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5411 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5412 {
5413 Elf_Internal_Rela rela;
5414
5415 /* This symbol has an entry in the global offset table. Set it
5416 up. */
5417 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5418 abort ();
5419
5420 rela.r_offset = (htab->elf.sgot->output_section->vma
5421 + htab->elf.sgot->output_offset
5422 + (h->got.offset &~ (bfd_vma) 1));
5423
5424 /* If this is a static link, or it is a -Bsymbolic link and the
5425 symbol is defined locally or was forced to be local because
5426 of a version file, we just want to emit a RELATIVE reloc.
5427 The entry in the global offset table will already have been
5428 initialized in the relocate_section function. */
5429 if (h->def_regular
5430 && h->type == STT_GNU_IFUNC)
5431 {
5432 if (bfd_link_pic (info))
5433 {
5434 /* Generate R_X86_64_GLOB_DAT. */
5435 goto do_glob_dat;
5436 }
5437 else
5438 {
5439 asection *plt;
5440
5441 if (!h->pointer_equality_needed)
5442 abort ();
5443
5444 /* For a non-shared object, we can't use .got.plt, which
5445 contains the real function address, if we need pointer
5446 equality. We load the GOT entry with the PLT entry address. */
5447 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5448 bfd_put_64 (output_bfd, (plt->output_section->vma
5449 + plt->output_offset
5450 + h->plt.offset),
5451 htab->elf.sgot->contents + h->got.offset);
5452 return TRUE;
5453 }
5454 }
5455 else if (bfd_link_pic (info)
5456 && SYMBOL_REFERENCES_LOCAL (info, h))
5457 {
5458 if (!h->def_regular)
5459 return FALSE;
5460 BFD_ASSERT((h->got.offset & 1) != 0);
5461 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5462 rela.r_addend = (h->root.u.def.value
5463 + h->root.u.def.section->output_section->vma
5464 + h->root.u.def.section->output_offset);
5465 }
5466 else
5467 {
5468 BFD_ASSERT((h->got.offset & 1) == 0);
5469 do_glob_dat:
5470 bfd_put_64 (output_bfd, (bfd_vma) 0,
5471 htab->elf.sgot->contents + h->got.offset);
5472 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5473 rela.r_addend = 0;
5474 }
5475
5476 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5477 }
5478
5479 if (h->needs_copy)
5480 {
5481 Elf_Internal_Rela rela;
5482
5483 /* This symbol needs a copy reloc. Set it up. */
5484
5485 if (h->dynindx == -1
5486 || (h->root.type != bfd_link_hash_defined
5487 && h->root.type != bfd_link_hash_defweak)
5488 || htab->srelbss == NULL)
5489 abort ();
5490
5491 rela.r_offset = (h->root.u.def.value
5492 + h->root.u.def.section->output_section->vma
5493 + h->root.u.def.section->output_offset);
5494 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5495 rela.r_addend = 0;
5496 elf_append_rela (output_bfd, htab->srelbss, &rela);
5497 }
5498
5499 return TRUE;
5500 }
5501
5502 /* Finish up local dynamic symbol handling. We set the contents of
5503 various dynamic sections here. */
5504
5505 static bfd_boolean
5506 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5507 {
5508 struct elf_link_hash_entry *h
5509 = (struct elf_link_hash_entry *) *slot;
5510 struct bfd_link_info *info
5511 = (struct bfd_link_info *) inf;
5512
5513 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5514 info, h, NULL);
5515 }
5516
5517 /* Used to decide how to sort relocs in an optimal manner for the
5518 dynamic linker, before writing them out. */
5519
5520 static enum elf_reloc_type_class
5521 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
5522 const asection *rel_sec ATTRIBUTE_UNUSED,
5523 const Elf_Internal_Rela *rela)
5524 {
5525 bfd *abfd = info->output_bfd;
5526 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5527 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
5528 unsigned long r_symndx = htab->r_sym (rela->r_info);
5529 Elf_Internal_Sym sym;
5530
5531 if (htab->elf.dynsym == NULL
5532 || !bed->s->swap_symbol_in (abfd,
5533 (htab->elf.dynsym->contents
5534 + r_symndx * bed->s->sizeof_sym),
5535 0, &sym))
5536 abort ();
5537
5538 /* Check relocation against STT_GNU_IFUNC symbol. */
5539 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
5540 return reloc_class_ifunc;
5541
5542 switch ((int) ELF32_R_TYPE (rela->r_info))
5543 {
5544 case R_X86_64_RELATIVE:
5545 case R_X86_64_RELATIVE64:
5546 return reloc_class_relative;
5547 case R_X86_64_JUMP_SLOT:
5548 return reloc_class_plt;
5549 case R_X86_64_COPY:
5550 return reloc_class_copy;
5551 default:
5552 return reloc_class_normal;
5553 }
5554 }
5555
5556 /* Finish up the dynamic sections. */
5557
5558 static bfd_boolean
5559 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5560 struct bfd_link_info *info)
5561 {
5562 struct elf_x86_64_link_hash_table *htab;
5563 bfd *dynobj;
5564 asection *sdyn;
5565 const struct elf_x86_64_backend_data *abed;
5566
5567 htab = elf_x86_64_hash_table (info);
5568 if (htab == NULL)
5569 return FALSE;
5570
5571 /* Use the MPX backend data in case of BND relocations. Use the
5572 .plt_bnd section only if there is a .plt section. */
5573 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5574 ? &elf_x86_64_bnd_arch_bed
5575 : get_elf_x86_64_backend_data (output_bfd));
5576
5577 dynobj = htab->elf.dynobj;
5578 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5579
5580 if (htab->elf.dynamic_sections_created)
5581 {
5582 bfd_byte *dyncon, *dynconend;
5583 const struct elf_backend_data *bed;
5584 bfd_size_type sizeof_dyn;
5585
5586 if (sdyn == NULL || htab->elf.sgot == NULL)
5587 abort ();
5588
5589 bed = get_elf_backend_data (dynobj);
5590 sizeof_dyn = bed->s->sizeof_dyn;
5591 dyncon = sdyn->contents;
5592 dynconend = sdyn->contents + sdyn->size;
5593 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5594 {
5595 Elf_Internal_Dyn dyn;
5596 asection *s;
5597
5598 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5599
5600 switch (dyn.d_tag)
5601 {
5602 default:
5603 continue;
5604
5605 case DT_PLTGOT:
5606 s = htab->elf.sgotplt;
5607 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5608 break;
5609
5610 case DT_JMPREL:
5611 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5612 break;
5613
5614 case DT_PLTRELSZ:
5615 s = htab->elf.srelplt->output_section;
5616 dyn.d_un.d_val = s->size;
5617 break;
5618
5619 case DT_RELASZ:
5620 /* The procedure linkage table relocs (DT_JMPREL) should
5621 not be included in the overall relocs (DT_RELA).
5622 Therefore, we override the DT_RELASZ entry here to
5623 make it not include the JMPREL relocs. Since the
5624 linker script arranges for .rela.plt to follow all
5625 other relocation sections, we don't have to worry
5626 about changing the DT_RELA entry. */
5627 if (htab->elf.srelplt != NULL)
5628 {
5629 s = htab->elf.srelplt->output_section;
5630 dyn.d_un.d_val -= s->size;
5631 }
5632 break;
5633
5634 case DT_TLSDESC_PLT:
5635 s = htab->elf.splt;
5636 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5637 + htab->tlsdesc_plt;
5638 break;
5639
5640 case DT_TLSDESC_GOT:
5641 s = htab->elf.sgot;
5642 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5643 + htab->tlsdesc_got;
5644 break;
5645 }
5646
5647 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5648 }
5649
5650 /* Fill in the special first entry in the procedure linkage table. */
5651 if (htab->elf.splt && htab->elf.splt->size > 0)
5652 {
5653 /* Fill in the first entry in the procedure linkage table. */
5654 memcpy (htab->elf.splt->contents,
5655 abed->plt0_entry, abed->plt_entry_size);
5656 /* Add the offset for pushq GOT+8(%rip); since the instruction
5657 is 6 bytes long, subtract this value. */
5658 bfd_put_32 (output_bfd,
5659 (htab->elf.sgotplt->output_section->vma
5660 + htab->elf.sgotplt->output_offset
5661 + 8
5662 - htab->elf.splt->output_section->vma
5663 - htab->elf.splt->output_offset
5664 - 6),
5665 htab->elf.splt->contents + abed->plt0_got1_offset);
5666 /* Add offset for the PC-relative instruction accessing GOT+16,
5667 subtracting the offset to the end of that instruction. */
5668 bfd_put_32 (output_bfd,
5669 (htab->elf.sgotplt->output_section->vma
5670 + htab->elf.sgotplt->output_offset
5671 + 16
5672 - htab->elf.splt->output_section->vma
5673 - htab->elf.splt->output_offset
5674 - abed->plt0_got2_insn_end),
5675 htab->elf.splt->contents + abed->plt0_got2_offset);
5676
5677 elf_section_data (htab->elf.splt->output_section)
5678 ->this_hdr.sh_entsize = abed->plt_entry_size;
5679
5680 if (htab->tlsdesc_plt)
5681 {
5682 bfd_put_64 (output_bfd, (bfd_vma) 0,
5683 htab->elf.sgot->contents + htab->tlsdesc_got);
5684
5685 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5686 abed->plt0_entry, abed->plt_entry_size);
5687
5688 /* Add the offset for pushq GOT+8(%rip); since the
5689 instruction is 6 bytes long, subtract this value. */
5690 bfd_put_32 (output_bfd,
5691 (htab->elf.sgotplt->output_section->vma
5692 + htab->elf.sgotplt->output_offset
5693 + 8
5694 - htab->elf.splt->output_section->vma
5695 - htab->elf.splt->output_offset
5696 - htab->tlsdesc_plt
5697 - 6),
5698 htab->elf.splt->contents
5699 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5700 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
5701 where TDG stands for htab->tlsdesc_got, subtracting the offset
5702 to the end of that instruction. */
5703 bfd_put_32 (output_bfd,
5704 (htab->elf.sgot->output_section->vma
5705 + htab->elf.sgot->output_offset
5706 + htab->tlsdesc_got
5707 - htab->elf.splt->output_section->vma
5708 - htab->elf.splt->output_offset
5709 - htab->tlsdesc_plt
5710 - abed->plt0_got2_insn_end),
5711 htab->elf.splt->contents
5712 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5713 }
5714 }
5715 }
5716
5717 if (htab->plt_bnd != NULL)
5718 elf_section_data (htab->plt_bnd->output_section)
5719 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5720
5721 if (htab->elf.sgotplt)
5722 {
5723 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5724 {
5725 (*_bfd_error_handler)
5726 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5727 return FALSE;
5728 }
5729
5730 /* Fill in the first three entries in the global offset table. */
5731 if (htab->elf.sgotplt->size > 0)
5732 {
5733 /* Set the first entry in the global offset table to the address of
5734 the dynamic section. */
5735 if (sdyn == NULL)
5736 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5737 else
5738 bfd_put_64 (output_bfd,
5739 sdyn->output_section->vma + sdyn->output_offset,
5740 htab->elf.sgotplt->contents);
5741 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5742 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5743 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5744 }
5745
5746 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5747 GOT_ENTRY_SIZE;
5748 }
5749
5750 /* Adjust .eh_frame for .plt section. */
5751 if (htab->plt_eh_frame != NULL
5752 && htab->plt_eh_frame->contents != NULL)
5753 {
5754 if (htab->elf.splt != NULL
5755 && htab->elf.splt->size != 0
5756 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5757 && htab->elf.splt->output_section != NULL
5758 && htab->plt_eh_frame->output_section != NULL)
5759 {
5760 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5761 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5762 + htab->plt_eh_frame->output_offset
5763 + PLT_FDE_START_OFFSET;
5764 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5765 htab->plt_eh_frame->contents
5766 + PLT_FDE_START_OFFSET);
5767 }
5768 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5769 {
5770 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5771 htab->plt_eh_frame,
5772 htab->plt_eh_frame->contents))
5773 return FALSE;
5774 }
5775 }
5776
5777 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5778 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5779 = GOT_ENTRY_SIZE;
5780
5781 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5782 htab_traverse (htab->loc_hash_table,
5783 elf_x86_64_finish_local_dynamic_symbol,
5784 info);
5785
5786 return TRUE;
5787 }
5788
5789 /* Return an array of PLT entry symbol values. */
5790
5791 static bfd_vma *
5792 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5793 asection *relplt)
5794 {
5795 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5796 arelent *p;
5797 long count, i;
5798 bfd_vma *plt_sym_val;
5799 bfd_vma plt_offset;
5800 bfd_byte *plt_contents;
5801 const struct elf_x86_64_backend_data *bed;
5802 Elf_Internal_Shdr *hdr;
5803 asection *plt_bnd;
5804
5805 /* Get the .plt section contents. The PLT section passed down may
5806 point to the .plt.bnd section; make sure that PLT always points
5807 to the .plt section. */
5808 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5809 if (plt_bnd)
5810 {
5811 if (plt != plt_bnd)
5812 abort ();
5813 plt = bfd_get_section_by_name (abfd, ".plt");
5814 if (plt == NULL)
5815 abort ();
5816 bed = &elf_x86_64_bnd_arch_bed;
5817 }
5818 else
5819 bed = get_elf_x86_64_backend_data (abfd);
5820
5821 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5822 if (plt_contents == NULL)
5823 return NULL;
5824 if (!bfd_get_section_contents (abfd, (asection *) plt,
5825 plt_contents, 0, plt->size))
5826 {
5827 bad_return:
5828 free (plt_contents);
5829 return NULL;
5830 }
5831
5832 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5833 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5834 goto bad_return;
5835
5836 hdr = &elf_section_data (relplt)->this_hdr;
5837 count = relplt->size / hdr->sh_entsize;
5838
5839 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5840 if (plt_sym_val == NULL)
5841 goto bad_return;
5842
5843 for (i = 0; i < count; i++)
5844 plt_sym_val[i] = -1;
5845
5846 plt_offset = bed->plt_entry_size;
5847 p = relplt->relocation;
5848 for (i = 0; i < count; i++, p++)
5849 {
5850 long reloc_index;
5851
5852 /* Skip unknown relocation. */
5853 if (p->howto == NULL)
5854 continue;
5855
5856 if (p->howto->type != R_X86_64_JUMP_SLOT
5857 && p->howto->type != R_X86_64_IRELATIVE)
5858 continue;
5859
5860 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5861 + bed->plt_reloc_offset));
5862 if (reloc_index >= count)
5863 abort ();
5864 if (plt_bnd)
5865 {
5866 /* This is the index in .plt section. */
5867 long plt_index = plt_offset / bed->plt_entry_size;
5868 /* Store VMA + the offset in .plt.bnd section. */
5869 plt_sym_val[reloc_index] =
5870 (plt_bnd->vma
5871 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5872 }
5873 else
5874 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5875 plt_offset += bed->plt_entry_size;
5876
5877 /* PR binutils/18437: Skip extra relocations in the .rela.plt
5878 section. */
5879 if (plt_offset >= plt->size)
5880 break;
5881 }
5882
5883 free (plt_contents);
5884
5885 return plt_sym_val;
5886 }
5887
5888 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5889 support. */
5890
5891 static long
5892 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5893 long symcount,
5894 asymbol **syms,
5895 long dynsymcount,
5896 asymbol **dynsyms,
5897 asymbol **ret)
5898 {
5899 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5900 as PLT if it exists. */
5901 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5902 if (plt == NULL)
5903 plt = bfd_get_section_by_name (abfd, ".plt");
5904 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5905 dynsymcount, dynsyms, ret,
5906 plt,
5907 elf_x86_64_get_plt_sym_val);
5908 }
5909
5910 /* Handle an x86-64 specific section when reading an object file. This
5911 is called when elfcode.h finds a section with an unknown type. */
5912
5913 static bfd_boolean
5914 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5915 const char *name, int shindex)
5916 {
5917 if (hdr->sh_type != SHT_X86_64_UNWIND)
5918 return FALSE;
5919
5920 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5921 return FALSE;
5922
5923 return TRUE;
5924 }
5925
5926 /* Hook called by the linker routine which adds symbols from an object
5927 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5928 of .bss. */
5929
5930 static bfd_boolean
5931 elf_x86_64_add_symbol_hook (bfd *abfd,
5932 struct bfd_link_info *info,
5933 Elf_Internal_Sym *sym,
5934 const char **namep ATTRIBUTE_UNUSED,
5935 flagword *flagsp ATTRIBUTE_UNUSED,
5936 asection **secp,
5937 bfd_vma *valp)
5938 {
5939 asection *lcomm;
5940
5941 switch (sym->st_shndx)
5942 {
5943 case SHN_X86_64_LCOMMON:
5944 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5945 if (lcomm == NULL)
5946 {
5947 lcomm = bfd_make_section_with_flags (abfd,
5948 "LARGE_COMMON",
5949 (SEC_ALLOC
5950 | SEC_IS_COMMON
5951 | SEC_LINKER_CREATED));
5952 if (lcomm == NULL)
5953 return FALSE;
5954 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5955 }
5956 *secp = lcomm;
5957 *valp = sym->st_size;
5958 return TRUE;
5959 }
5960
5961 if (ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE
5962 && (abfd->flags & DYNAMIC) == 0
5963 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5964 elf_tdata (info->output_bfd)->has_gnu_symbols
5965 |= elf_gnu_symbol_unique;
5966
5967 return TRUE;
5968 }
5969
5970
5971 /* Given a BFD section, try to locate the corresponding ELF section
5972 index. */
5973
5974 static bfd_boolean
5975 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5976 asection *sec, int *index_return)
5977 {
5978 if (sec == &_bfd_elf_large_com_section)
5979 {
5980 *index_return = SHN_X86_64_LCOMMON;
5981 return TRUE;
5982 }
5983 return FALSE;
5984 }
5985
5986 /* Process a symbol: map SHN_X86_64_LCOMMON symbols onto the large common section. */
5987
5988 static void
5989 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5990 asymbol *asym)
5991 {
5992 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5993
5994 switch (elfsym->internal_elf_sym.st_shndx)
5995 {
5996 case SHN_X86_64_LCOMMON:
5997 asym->section = &_bfd_elf_large_com_section;
5998 asym->value = elfsym->internal_elf_sym.st_size;
5999 /* Common symbol doesn't set BSF_GLOBAL. */
6000 asym->flags &= ~BSF_GLOBAL;
6001 break;
6002 }
6003 }
6004
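/* Return TRUE if SYM defines a common symbol, either a normal one
(SHN_COMMON) or an x86-64 large common one (SHN_X86_64_LCOMMON). */
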
6005 static bfd_boolean
6006 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6007 {
6008 return (sym->st_shndx == SHN_COMMON
6009 || sym->st_shndx == SHN_X86_64_LCOMMON);
6010 }
6011
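/* Return the section index to be used for common symbols placed in
SEC: SHN_X86_64_LCOMMON if SEC carries SHF_X86_64_LARGE, otherwise
SHN_COMMON. */
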
6012 static unsigned int
6013 elf_x86_64_common_section_index (asection *sec)
6014 {
6015 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6016 return SHN_COMMON;
6017 else
6018 return SHN_X86_64_LCOMMON;
6019 }
6020
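/* Return the common section to be used for SEC: the large common
section if SEC carries SHF_X86_64_LARGE, the normal common section
otherwise. */
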
6021 static asection *
6022 elf_x86_64_common_section (asection *sec)
6023 {
6024 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6025 return bfd_com_section_ptr;
6026 else
6027 return &_bfd_elf_large_com_section;
6028 }
6029
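/* Hook called when merging a newly seen symbol SYM against an existing
hash table entry H; used to reconcile normal and large common symbols. */
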
6030 static bfd_boolean
6031 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6032 const Elf_Internal_Sym *sym,
6033 asection **psec,
6034 bfd_boolean newdef,
6035 bfd_boolean olddef,
6036 bfd *oldbfd,
6037 const asection *oldsec)
6038 {
6039 /* A normal common symbol and a large common symbol result in a
6040 normal common symbol. We turn the large common symbol into a
6041 normal one. */
6042 if (!olddef
6043 && h->root.type == bfd_link_hash_common
6044 && !newdef
6045 && bfd_is_com_section (*psec)
6046 && oldsec != *psec)
6047 {
6048 if (sym->st_shndx == SHN_COMMON
6049 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6050 {
6051 h->root.u.c.p->section
6052 = bfd_make_section_old_way (oldbfd, "COMMON");
6053 h->root.u.c.p->section->flags = SEC_ALLOC;
6054 }
6055 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6056 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6057 *psec = bfd_com_section_ptr;
6058 }
6059
6060 return TRUE;
6061 }
6062
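/* Return the number of additional program headers needed for the
large read-only (.lrodata) and large data (.ldata) sections. */
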
6063 static int
6064 elf_x86_64_additional_program_headers (bfd *abfd,
6065 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6066 {
6067 asection *s;
6068 int count = 0;
6069
6070 /* Check to see if we need a large readonly segment. */
6071 s = bfd_get_section_by_name (abfd, ".lrodata");
6072 if (s && (s->flags & SEC_LOAD))
6073 count++;
6074
6075 /* Check to see if we need a large data segment. Since the .lbss
6076 section is placed right after the .bss section, there should be no
6077 need for a large data segment just because of .lbss. */
6078 s = bfd_get_section_by_name (abfd, ".ldata");
6079 if (s && (s->flags & SEC_LOAD))
6080 count++;
6081
6082 return count;
6083 }
6084
6085 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6086
6087 static bfd_boolean
6088 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6089 {
6090 if (h->plt.offset != (bfd_vma) -1
6091 && !h->def_regular
6092 && !h->pointer_equality_needed)
6093 return FALSE;
6094
6095 return _bfd_elf_hash_symbol (h);
6096 }
6097
6098 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6099
6100 static bfd_boolean
6101 elf_x86_64_relocs_compatible (const bfd_target *input,
6102 const bfd_target *output)
6103 {
6104 return ((xvec_get_elf_backend_data (input)->s->elfclass
6105 == xvec_get_elf_backend_data (output)->s->elfclass)
6106 && _bfd_elf_relocs_compatible (input, output));
6107 }
6108
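/* x86-64 specific sections: the large-model sections and their
linkonce variants all carry the SHF_X86_64_LARGE flag. */
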
6109 static const struct bfd_elf_special_section
6110 elf_x86_64_special_sections[]=
6111 {
6112 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6113 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6114 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6115 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6116 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6117 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6118 { NULL, 0, 0, 0, 0 }
6119 };
6120
6121 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6122 #define TARGET_LITTLE_NAME "elf64-x86-64"
6123 #define ELF_ARCH bfd_arch_i386
6124 #define ELF_TARGET_ID X86_64_ELF_DATA
6125 #define ELF_MACHINE_CODE EM_X86_64
6126 #define ELF_MAXPAGESIZE 0x200000
6127 #define ELF_MINPAGESIZE 0x1000
6128 #define ELF_COMMONPAGESIZE 0x1000
6129
6130 #define elf_backend_can_gc_sections 1
6131 #define elf_backend_can_refcount 1
6132 #define elf_backend_want_got_plt 1
6133 #define elf_backend_plt_readonly 1
6134 #define elf_backend_want_plt_sym 0
6135 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6136 #define elf_backend_rela_normal 1
6137 #define elf_backend_plt_alignment 4
6138 #define elf_backend_extern_protected_data 1
6139
6140 #define elf_info_to_howto elf_x86_64_info_to_howto
6141
6142 #define bfd_elf64_bfd_link_hash_table_create \
6143 elf_x86_64_link_hash_table_create
6144 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6145 #define bfd_elf64_bfd_reloc_name_lookup \
6146 elf_x86_64_reloc_name_lookup
6147
6148 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6149 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6150 #define elf_backend_check_relocs elf_x86_64_check_relocs
6151 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6152 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6153 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6154 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6155 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6156 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
6157 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6158 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6159 #ifdef CORE_HEADER
6160 #define elf_backend_write_core_note elf_x86_64_write_core_note
6161 #endif
6162 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6163 #define elf_backend_relocate_section elf_x86_64_relocate_section
6164 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6165 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6166 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6167 #define elf_backend_object_p elf64_x86_64_elf_object_p
6168 #define bfd_elf64_mkobject elf_x86_64_mkobject
6169 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6170
6171 #define elf_backend_section_from_shdr \
6172 elf_x86_64_section_from_shdr
6173
6174 #define elf_backend_section_from_bfd_section \
6175 elf_x86_64_elf_section_from_bfd_section
6176 #define elf_backend_add_symbol_hook \
6177 elf_x86_64_add_symbol_hook
6178 #define elf_backend_symbol_processing \
6179 elf_x86_64_symbol_processing
6180 #define elf_backend_common_section_index \
6181 elf_x86_64_common_section_index
6182 #define elf_backend_common_section \
6183 elf_x86_64_common_section
6184 #define elf_backend_common_definition \
6185 elf_x86_64_common_definition
6186 #define elf_backend_merge_symbol \
6187 elf_x86_64_merge_symbol
6188 #define elf_backend_special_sections \
6189 elf_x86_64_special_sections
6190 #define elf_backend_additional_program_headers \
6191 elf_x86_64_additional_program_headers
6192 #define elf_backend_hash_symbol \
6193 elf_x86_64_hash_symbol
6194
6195 #include "elf64-target.h"
6196
6197 /* CloudABI support. */
6198
6199 #undef TARGET_LITTLE_SYM
6200 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6201 #undef TARGET_LITTLE_NAME
6202 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6203
6204 #undef ELF_OSABI
6205 #define ELF_OSABI ELFOSABI_CLOUDABI
6206
6207 #undef elf64_bed
6208 #define elf64_bed elf64_x86_64_cloudabi_bed
6209
6210 #include "elf64-target.h"
6211
6212 /* FreeBSD support. */
6213
6214 #undef TARGET_LITTLE_SYM
6215 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6216 #undef TARGET_LITTLE_NAME
6217 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6218
6219 #undef ELF_OSABI
6220 #define ELF_OSABI ELFOSABI_FREEBSD
6221
6222 #undef elf64_bed
6223 #define elf64_bed elf64_x86_64_fbsd_bed
6224
6225 #include "elf64-target.h"
6226
6227 /* Solaris 2 support. */
6228
6229 #undef TARGET_LITTLE_SYM
6230 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6231 #undef TARGET_LITTLE_NAME
6232 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6233
6234 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6235 objects won't be recognized. */
6236 #undef ELF_OSABI
6237
6238 #undef elf64_bed
6239 #define elf64_bed elf64_x86_64_sol2_bed
6240
6241 /* The 64-bit static TLS arena size is rounded up to a 16-byte
6242 boundary. */
6243 #undef elf_backend_static_tls_alignment
6244 #define elf_backend_static_tls_alignment 16
6245
6246 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6247
6248 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6249 File, p.63. */
6250 #undef elf_backend_want_plt_sym
6251 #define elf_backend_want_plt_sym 1
6252
6253 #include "elf64-target.h"
6254
6255 /* Native Client support. */
6256
6257 static bfd_boolean
6258 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6259 {
6260 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6261 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6262 return TRUE;
6263 }
6264
6265 #undef TARGET_LITTLE_SYM
6266 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6267 #undef TARGET_LITTLE_NAME
6268 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6269 #undef elf64_bed
6270 #define elf64_bed elf64_x86_64_nacl_bed
6271
6272 #undef ELF_MAXPAGESIZE
6273 #undef ELF_MINPAGESIZE
6274 #undef ELF_COMMONPAGESIZE
6275 #define ELF_MAXPAGESIZE 0x10000
6276 #define ELF_MINPAGESIZE 0x10000
6277 #define ELF_COMMONPAGESIZE 0x10000
6278
6279 /* Restore defaults. */
6280 #undef ELF_OSABI
6281 #undef elf_backend_static_tls_alignment
6282 #undef elf_backend_want_plt_sym
6283 #define elf_backend_want_plt_sym 0
6284
6285 /* NaCl uses substantially different PLT entries for the same effects. */
6286
6287 #undef elf_backend_plt_alignment
6288 #define elf_backend_plt_alignment 5
6289 #define NACL_PLT_ENTRY_SIZE 64
6290 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6291
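/* The first entry in a NaCl procedure linkage table looks like this. */
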
6292 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6293 {
6294 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6295 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6296 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6297 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6298 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6299
6300 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6301 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6302
6303 /* 32 bytes of nop to pad out to the standard size. */
6304 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6305 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6306 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6307 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6308 0x66, /* excess data32 prefix */
6309 0x90 /* nop */
6310 };
6311
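/* Subsequent entries in a NaCl procedure linkage table look like this. */
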
6312 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6313 {
6314 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6315 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6316 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6317 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6318
6319 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6320 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6321 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6322
6323 /* Lazy GOT entries point here (32-byte aligned). */
6324 0x68, /* pushq immediate */
6325 0, 0, 0, 0, /* replaced with index into relocation table. */
6326 0xe9, /* jmp relative */
6327 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6328
6329 /* 22 bytes of nop to pad out to the standard size. */
6330 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6331 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6332 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6333 };
6334
6335 /* .eh_frame covering the .plt section. */
6336
6337 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6338 {
6339 #if (PLT_CIE_LENGTH != 20 \
6340 || PLT_FDE_LENGTH != 36 \
6341 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6342 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6343 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6344 #endif
6345 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6346 0, 0, 0, 0, /* CIE ID */
6347 1, /* CIE version */
6348 'z', 'R', 0, /* Augmentation string */
6349 1, /* Code alignment factor */
6350 0x78, /* Data alignment factor */
6351 16, /* Return address column */
6352 1, /* Augmentation size */
6353 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6354 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6355 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6356 DW_CFA_nop, DW_CFA_nop,
6357
6358 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6359 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6360 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6361 0, 0, 0, 0, /* .plt size goes here */
6362 0, /* Augmentation size */
6363 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6364 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6365 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6366 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6367 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6368 13, /* Block length */
6369 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6370 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6371 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6372 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6373 DW_CFA_nop, DW_CFA_nop
6374 };
6375
6376 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6377 {
6378 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6379 elf_x86_64_nacl_plt_entry, /* plt_entry */
6380 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6381 2, /* plt0_got1_offset */
6382 9, /* plt0_got2_offset */
6383 13, /* plt0_got2_insn_end */
6384 3, /* plt_got_offset */
6385 33, /* plt_reloc_offset */
6386 38, /* plt_plt_offset */
6387 7, /* plt_got_insn_size */
6388 42, /* plt_plt_insn_end */
6389 32, /* plt_lazy_offset */
6390 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6391 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6392 };
6393
6394 #undef elf_backend_arch_data
6395 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6396
6397 #undef elf_backend_object_p
6398 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6399 #undef elf_backend_modify_segment_map
6400 #define elf_backend_modify_segment_map nacl_modify_segment_map
6401 #undef elf_backend_modify_program_headers
6402 #define elf_backend_modify_program_headers nacl_modify_program_headers
6403 #undef elf_backend_final_write_processing
6404 #define elf_backend_final_write_processing nacl_final_write_processing
6405
6406 #include "elf64-target.h"
6407
6408 /* Native Client x32 support. */
6409
6410 static bfd_boolean
6411 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6412 {
6413 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6414 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6415 return TRUE;
6416 }
6417
6418 #undef TARGET_LITTLE_SYM
6419 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6420 #undef TARGET_LITTLE_NAME
6421 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6422 #undef elf32_bed
6423 #define elf32_bed elf32_x86_64_nacl_bed
6424
6425 #define bfd_elf32_bfd_link_hash_table_create \
6426 elf_x86_64_link_hash_table_create
6427 #define bfd_elf32_bfd_reloc_type_lookup \
6428 elf_x86_64_reloc_type_lookup
6429 #define bfd_elf32_bfd_reloc_name_lookup \
6430 elf_x86_64_reloc_name_lookup
6431 #define bfd_elf32_mkobject \
6432 elf_x86_64_mkobject
6433 #define bfd_elf32_get_synthetic_symtab \
6434 elf_x86_64_get_synthetic_symtab
6435
6436 #undef elf_backend_object_p
6437 #define elf_backend_object_p \
6438 elf32_x86_64_nacl_elf_object_p
6439
6440 #undef elf_backend_bfd_from_remote_memory
6441 #define elf_backend_bfd_from_remote_memory \
6442 _bfd_elf32_bfd_from_remote_memory
6443
6444 #undef elf_backend_size_info
6445 #define elf_backend_size_info \
6446 _bfd_elf32_size_info
6447
6448 #include "elf32-target.h"
6449
6450 /* Restore defaults. */
6451 #undef elf_backend_object_p
6452 #define elf_backend_object_p elf64_x86_64_elf_object_p
6453 #undef elf_backend_bfd_from_remote_memory
6454 #undef elf_backend_size_info
6455 #undef elf_backend_modify_segment_map
6456 #undef elf_backend_modify_program_headers
6457 #undef elf_backend_final_write_processing
6458
6459 /* Intel L1OM support. */
6460
6461 static bfd_boolean
6462 elf64_l1om_elf_object_p (bfd *abfd)
6463 {
6464 /* Set the right machine number for an L1OM elf64 file. */
6465 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6466 return TRUE;
6467 }
6468
6469 #undef TARGET_LITTLE_SYM
6470 #define TARGET_LITTLE_SYM l1om_elf64_vec
6471 #undef TARGET_LITTLE_NAME
6472 #define TARGET_LITTLE_NAME "elf64-l1om"
6473 #undef ELF_ARCH
6474 #define ELF_ARCH bfd_arch_l1om
6475
6476 #undef ELF_MACHINE_CODE
6477 #define ELF_MACHINE_CODE EM_L1OM
6478
6479 #undef ELF_OSABI
6480
6481 #undef elf64_bed
6482 #define elf64_bed elf64_l1om_bed
6483
6484 #undef elf_backend_object_p
6485 #define elf_backend_object_p elf64_l1om_elf_object_p
6486
6487 /* Restore defaults. */
6488 #undef ELF_MAXPAGESIZE
6489 #undef ELF_MINPAGESIZE
6490 #undef ELF_COMMONPAGESIZE
6491 #define ELF_MAXPAGESIZE 0x200000
6492 #define ELF_MINPAGESIZE 0x1000
6493 #define ELF_COMMONPAGESIZE 0x1000
6494 #undef elf_backend_plt_alignment
6495 #define elf_backend_plt_alignment 4
6496 #undef elf_backend_arch_data
6497 #define elf_backend_arch_data &elf_x86_64_arch_bed
6498
6499 #include "elf64-target.h"
6500
6501 /* FreeBSD L1OM support. */
6502
6503 #undef TARGET_LITTLE_SYM
6504 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6505 #undef TARGET_LITTLE_NAME
6506 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6507
6508 #undef ELF_OSABI
6509 #define ELF_OSABI ELFOSABI_FREEBSD
6510
6511 #undef elf64_bed
6512 #define elf64_bed elf64_l1om_fbsd_bed
6513
6514 #include "elf64-target.h"
6515
6516 /* Intel K1OM support. */
6517
6518 static bfd_boolean
6519 elf64_k1om_elf_object_p (bfd *abfd)
6520 {
6521 /* Set the right machine number for a K1OM elf64 file. */
6522 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6523 return TRUE;
6524 }
6525
6526 #undef TARGET_LITTLE_SYM
6527 #define TARGET_LITTLE_SYM k1om_elf64_vec
6528 #undef TARGET_LITTLE_NAME
6529 #define TARGET_LITTLE_NAME "elf64-k1om"
6530 #undef ELF_ARCH
6531 #define ELF_ARCH bfd_arch_k1om
6532
6533 #undef ELF_MACHINE_CODE
6534 #define ELF_MACHINE_CODE EM_K1OM
6535
6536 #undef ELF_OSABI
6537
6538 #undef elf64_bed
6539 #define elf64_bed elf64_k1om_bed
6540
6541 #undef elf_backend_object_p
6542 #define elf_backend_object_p elf64_k1om_elf_object_p
6543
6544 #undef elf_backend_static_tls_alignment
6545
6546 #undef elf_backend_want_plt_sym
6547 #define elf_backend_want_plt_sym 0
6548
6549 #include "elf64-target.h"
6550
6551 /* FreeBSD K1OM support. */
6552
6553 #undef TARGET_LITTLE_SYM
6554 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6555 #undef TARGET_LITTLE_NAME
6556 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6557
6558 #undef ELF_OSABI
6559 #define ELF_OSABI ELFOSABI_FREEBSD
6560
6561 #undef elf64_bed
6562 #define elf64_bed elf64_k1om_fbsd_bed
6563
6564 #include "elf64-target.h"
6565
6566 /* 32bit x86-64 support. */
6567
6568 #undef TARGET_LITTLE_SYM
6569 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6570 #undef TARGET_LITTLE_NAME
6571 #define TARGET_LITTLE_NAME "elf32-x86-64"
6572 #undef elf32_bed
6573
6574 #undef ELF_ARCH
6575 #define ELF_ARCH bfd_arch_i386
6576
6577 #undef ELF_MACHINE_CODE
6578 #define ELF_MACHINE_CODE EM_X86_64
6579
6580 #undef ELF_OSABI
6581
6582 #undef elf_backend_object_p
6583 #define elf_backend_object_p \
6584 elf32_x86_64_elf_object_p
6585
6586 #undef elf_backend_bfd_from_remote_memory
6587 #define elf_backend_bfd_from_remote_memory \
6588 _bfd_elf32_bfd_from_remote_memory
6589
6590 #undef elf_backend_size_info
6591 #define elf_backend_size_info \
6592 _bfd_elf32_size_info
6593
6594 #include "elf32-target.h"