Fix a null pointer dereference when reading the debug link info from a corrupt file.
bfd/elf64-x86-64.c (deliverable/binutils-gdb.git)
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type
45 identically, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
46 relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same.  */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
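     /* TRUE if TYPE is one of the x86-64 PC-relative relocation types.  */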
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
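     /* Return the howto table entry for relocation number R_TYPE, selecting
        the x32 flavour of R_X86_64_32 when ABFD is not 64-bit and folding the
        GNU vtable relocs onto their slots after the gap.  Out-of-range types
        are reported and mapped to R_X86_64_NONE.  */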
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return 0;
306 }
307
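     /* Given a relocation name, return the corresponding HOWTO structure.
        The x32 variant of R_X86_64_32 is returned for non-64-bit BFDs.  */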
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists.  */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this.  */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this.  */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
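     /* Accessors for the x86-64 backend data and the per-BFD PLT entry size.  */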
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
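     /* Classification of the GOT entries needed for a symbol.  GD and GDESC
        may be combined for one symbol, hence the predicate macros below.  */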
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if symbol has at least one BND relocation. */
761 bfd_boolean has_bnd_reloc;
762
763 /* Information about the second PLT entry. Filled when has_bnd_reloc is
764 set. */
765 union gotplt_union plt_bnd;
766
767 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
768 starting at the end of the jump table. */
769 bfd_vma tlsdesc_got;
770 };
771
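     /* Convert a generic ELF linker hash entry to an x86-64 one.  */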
772 #define elf_x86_64_hash_entry(ent) \
773 ((struct elf_x86_64_link_hash_entry *)(ent))
774
775 struct elf_x86_64_obj_tdata
776 {
777 struct elf_obj_tdata root;
778
779 /* tls_type for each local got entry. */
780 char *local_got_tls_type;
781
782 /* GOTPLT entries for TLS descriptors. */
783 bfd_vma *local_tlsdesc_gotent;
784 };
785
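     /* Accessors for the x86-64-specific object tdata and its per-BFD
        local GOT arrays.  */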
786 #define elf_x86_64_tdata(abfd) \
787 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
788
789 #define elf_x86_64_local_got_tls_type(abfd) \
790 (elf_x86_64_tdata (abfd)->local_got_tls_type)
791
792 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
793 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
794
795 #define is_x86_64_elf(bfd) \
796 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
797 && elf_tdata (bfd) != NULL \
798 && elf_object_id (bfd) == X86_64_ELF_DATA)
799
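     /* Allocate x86-64 ELF private object data for ABFD.  */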
800 static bfd_boolean
801 elf_x86_64_mkobject (bfd *abfd)
802 {
803 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
804 X86_64_ELF_DATA);
805 }
806
807 /* x86-64 ELF linker hash table. */
808
809 struct elf_x86_64_link_hash_table
810 {
811 struct elf_link_hash_table elf;
812
813 /* Short-cuts to get to dynamic linker sections. */
814 asection *sdynbss;
815 asection *srelbss;
816 asection *plt_eh_frame;
817 asection *plt_bnd;
818
819 union
820 {
821 bfd_signed_vma refcount;
822 bfd_vma offset;
823 } tls_ld_got;
824
825 /* The amount of space used by the jump slots in the GOT. */
826 bfd_vma sgotplt_jump_table_size;
827
828 /* Small local sym cache. */
829 struct sym_cache sym_cache;
830
831 bfd_vma (*r_info) (bfd_vma, bfd_vma);
832 bfd_vma (*r_sym) (bfd_vma);
833 unsigned int pointer_r_type;
834 const char *dynamic_interpreter;
835 int dynamic_interpreter_size;
836
837 /* _TLS_MODULE_BASE_ symbol. */
838 struct bfd_link_hash_entry *tls_module_base;
839
840 /* Used by local STT_GNU_IFUNC symbols. */
841 htab_t loc_hash_table;
842 void * loc_hash_memory;
843
844 /* The offset into splt of the PLT entry for the TLS descriptor
845 resolver. Special values are 0, if not necessary (or not found
846 to be necessary yet), and -1 if needed but not determined
847 yet. */
848 bfd_vma tlsdesc_plt;
849 /* The offset into sgot of the GOT entry used by the PLT entry
850 above. */
851 bfd_vma tlsdesc_got;
852
853 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
854 bfd_vma next_jump_slot_index;
855 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
856 bfd_vma next_irelative_index;
857 };
858
859 /* Get the x86-64 ELF linker hash table from a link_info structure. */
860
861 #define elf_x86_64_hash_table(p) \
862 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
863 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
864
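     /* The number of bytes of .got.plt used by the PLT jump slots:
        one GOT entry per relocation in .rela.plt.  */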
865 #define elf_x86_64_compute_jump_table_size(htab) \
866 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
867
868 /* Create an entry in an x86-64 ELF linker hash table. */
869
870 static struct bfd_hash_entry *
871 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
872 struct bfd_hash_table *table,
873 const char *string)
874 {
875 /* Allocate the structure if it has not already been allocated by a
876 subclass. */
877 if (entry == NULL)
878 {
879 entry = (struct bfd_hash_entry *)
880 bfd_hash_allocate (table,
881 sizeof (struct elf_x86_64_link_hash_entry));
882 if (entry == NULL)
883 return entry;
884 }
885
886 /* Call the allocation method of the superclass. */
887 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
888 if (entry != NULL)
889 {
890 struct elf_x86_64_link_hash_entry *eh;
891
892 eh = (struct elf_x86_64_link_hash_entry *) entry;
893 eh->dyn_relocs = NULL;
894 eh->tls_type = GOT_UNKNOWN;
895 eh->has_bnd_reloc = FALSE;
896 eh->plt_bnd.offset = (bfd_vma) -1;
897 eh->tlsdesc_got = (bfd_vma) -1;
898 }
899
900 return entry;
901 }
902
903 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
904 for local symbols so that we can handle local STT_GNU_IFUNC symbols
905 as global symbols.  We reuse indx and dynstr_index for the local
906 symbol hash since they aren't used by global symbols in this backend.  */
907
908 static hashval_t
909 elf_x86_64_local_htab_hash (const void *ptr)
910 {
911 struct elf_link_hash_entry *h
912 = (struct elf_link_hash_entry *) ptr;
913 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
914 }
915
916 /* Compare local hash entries. */
917
918 static int
919 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
920 {
921 struct elf_link_hash_entry *h1
922 = (struct elf_link_hash_entry *) ptr1;
923 struct elf_link_hash_entry *h2
924 = (struct elf_link_hash_entry *) ptr2;
925
926 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
927 }
928
929 /* Find and/or create a hash entry for a local symbol.  */
930
931 static struct elf_link_hash_entry *
932 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
933 bfd *abfd, const Elf_Internal_Rela *rel,
934 bfd_boolean create)
935 {
936 struct elf_x86_64_link_hash_entry e, *ret;
937 asection *sec = abfd->sections;
938 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
939 htab->r_sym (rel->r_info));
940 void **slot;
941
942 e.elf.indx = sec->id;
943 e.elf.dynstr_index = htab->r_sym (rel->r_info);
944 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
945 create ? INSERT : NO_INSERT);
946
947 if (!slot)
948 return NULL;
949
950 if (*slot)
951 {
952 ret = (struct elf_x86_64_link_hash_entry *) *slot;
953 return &ret->elf;
954 }
955
956 ret = (struct elf_x86_64_link_hash_entry *)
957 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
958 sizeof (struct elf_x86_64_link_hash_entry));
959 if (ret)
960 {
961 memset (ret, 0, sizeof (*ret));
962 ret->elf.indx = sec->id;
963 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
964 ret->elf.dynindx = -1;
965 *slot = ret;
966 }
967 return &ret->elf;
968 }
969
970 /* Destroy an X86-64 ELF linker hash table. */
971
972 static void
973 elf_x86_64_link_hash_table_free (bfd *obfd)
974 {
975 struct elf_x86_64_link_hash_table *htab
976 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
977
978 if (htab->loc_hash_table)
979 htab_delete (htab->loc_hash_table);
980 if (htab->loc_hash_memory)
981 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
982 _bfd_elf_link_hash_table_free (obfd);
983 }
984
985 /* Create an X86-64 ELF linker hash table. */
986
987 static struct bfd_link_hash_table *
988 elf_x86_64_link_hash_table_create (bfd *abfd)
989 {
990 struct elf_x86_64_link_hash_table *ret;
991 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
992
993 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
994 if (ret == NULL)
995 return NULL;
996
997 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
998 elf_x86_64_link_hash_newfunc,
999 sizeof (struct elf_x86_64_link_hash_entry),
1000 X86_64_ELF_DATA))
1001 {
1002 free (ret);
1003 return NULL;
1004 }
1005
1006 if (ABI_64_P (abfd))
1007 {
1008 ret->r_info = elf64_r_info;
1009 ret->r_sym = elf64_r_sym;
1010 ret->pointer_r_type = R_X86_64_64;
1011 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1012 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1013 }
1014 else
1015 {
1016 ret->r_info = elf32_r_info;
1017 ret->r_sym = elf32_r_sym;
1018 ret->pointer_r_type = R_X86_64_32;
1019 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1020 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1021 }
1022
1023 ret->loc_hash_table = htab_try_create (1024,
1024 elf_x86_64_local_htab_hash,
1025 elf_x86_64_local_htab_eq,
1026 NULL);
1027 ret->loc_hash_memory = objalloc_create ();
1028 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1029 {
1030 elf_x86_64_link_hash_table_free (abfd);
1031 return NULL;
1032 }
1033 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1034
1035 return &ret->elf.root;
1036 }
1037
1038 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1039 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1040 hash table. */
1041
1042 static bfd_boolean
1043 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1044 struct bfd_link_info *info)
1045 {
1046 struct elf_x86_64_link_hash_table *htab;
1047
1048 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1049 return FALSE;
1050
1051 htab = elf_x86_64_hash_table (info);
1052 if (htab == NULL)
1053 return FALSE;
1054
1055 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1056 if (!info->shared)
1057 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
1058
1059 if (!htab->sdynbss
1060 || (!info->shared && !htab->srelbss))
1061 abort ();
1062
1063 if (!info->no_ld_generated_unwind_info
1064 && htab->plt_eh_frame == NULL
1065 && htab->elf.splt != NULL)
1066 {
1067 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1068 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1069 | SEC_LINKER_CREATED);
1070 htab->plt_eh_frame
1071 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1072 if (htab->plt_eh_frame == NULL
1073 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1074 return FALSE;
1075 }
1076 return TRUE;
1077 }
1078
1079 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1080
1081 static void
1082 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1083 struct elf_link_hash_entry *dir,
1084 struct elf_link_hash_entry *ind)
1085 {
1086 struct elf_x86_64_link_hash_entry *edir, *eind;
1087
1088 edir = (struct elf_x86_64_link_hash_entry *) dir;
1089 eind = (struct elf_x86_64_link_hash_entry *) ind;
1090
1091 if (!edir->has_bnd_reloc)
1092 edir->has_bnd_reloc = eind->has_bnd_reloc;
1093
1094 if (eind->dyn_relocs != NULL)
1095 {
1096 if (edir->dyn_relocs != NULL)
1097 {
1098 struct elf_dyn_relocs **pp;
1099 struct elf_dyn_relocs *p;
1100
1101 /* Add reloc counts against the indirect sym to the direct sym
1102 list. Merge any entries against the same section. */
1103 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1104 {
1105 struct elf_dyn_relocs *q;
1106
1107 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1108 if (q->sec == p->sec)
1109 {
1110 q->pc_count += p->pc_count;
1111 q->count += p->count;
1112 *pp = p->next;
1113 break;
1114 }
1115 if (q == NULL)
1116 pp = &p->next;
1117 }
1118 *pp = edir->dyn_relocs;
1119 }
1120
1121 edir->dyn_relocs = eind->dyn_relocs;
1122 eind->dyn_relocs = NULL;
1123 }
1124
1125 if (ind->root.type == bfd_link_hash_indirect
1126 && dir->got.refcount <= 0)
1127 {
1128 edir->tls_type = eind->tls_type;
1129 eind->tls_type = GOT_UNKNOWN;
1130 }
1131
1132 if (ELIMINATE_COPY_RELOCS
1133 && ind->root.type != bfd_link_hash_indirect
1134 && dir->dynamic_adjusted)
1135 {
1136 /* If called to transfer flags for a weakdef during processing
1137 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1138 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1139 dir->ref_dynamic |= ind->ref_dynamic;
1140 dir->ref_regular |= ind->ref_regular;
1141 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1142 dir->needs_plt |= ind->needs_plt;
1143 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1144 }
1145 else
1146 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1147 }
1148
1149 static bfd_boolean
1150 elf64_x86_64_elf_object_p (bfd *abfd)
1151 {
1152 /* Set the right machine number for an x86-64 elf64 file. */
1153 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1154 return TRUE;
1155 }
1156
1157 static bfd_boolean
1158 elf32_x86_64_elf_object_p (bfd *abfd)
1159 {
1160 /* Set the right machine number for an x86-64 elf32 file. */
1161 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1162 return TRUE;
1163 }
1164
1165 /* Return TRUE if the TLS access code sequence supports transition
1166 from R_TYPE.  */
1167
1168 static bfd_boolean
1169 elf_x86_64_check_tls_transition (bfd *abfd,
1170 struct bfd_link_info *info,
1171 asection *sec,
1172 bfd_byte *contents,
1173 Elf_Internal_Shdr *symtab_hdr,
1174 struct elf_link_hash_entry **sym_hashes,
1175 unsigned int r_type,
1176 const Elf_Internal_Rela *rel,
1177 const Elf_Internal_Rela *relend)
1178 {
1179 unsigned int val;
1180 unsigned long r_symndx;
1181 bfd_boolean largepic = FALSE;
1182 struct elf_link_hash_entry *h;
1183 bfd_vma offset;
1184 struct elf_x86_64_link_hash_table *htab;
1185
1186 /* Get the section contents. */
1187 if (contents == NULL)
1188 {
1189 if (elf_section_data (sec)->this_hdr.contents != NULL)
1190 contents = elf_section_data (sec)->this_hdr.contents;
1191 else
1192 {
1193 /* FIXME: How to better handle error condition? */
1194 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1195 return FALSE;
1196
1197 /* Cache the section contents for elf_link_input_bfd. */
1198 elf_section_data (sec)->this_hdr.contents = contents;
1199 }
1200 }
1201
1202 htab = elf_x86_64_hash_table (info);
1203 offset = rel->r_offset;
1204 switch (r_type)
1205 {
1206 case R_X86_64_TLSGD:
1207 case R_X86_64_TLSLD:
1208 if ((rel + 1) >= relend)
1209 return FALSE;
1210
1211 if (r_type == R_X86_64_TLSGD)
1212 {
1213 /* Check transition from GD access model.  For 64bit, only
1214 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1215 .word 0x6666; rex64; call __tls_get_addr
1216 can transition to a different access model.  For 32bit, only
1217 leaq foo@tlsgd(%rip), %rdi
1218 .word 0x6666; rex64; call __tls_get_addr
1219 can transition to a different access model.  For largepic
1220 we also support:
1221 leaq foo@tlsgd(%rip), %rdi
1222 movabsq $__tls_get_addr@pltoff, %rax
1223 addq %rbx, %rax
1224 call *%rax.  */
1225
1226 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1227 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1228
1229 if ((offset + 12) > sec->size)
1230 return FALSE;
1231
1232 if (memcmp (contents + offset + 4, call, 4) != 0)
1233 {
1234 if (!ABI_64_P (abfd)
1235 || (offset + 19) > sec->size
1236 || offset < 3
1237 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1238 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1239 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1240 != 0)
1241 return FALSE;
1242 largepic = TRUE;
1243 }
1244 else if (ABI_64_P (abfd))
1245 {
1246 if (offset < 4
1247 || memcmp (contents + offset - 4, leaq, 4) != 0)
1248 return FALSE;
1249 }
1250 else
1251 {
1252 if (offset < 3
1253 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1254 return FALSE;
1255 }
1256 }
1257 else
1258 {
1259 /* Check transition from LD access model.  Only
1260 leaq foo@tlsld(%rip), %rdi;
1261 call __tls_get_addr
1262 can transition to a different access model.  For largepic
1263 we also support:
1264 leaq foo@tlsld(%rip), %rdi
1265 movabsq $__tls_get_addr@pltoff, %rax
1266 addq %rbx, %rax
1267 call *%rax.  */
1268
1269 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1270
1271 if (offset < 3 || (offset + 9) > sec->size)
1272 return FALSE;
1273
1274 if (memcmp (contents + offset - 3, lea, 3) != 0)
1275 return FALSE;
1276
1277 if (0xe8 != *(contents + offset + 4))
1278 {
1279 if (!ABI_64_P (abfd)
1280 || (offset + 19) > sec->size
1281 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1282 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1283 != 0)
1284 return FALSE;
1285 largepic = TRUE;
1286 }
1287 }
1288
1289 r_symndx = htab->r_sym (rel[1].r_info);
1290 if (r_symndx < symtab_hdr->sh_info)
1291 return FALSE;
1292
1293 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1294 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1295 may be versioned. */
1296 return (h != NULL
1297 && h->root.root.string != NULL
1298 && (largepic
1299 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1300 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1301 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1302 && (strncmp (h->root.root.string,
1303 "__tls_get_addr", 14) == 0));
1304
1305 case R_X86_64_GOTTPOFF:
1306 /* Check transition from IE access model:
1307 mov foo@gottpoff(%rip), %reg
1308 add foo@gottpoff(%rip), %reg
1309 */
1310
1311 /* Check REX prefix first. */
1312 if (offset >= 3 && (offset + 4) <= sec->size)
1313 {
1314 val = bfd_get_8 (abfd, contents + offset - 3);
1315 if (val != 0x48 && val != 0x4c)
1316 {
1317 /* X32 may have 0x44 REX prefix or no REX prefix. */
1318 if (ABI_64_P (abfd))
1319 return FALSE;
1320 }
1321 }
1322 else
1323 {
1324 /* X32 may not have any REX prefix. */
1325 if (ABI_64_P (abfd))
1326 return FALSE;
1327 if (offset < 2 || (offset + 3) > sec->size)
1328 return FALSE;
1329 }
1330
1331 val = bfd_get_8 (abfd, contents + offset - 2);
1332 if (val != 0x8b && val != 0x03)
1333 return FALSE;
1334
1335 val = bfd_get_8 (abfd, contents + offset - 1);
1336 return (val & 0xc7) == 5;
1337
1338 case R_X86_64_GOTPC32_TLSDESC:
1339 /* Check transition from GDesc access model:
1340 leaq x@tlsdesc(%rip), %rax
1341
1342 Make sure it's a leaq adding rip to a 32-bit offset
1343 into any register, although it's probably almost always
1344 going to be rax. */
1345
1346 if (offset < 3 || (offset + 4) > sec->size)
1347 return FALSE;
1348
1349 val = bfd_get_8 (abfd, contents + offset - 3);
1350 if ((val & 0xfb) != 0x48)
1351 return FALSE;
1352
1353 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1354 return FALSE;
1355
1356 val = bfd_get_8 (abfd, contents + offset - 1);
1357 return (val & 0xc7) == 0x05;
1358
1359 case R_X86_64_TLSDESC_CALL:
1360 /* Check transition from GDesc access model:
1361 call *x@tlsdesc(%rax)
1362 */
1363 if (offset + 2 <= sec->size)
1364 {
1365 /* Make sure that it's a call *x@tlsdesc(%rax). */
1366 static const unsigned char call[] = { 0xff, 0x10 };
1367 return memcmp (contents + offset, call, 2) == 0;
1368 }
1369
1370 return FALSE;
1371
1372 default:
1373 abort ();
1374 }
1375 }
1376
1377 /* Return TRUE if the TLS access transition is OK or no transition
1378 will be performed. Update R_TYPE if there is a transition. */
1379
1380 static bfd_boolean
1381 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1382 asection *sec, bfd_byte *contents,
1383 Elf_Internal_Shdr *symtab_hdr,
1384 struct elf_link_hash_entry **sym_hashes,
1385 unsigned int *r_type, int tls_type,
1386 const Elf_Internal_Rela *rel,
1387 const Elf_Internal_Rela *relend,
1388 struct elf_link_hash_entry *h,
1389 unsigned long r_symndx)
1390 {
1391 unsigned int from_type = *r_type;
1392 unsigned int to_type = from_type;
1393 bfd_boolean check = TRUE;
1394
1395 /* Skip TLS transition for functions. */
1396 if (h != NULL
1397 && (h->type == STT_FUNC
1398 || h->type == STT_GNU_IFUNC))
1399 return TRUE;
1400
1401 switch (from_type)
1402 {
1403 case R_X86_64_TLSGD:
1404 case R_X86_64_GOTPC32_TLSDESC:
1405 case R_X86_64_TLSDESC_CALL:
1406 case R_X86_64_GOTTPOFF:
1407 if (info->executable)
1408 {
1409 if (h == NULL)
1410 to_type = R_X86_64_TPOFF32;
1411 else
1412 to_type = R_X86_64_GOTTPOFF;
1413 }
1414
1415 /* When we are called from elf_x86_64_relocate_section,
1416 CONTENTS isn't NULL and there may be additional transitions
1417 based on TLS_TYPE. */
1418 if (contents != NULL)
1419 {
1420 unsigned int new_to_type = to_type;
1421
1422 if (info->executable
1423 && h != NULL
1424 && h->dynindx == -1
1425 && tls_type == GOT_TLS_IE)
1426 new_to_type = R_X86_64_TPOFF32;
1427
1428 if (to_type == R_X86_64_TLSGD
1429 || to_type == R_X86_64_GOTPC32_TLSDESC
1430 || to_type == R_X86_64_TLSDESC_CALL)
1431 {
1432 if (tls_type == GOT_TLS_IE)
1433 new_to_type = R_X86_64_GOTTPOFF;
1434 }
1435
1436 /* We checked the transition before when we were called from
1437 elf_x86_64_check_relocs. We only want to check the new
1438 transition which hasn't been checked before. */
1439 check = new_to_type != to_type && from_type == to_type;
1440 to_type = new_to_type;
1441 }
1442
1443 break;
1444
1445 case R_X86_64_TLSLD:
1446 if (info->executable)
1447 to_type = R_X86_64_TPOFF32;
1448 break;
1449
1450 default:
1451 return TRUE;
1452 }
1453
1454 /* Return TRUE if there is no transition. */
1455 if (from_type == to_type)
1456 return TRUE;
1457
1458 /* Check if the transition can be performed. */
1459 if (check
1460 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1461 symtab_hdr, sym_hashes,
1462 from_type, rel, relend))
1463 {
1464 reloc_howto_type *from, *to;
1465 const char *name;
1466
1467 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1468 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1469
1470 if (h)
1471 name = h->root.root.string;
1472 else
1473 {
1474 struct elf_x86_64_link_hash_table *htab;
1475
1476 htab = elf_x86_64_hash_table (info);
1477 if (htab == NULL)
1478 name = "*unknown*";
1479 else
1480 {
1481 Elf_Internal_Sym *isym;
1482
1483 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1484 abfd, r_symndx);
1485 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1486 }
1487 }
1488
1489 (*_bfd_error_handler)
1490 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1491 "in section `%A' failed"),
1492 abfd, sec, from->name, to->name, name,
1493 (unsigned long) rel->r_offset);
1494 bfd_set_error (bfd_error_bad_value);
1495 return FALSE;
1496 }
1497
1498 *r_type = to_type;
1499 return TRUE;
1500 }
1501
1502 /* Look through the relocs for a section during the first phase, and
1503 calculate needed space in the global offset table, procedure
1504 linkage table, and dynamic reloc sections. */
1505
1506 static bfd_boolean
1507 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1508 asection *sec,
1509 const Elf_Internal_Rela *relocs)
1510 {
1511 struct elf_x86_64_link_hash_table *htab;
1512 Elf_Internal_Shdr *symtab_hdr;
1513 struct elf_link_hash_entry **sym_hashes;
1514 const Elf_Internal_Rela *rel;
1515 const Elf_Internal_Rela *rel_end;
1516 asection *sreloc;
1517
1518 if (info->relocatable)
1519 return TRUE;
1520
1521 BFD_ASSERT (is_x86_64_elf (abfd));
1522
1523 htab = elf_x86_64_hash_table (info);
1524 if (htab == NULL)
1525 return FALSE;
1526
1527 symtab_hdr = &elf_symtab_hdr (abfd);
1528 sym_hashes = elf_sym_hashes (abfd);
1529
1530 sreloc = NULL;
1531
1532 rel_end = relocs + sec->reloc_count;
1533 for (rel = relocs; rel < rel_end; rel++)
1534 {
1535 unsigned int r_type;
1536 unsigned long r_symndx;
1537 struct elf_link_hash_entry *h;
1538 Elf_Internal_Sym *isym;
1539 const char *name;
1540 bfd_boolean size_reloc;
1541
1542 r_symndx = htab->r_sym (rel->r_info);
1543 r_type = ELF32_R_TYPE (rel->r_info);
1544
1545 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1546 {
1547 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1548 abfd, r_symndx);
1549 return FALSE;
1550 }
1551
1552 if (r_symndx < symtab_hdr->sh_info)
1553 {
1554 /* A local symbol. */
1555 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1556 abfd, r_symndx);
1557 if (isym == NULL)
1558 return FALSE;
1559
1560 /* Check relocation against local STT_GNU_IFUNC symbol. */
1561 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1562 {
1563 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1564 TRUE);
1565 if (h == NULL)
1566 return FALSE;
1567
1568 /* Fake a STT_GNU_IFUNC symbol. */
1569 h->type = STT_GNU_IFUNC;
1570 h->def_regular = 1;
1571 h->ref_regular = 1;
1572 h->forced_local = 1;
1573 h->root.type = bfd_link_hash_defined;
1574 }
1575 else
1576 h = NULL;
1577 }
1578 else
1579 {
1580 isym = NULL;
1581 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1582 while (h->root.type == bfd_link_hash_indirect
1583 || h->root.type == bfd_link_hash_warning)
1584 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1585 }
1586
1587 /* Check invalid x32 relocations. */
1588 if (!ABI_64_P (abfd))
1589 switch (r_type)
1590 {
1591 default:
1592 break;
1593
1594 case R_X86_64_DTPOFF64:
1595 case R_X86_64_TPOFF64:
1596 case R_X86_64_PC64:
1597 case R_X86_64_GOTOFF64:
1598 case R_X86_64_GOT64:
1599 case R_X86_64_GOTPCREL64:
1600 case R_X86_64_GOTPC64:
1601 case R_X86_64_GOTPLT64:
1602 case R_X86_64_PLTOFF64:
1603 {
1604 if (h)
1605 name = h->root.root.string;
1606 else
1607 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1608 NULL);
1609 (*_bfd_error_handler)
1610 (_("%B: relocation %s against symbol `%s' isn't "
1611 "supported in x32 mode"), abfd,
1612 x86_64_elf_howto_table[r_type].name, name);
1613 bfd_set_error (bfd_error_bad_value);
1614 return FALSE;
1615 }
1616 break;
1617 }
1618
1619 if (h != NULL)
1620 {
1621 /* Create the ifunc sections for static executables.  If we
1622 never see an indirect function symbol nor are we building
1623 a static executable, those sections will be empty and
1624 won't appear in the output.  */
1625 switch (r_type)
1626 {
1627 default:
1628 break;
1629
1630 case R_X86_64_PC32_BND:
1631 case R_X86_64_PLT32_BND:
1632 /* MPX PLT is supported only if elf_x86_64_arch_bed
1633 is used in 64-bit mode. */
1634 if (ABI_64_P (abfd)
1635 && (get_elf_x86_64_backend_data (abfd)
1636 == &elf_x86_64_arch_bed))
1637 {
1638 elf_x86_64_hash_entry (h)->has_bnd_reloc = TRUE;
1639
1640 /* Create the second PLT for Intel MPX support. */
1641 if (htab->plt_bnd == NULL)
1642 {
1643 unsigned int plt_bnd_align;
1644 const struct elf_backend_data *bed;
1645
1646 bed = get_elf_backend_data (info->output_bfd);
1647 switch (sizeof (elf_x86_64_bnd_plt2_entry))
1648 {
1649 case 8:
1650 plt_bnd_align = 3;
1651 break;
1652 case 16:
1653 plt_bnd_align = 4;
1654 break;
1655 default:
1656 abort ();
1657 }
1658
1659 if (htab->elf.dynobj == NULL)
1660 htab->elf.dynobj = abfd;
1661 htab->plt_bnd
1662 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1663 ".plt.bnd",
1664 (bed->dynamic_sec_flags
1665 | SEC_ALLOC
1666 | SEC_CODE
1667 | SEC_LOAD
1668 | SEC_READONLY));
1669 if (htab->plt_bnd == NULL
1670 || !bfd_set_section_alignment (htab->elf.dynobj,
1671 htab->plt_bnd,
1672 plt_bnd_align))
1673 return FALSE;
1674 }
1675 }
1676
1677 case R_X86_64_32S:
1678 case R_X86_64_32:
1679 case R_X86_64_64:
1680 case R_X86_64_PC32:
1681 case R_X86_64_PC64:
1682 case R_X86_64_PLT32:
1683 case R_X86_64_GOTPCREL:
1684 case R_X86_64_GOTPCREL64:
1685 if (htab->elf.dynobj == NULL)
1686 htab->elf.dynobj = abfd;
1687 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1688 return FALSE;
1689 break;
1690 }
1691
1692 /* It is referenced by a non-shared object. */
1693 h->ref_regular = 1;
1694 h->root.non_ir_ref = 1;
1695 }
1696
1697 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1698 symtab_hdr, sym_hashes,
1699 &r_type, GOT_UNKNOWN,
1700 rel, rel_end, h, r_symndx))
1701 return FALSE;
1702
1703 switch (r_type)
1704 {
1705 case R_X86_64_TLSLD:
1706 htab->tls_ld_got.refcount += 1;
1707 goto create_got;
1708
1709 case R_X86_64_TPOFF32:
1710 if (!info->executable && ABI_64_P (abfd))
1711 {
1712 if (h)
1713 name = h->root.root.string;
1714 else
1715 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1716 NULL);
1717 (*_bfd_error_handler)
1718 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1719 abfd,
1720 x86_64_elf_howto_table[r_type].name, name);
1721 bfd_set_error (bfd_error_bad_value);
1722 return FALSE;
1723 }
1724 break;
1725
1726 case R_X86_64_GOTTPOFF:
1727 if (!info->executable)
1728 info->flags |= DF_STATIC_TLS;
1729 /* Fall through */
1730
1731 case R_X86_64_GOT32:
1732 case R_X86_64_GOTPCREL:
1733 case R_X86_64_TLSGD:
1734 case R_X86_64_GOT64:
1735 case R_X86_64_GOTPCREL64:
1736 case R_X86_64_GOTPLT64:
1737 case R_X86_64_GOTPC32_TLSDESC:
1738 case R_X86_64_TLSDESC_CALL:
1739 /* This symbol requires a global offset table entry. */
1740 {
1741 int tls_type, old_tls_type;
1742
1743 switch (r_type)
1744 {
1745 default: tls_type = GOT_NORMAL; break;
1746 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1747 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1748 case R_X86_64_GOTPC32_TLSDESC:
1749 case R_X86_64_TLSDESC_CALL:
1750 tls_type = GOT_TLS_GDESC; break;
1751 }
1752
1753 if (h != NULL)
1754 {
1755 h->got.refcount += 1;
1756 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1757 }
1758 else
1759 {
1760 bfd_signed_vma *local_got_refcounts;
1761
1762 /* This is a global offset table entry for a local symbol. */
1763 local_got_refcounts = elf_local_got_refcounts (abfd);
1764 if (local_got_refcounts == NULL)
1765 {
1766 bfd_size_type size;
1767
1768 size = symtab_hdr->sh_info;
1769 size *= sizeof (bfd_signed_vma)
1770 + sizeof (bfd_vma) + sizeof (char);
1771 local_got_refcounts = ((bfd_signed_vma *)
1772 bfd_zalloc (abfd, size));
1773 if (local_got_refcounts == NULL)
1774 return FALSE;
1775 elf_local_got_refcounts (abfd) = local_got_refcounts;
1776 elf_x86_64_local_tlsdesc_gotent (abfd)
1777 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1778 elf_x86_64_local_got_tls_type (abfd)
1779 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1780 }
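	      /* Editorial sketch, not part of the original sources: the
		 single bfd_zalloc above carves one buffer into three
		 parallel arrays, each with symtab_hdr->sh_info elements
		 and indexed by r_symndx:

		     bfd_signed_vma  GOT reference counts  (local_got_refcounts)
		     bfd_vma         TLSDESC GOT offsets    (local_tlsdesc_gotent)
		     char            GOT_TLS_* access kinds (local_got_tls_type)

		 The two casts above simply slice that one allocation.  */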
1781 local_got_refcounts[r_symndx] += 1;
1782 old_tls_type
1783 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1784 }
1785
1786 	  /* If a TLS symbol is accessed using IE at least once,
1787 	     there is no point in using the dynamic model for it.  */
1788 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1789 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1790 || tls_type != GOT_TLS_IE))
1791 {
1792 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1793 tls_type = old_tls_type;
1794 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1795 && GOT_TLS_GD_ANY_P (tls_type))
1796 tls_type |= old_tls_type;
1797 else
1798 {
1799 if (h)
1800 name = h->root.root.string;
1801 else
1802 name = bfd_elf_sym_name (abfd, symtab_hdr,
1803 isym, NULL);
1804 (*_bfd_error_handler)
1805 (_("%B: '%s' accessed both as normal and thread local symbol"),
1806 abfd, name);
1807 bfd_set_error (bfd_error_bad_value);
1808 return FALSE;
1809 }
1810 }
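 	    /* Editorial summary, not part of the original sources, of the
 	       merge performed above (old recorded type vs. new access):

 		   GD/GDESC then IE        -> IE (IE wins once seen)
 		   IE then GD/GDESC        -> stays IE
 		   GD then GDESC (or back) -> combined, resolved later
 		   NORMAL mixed with TLS   -> hard error
 		   GOT_UNKNOWN             -> takes the new type  */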
1811
1812 if (old_tls_type != tls_type)
1813 {
1814 if (h != NULL)
1815 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1816 else
1817 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1818 }
1819 }
1820 /* Fall through */
1821
1822 case R_X86_64_GOTOFF64:
1823 case R_X86_64_GOTPC32:
1824 case R_X86_64_GOTPC64:
1825 create_got:
1826 if (htab->elf.sgot == NULL)
1827 {
1828 if (htab->elf.dynobj == NULL)
1829 htab->elf.dynobj = abfd;
1830 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1831 info))
1832 return FALSE;
1833 }
1834 break;
1835
1836 case R_X86_64_PLT32:
1837 case R_X86_64_PLT32_BND:
1838 /* This symbol requires a procedure linkage table entry. We
1839 actually build the entry in adjust_dynamic_symbol,
1840 because this might be a case of linking PIC code which is
1841 never referenced by a dynamic object, in which case we
1842 don't need to generate a procedure linkage table entry
1843 after all. */
1844
1845 /* If this is a local symbol, we resolve it directly without
1846 creating a procedure linkage table entry. */
1847 if (h == NULL)
1848 continue;
1849
1850 h->needs_plt = 1;
1851 h->plt.refcount += 1;
1852 break;
1853
1854 case R_X86_64_PLTOFF64:
1855 /* This tries to form the 'address' of a function relative
1856 to GOT. For global symbols we need a PLT entry. */
1857 if (h != NULL)
1858 {
1859 h->needs_plt = 1;
1860 h->plt.refcount += 1;
1861 }
1862 goto create_got;
1863
1864 case R_X86_64_SIZE32:
1865 case R_X86_64_SIZE64:
1866 size_reloc = TRUE;
1867 goto do_size;
1868
1869 case R_X86_64_32:
1870 if (!ABI_64_P (abfd))
1871 goto pointer;
1872 case R_X86_64_8:
1873 case R_X86_64_16:
1874 case R_X86_64_32S:
1875 /* Let's help debug shared library creation. These relocs
1876 cannot be used in shared libs. Don't error out for
1877 sections we don't care about, such as debug sections or
1878 non-constant sections. */
1879 if (info->shared
1880 && (sec->flags & SEC_ALLOC) != 0
1881 && (sec->flags & SEC_READONLY) != 0)
1882 {
1883 if (h)
1884 name = h->root.root.string;
1885 else
1886 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1887 (*_bfd_error_handler)
1888 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1889 abfd, x86_64_elf_howto_table[r_type].name, name);
1890 bfd_set_error (bfd_error_bad_value);
1891 return FALSE;
1892 }
1893 /* Fall through. */
1894
1895 case R_X86_64_PC8:
1896 case R_X86_64_PC16:
1897 case R_X86_64_PC32:
1898 case R_X86_64_PC32_BND:
1899 case R_X86_64_PC64:
1900 case R_X86_64_64:
1901 pointer:
1902 if (h != NULL && info->executable)
1903 {
1904 /* If this reloc is in a read-only section, we might
1905 need a copy reloc. We can't check reliably at this
1906 stage whether the section is read-only, as input
1907 sections have not yet been mapped to output sections.
1908 Tentatively set the flag for now, and correct in
1909 adjust_dynamic_symbol. */
1910 h->non_got_ref = 1;
1911
1912 /* We may need a .plt entry if the function this reloc
1913 refers to is in a shared lib. */
1914 h->plt.refcount += 1;
1915 if (r_type != R_X86_64_PC32
1916 && r_type != R_X86_64_PC32_BND
1917 && r_type != R_X86_64_PC64)
1918 h->pointer_equality_needed = 1;
1919 }
1920
1921 size_reloc = FALSE;
1922 do_size:
1923 /* If we are creating a shared library, and this is a reloc
1924 against a global symbol, or a non PC relative reloc
1925 against a local symbol, then we need to copy the reloc
1926 into the shared library. However, if we are linking with
1927 -Bsymbolic, we do not need to copy a reloc against a
1928 global symbol which is defined in an object we are
1929 including in the link (i.e., DEF_REGULAR is set). At
1930 this point we have not seen all the input files, so it is
1931 possible that DEF_REGULAR is not set now but will be set
1932 later (it is never cleared). In case of a weak definition,
1933 DEF_REGULAR may be cleared later by a strong definition in
1934 a shared library. We account for that possibility below by
1935 storing information in the relocs_copied field of the hash
1936 table entry. A similar situation occurs when creating
1937 shared libraries and symbol visibility changes render the
1938 symbol local.
1939
1940 If on the other hand, we are creating an executable, we
1941 may need to keep relocations for symbols satisfied by a
1942 dynamic library if we manage to avoid copy relocs for the
1943 symbol. */
1944 if ((info->shared
1945 && (sec->flags & SEC_ALLOC) != 0
1946 && (! IS_X86_64_PCREL_TYPE (r_type)
1947 || (h != NULL
1948 && (! SYMBOLIC_BIND (info, h)
1949 || h->root.type == bfd_link_hash_defweak
1950 || !h->def_regular))))
1951 || (ELIMINATE_COPY_RELOCS
1952 && !info->shared
1953 && (sec->flags & SEC_ALLOC) != 0
1954 && h != NULL
1955 && (h->root.type == bfd_link_hash_defweak
1956 || !h->def_regular)))
1957 {
1958 struct elf_dyn_relocs *p;
1959 struct elf_dyn_relocs **head;
1960
1961 /* We must copy these reloc types into the output file.
1962 Create a reloc section in dynobj and make room for
1963 this reloc. */
1964 if (sreloc == NULL)
1965 {
1966 if (htab->elf.dynobj == NULL)
1967 htab->elf.dynobj = abfd;
1968
1969 sreloc = _bfd_elf_make_dynamic_reloc_section
1970 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1971 abfd, /*rela?*/ TRUE);
1972
1973 if (sreloc == NULL)
1974 return FALSE;
1975 }
1976
1977 /* If this is a global symbol, we count the number of
1978 relocations we need for this symbol. */
1979 if (h != NULL)
1980 {
1981 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
1982 }
1983 else
1984 {
1985 /* Track dynamic relocs needed for local syms too.
1986 We really need local syms available to do this
1987 easily. Oh well. */
1988 asection *s;
1989 void **vpp;
1990
1991 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1992 abfd, r_symndx);
1993 if (isym == NULL)
1994 return FALSE;
1995
1996 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
1997 if (s == NULL)
1998 s = sec;
1999
2000 /* Beware of type punned pointers vs strict aliasing
2001 rules. */
2002 vpp = &(elf_section_data (s)->local_dynrel);
2003 head = (struct elf_dyn_relocs **)vpp;
2004 }
2005
2006 p = *head;
2007 if (p == NULL || p->sec != sec)
2008 {
2009 bfd_size_type amt = sizeof *p;
2010
2011 p = ((struct elf_dyn_relocs *)
2012 bfd_alloc (htab->elf.dynobj, amt));
2013 if (p == NULL)
2014 return FALSE;
2015 p->next = *head;
2016 *head = p;
2017 p->sec = sec;
2018 p->count = 0;
2019 p->pc_count = 0;
2020 }
2021
2022 p->count += 1;
2023 /* Count size relocation as PC-relative relocation. */
2024 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2025 p->pc_count += 1;
2026 }
2027 break;
2028
2029 /* This relocation describes the C++ object vtable hierarchy.
2030 Reconstruct it for later use during GC. */
2031 case R_X86_64_GNU_VTINHERIT:
2032 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2033 return FALSE;
2034 break;
2035
2036 /* This relocation describes which C++ vtable entries are actually
2037 used. Record for later use during GC. */
2038 case R_X86_64_GNU_VTENTRY:
2039 BFD_ASSERT (h != NULL);
2040 if (h != NULL
2041 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2042 return FALSE;
2043 break;
2044
2045 default:
2046 break;
2047 }
2048 }
2049
2050 return TRUE;
2051 }
2052
2053 /* Return the section that should be marked against GC for a given
2054 relocation. */
2055
2056 static asection *
2057 elf_x86_64_gc_mark_hook (asection *sec,
2058 struct bfd_link_info *info,
2059 Elf_Internal_Rela *rel,
2060 struct elf_link_hash_entry *h,
2061 Elf_Internal_Sym *sym)
2062 {
2063 if (h != NULL)
2064 switch (ELF32_R_TYPE (rel->r_info))
2065 {
2066 case R_X86_64_GNU_VTINHERIT:
2067 case R_X86_64_GNU_VTENTRY:
2068 return NULL;
2069 }
2070
2071 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2072 }
2073
2074 /* Update the got entry reference counts for the section being removed. */
2075
2076 static bfd_boolean
2077 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2078 asection *sec,
2079 const Elf_Internal_Rela *relocs)
2080 {
2081 struct elf_x86_64_link_hash_table *htab;
2082 Elf_Internal_Shdr *symtab_hdr;
2083 struct elf_link_hash_entry **sym_hashes;
2084 bfd_signed_vma *local_got_refcounts;
2085 const Elf_Internal_Rela *rel, *relend;
2086
2087 if (info->relocatable)
2088 return TRUE;
2089
2090 htab = elf_x86_64_hash_table (info);
2091 if (htab == NULL)
2092 return FALSE;
2093
2094 elf_section_data (sec)->local_dynrel = NULL;
2095
2096 symtab_hdr = &elf_symtab_hdr (abfd);
2097 sym_hashes = elf_sym_hashes (abfd);
2098 local_got_refcounts = elf_local_got_refcounts (abfd);
2099
2100 htab = elf_x86_64_hash_table (info);
2101 relend = relocs + sec->reloc_count;
2102 for (rel = relocs; rel < relend; rel++)
2103 {
2104 unsigned long r_symndx;
2105 unsigned int r_type;
2106 struct elf_link_hash_entry *h = NULL;
2107
2108 r_symndx = htab->r_sym (rel->r_info);
2109 if (r_symndx >= symtab_hdr->sh_info)
2110 {
2111 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2112 while (h->root.type == bfd_link_hash_indirect
2113 || h->root.type == bfd_link_hash_warning)
2114 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2115 }
2116 else
2117 {
2118 /* A local symbol. */
2119 Elf_Internal_Sym *isym;
2120
2121 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2122 abfd, r_symndx);
2123
2124 /* Check relocation against local STT_GNU_IFUNC symbol. */
2125 if (isym != NULL
2126 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2127 {
2128 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2129 if (h == NULL)
2130 abort ();
2131 }
2132 }
2133
2134 if (h)
2135 {
2136 struct elf_x86_64_link_hash_entry *eh;
2137 struct elf_dyn_relocs **pp;
2138 struct elf_dyn_relocs *p;
2139
2140 eh = (struct elf_x86_64_link_hash_entry *) h;
2141
2142 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2143 if (p->sec == sec)
2144 {
2145 /* Everything must go for SEC. */
2146 *pp = p->next;
2147 break;
2148 }
2149 }
2150
2151 r_type = ELF32_R_TYPE (rel->r_info);
2152 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2153 symtab_hdr, sym_hashes,
2154 &r_type, GOT_UNKNOWN,
2155 rel, relend, h, r_symndx))
2156 return FALSE;
2157
2158 switch (r_type)
2159 {
2160 case R_X86_64_TLSLD:
2161 if (htab->tls_ld_got.refcount > 0)
2162 htab->tls_ld_got.refcount -= 1;
2163 break;
2164
2165 case R_X86_64_TLSGD:
2166 case R_X86_64_GOTPC32_TLSDESC:
2167 case R_X86_64_TLSDESC_CALL:
2168 case R_X86_64_GOTTPOFF:
2169 case R_X86_64_GOT32:
2170 case R_X86_64_GOTPCREL:
2171 case R_X86_64_GOT64:
2172 case R_X86_64_GOTPCREL64:
2173 case R_X86_64_GOTPLT64:
2174 if (h != NULL)
2175 {
2176 if (h->got.refcount > 0)
2177 h->got.refcount -= 1;
2178 if (h->type == STT_GNU_IFUNC)
2179 {
2180 if (h->plt.refcount > 0)
2181 h->plt.refcount -= 1;
2182 }
2183 }
2184 else if (local_got_refcounts != NULL)
2185 {
2186 if (local_got_refcounts[r_symndx] > 0)
2187 local_got_refcounts[r_symndx] -= 1;
2188 }
2189 break;
2190
2191 case R_X86_64_8:
2192 case R_X86_64_16:
2193 case R_X86_64_32:
2194 case R_X86_64_64:
2195 case R_X86_64_32S:
2196 case R_X86_64_PC8:
2197 case R_X86_64_PC16:
2198 case R_X86_64_PC32:
2199 case R_X86_64_PC32_BND:
2200 case R_X86_64_PC64:
2201 case R_X86_64_SIZE32:
2202 case R_X86_64_SIZE64:
2203 if (info->shared
2204 && (h == NULL || h->type != STT_GNU_IFUNC))
2205 break;
2206 /* Fall thru */
2207
2208 case R_X86_64_PLT32:
2209 case R_X86_64_PLT32_BND:
2210 case R_X86_64_PLTOFF64:
2211 if (h != NULL)
2212 {
2213 if (h->plt.refcount > 0)
2214 h->plt.refcount -= 1;
2215 }
2216 break;
2217
2218 default:
2219 break;
2220 }
2221 }
2222
2223 return TRUE;
2224 }
2225
2226 /* Adjust a symbol defined by a dynamic object and referenced by a
2227 regular object. The current definition is in some section of the
2228 dynamic object, but we're not including those sections. We have to
2229 change the definition to something the rest of the link can
2230 understand. */
2231
2232 static bfd_boolean
2233 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2234 struct elf_link_hash_entry *h)
2235 {
2236 struct elf_x86_64_link_hash_table *htab;
2237 asection *s;
2238 struct elf_x86_64_link_hash_entry *eh;
2239 struct elf_dyn_relocs *p;
2240
2241 /* STT_GNU_IFUNC symbol must go through PLT. */
2242 if (h->type == STT_GNU_IFUNC)
2243 {
2244       /* All local STT_GNU_IFUNC references must be treated as local
2245 	 calls via the local PLT.  */
2246 if (h->ref_regular
2247 && SYMBOL_CALLS_LOCAL (info, h))
2248 {
2249 bfd_size_type pc_count = 0, count = 0;
2250 struct elf_dyn_relocs **pp;
2251
2252 eh = (struct elf_x86_64_link_hash_entry *) h;
2253 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2254 {
2255 pc_count += p->pc_count;
2256 p->count -= p->pc_count;
2257 p->pc_count = 0;
2258 count += p->count;
2259 if (p->count == 0)
2260 *pp = p->next;
2261 else
2262 pp = &p->next;
2263 }
2264
2265 if (pc_count || count)
2266 {
2267 h->needs_plt = 1;
2268 h->non_got_ref = 1;
2269 if (h->plt.refcount <= 0)
2270 h->plt.refcount = 1;
2271 else
2272 h->plt.refcount += 1;
2273 }
2274 }
2275
2276 if (h->plt.refcount <= 0)
2277 {
2278 h->plt.offset = (bfd_vma) -1;
2279 h->needs_plt = 0;
2280 }
2281 return TRUE;
2282 }
2283
2284 /* If this is a function, put it in the procedure linkage table. We
2285 will fill in the contents of the procedure linkage table later,
2286 when we know the address of the .got section. */
2287 if (h->type == STT_FUNC
2288 || h->needs_plt)
2289 {
2290 if (h->plt.refcount <= 0
2291 || SYMBOL_CALLS_LOCAL (info, h)
2292 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2293 && h->root.type == bfd_link_hash_undefweak))
2294 {
2295 /* This case can occur if we saw a PLT32 reloc in an input
2296 file, but the symbol was never referred to by a dynamic
2297 object, or if all references were garbage collected. In
2298 such a case, we don't actually need to build a procedure
2299 linkage table, and we can just do a PC32 reloc instead. */
2300 h->plt.offset = (bfd_vma) -1;
2301 h->needs_plt = 0;
2302 }
2303
2304 return TRUE;
2305 }
2306 else
2307 /* It's possible that we incorrectly decided a .plt reloc was
2308 needed for an R_X86_64_PC32 reloc to a non-function sym in
2309 check_relocs. We can't decide accurately between function and
2310 non-function syms in check-relocs; Objects loaded later in
2311 	 non-function syms in check_relocs; objects loaded later in
2312 h->plt.offset = (bfd_vma) -1;
2313
2314 /* If this is a weak symbol, and there is a real definition, the
2315 processor independent code will have arranged for us to see the
2316 real definition first, and we can just use the same value. */
2317 if (h->u.weakdef != NULL)
2318 {
2319 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2320 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2321 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2322 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2323 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2324 h->non_got_ref = h->u.weakdef->non_got_ref;
2325 return TRUE;
2326 }
2327
2328 /* This is a reference to a symbol defined by a dynamic object which
2329 is not a function. */
2330
2331 /* If we are creating a shared library, we must presume that the
2332 only references to the symbol are via the global offset table.
2333 For such cases we need not do anything here; the relocations will
2334 be handled correctly by relocate_section. */
2335 if (info->shared)
2336 return TRUE;
2337
2338 /* If there are no references to this symbol that do not use the
2339 GOT, we don't need to generate a copy reloc. */
2340 if (!h->non_got_ref)
2341 return TRUE;
2342
2343 /* If -z nocopyreloc was given, we won't generate them either. */
2344 if (info->nocopyreloc)
2345 {
2346 h->non_got_ref = 0;
2347 return TRUE;
2348 }
2349
2350 if (ELIMINATE_COPY_RELOCS)
2351 {
2352 eh = (struct elf_x86_64_link_hash_entry *) h;
2353 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2354 {
2355 s = p->sec->output_section;
2356 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2357 break;
2358 }
2359
2360 /* If we didn't find any dynamic relocs in read-only sections, then
2361 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2362 if (p == NULL)
2363 {
2364 h->non_got_ref = 0;
2365 return TRUE;
2366 }
2367 }
2368
2369 /* We must allocate the symbol in our .dynbss section, which will
2370 become part of the .bss section of the executable. There will be
2371 an entry for this symbol in the .dynsym section. The dynamic
2372 object will contain position independent code, so all references
2373 from the dynamic object to this symbol will go through the global
2374 offset table. The dynamic linker will use the .dynsym entry to
2375 determine the address it must put in the global offset table, so
2376 both the dynamic object and the regular object will refer to the
2377 same memory location for the variable. */
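   /* Editorial example, assumed for illustration only (not from the
      original sources): a shared library that defines

 	  int bar = 42;

      referenced directly (not via the GOT) from a non-PIC executable

 	  extern int bar;
 	  int main (void) { return bar; }

      gets space for `bar' in the executable's .dynbss plus one
      R_X86_64_COPY relocation; at startup the dynamic linker copies the
      initial value out of the DSO into that location, which is what the
      code below sets up.  */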
2378
2379 htab = elf_x86_64_hash_table (info);
2380 if (htab == NULL)
2381 return FALSE;
2382
2383 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2384 to copy the initial value out of the dynamic object and into the
2385 runtime process image. */
2386 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2387 {
2388 const struct elf_backend_data *bed;
2389 bed = get_elf_backend_data (info->output_bfd);
2390 htab->srelbss->size += bed->s->sizeof_rela;
2391 h->needs_copy = 1;
2392 }
2393
2394 s = htab->sdynbss;
2395
2396 return _bfd_elf_adjust_dynamic_copy (h, s);
2397 }
2398
2399 /* Allocate space in .plt, .got and associated reloc sections for
2400 dynamic relocs. */
2401
2402 static bfd_boolean
2403 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2404 {
2405 struct bfd_link_info *info;
2406 struct elf_x86_64_link_hash_table *htab;
2407 struct elf_x86_64_link_hash_entry *eh;
2408 struct elf_dyn_relocs *p;
2409 const struct elf_backend_data *bed;
2410 unsigned int plt_entry_size;
2411
2412 if (h->root.type == bfd_link_hash_indirect)
2413 return TRUE;
2414
2415 eh = (struct elf_x86_64_link_hash_entry *) h;
2416
2417 info = (struct bfd_link_info *) inf;
2418 htab = elf_x86_64_hash_table (info);
2419 if (htab == NULL)
2420 return FALSE;
2421 bed = get_elf_backend_data (info->output_bfd);
2422 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2423
2424 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2425 here if it is defined and referenced in a non-shared object. */
2426 if (h->type == STT_GNU_IFUNC
2427 && h->def_regular)
2428 {
2429 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2430 &eh->dyn_relocs,
2431 plt_entry_size,
2432 plt_entry_size,
2433 GOT_ENTRY_SIZE))
2434 {
2435 asection *s = htab->plt_bnd;
2436 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2437 {
2438 /* Use the .plt.bnd section if it is created. */
2439 eh->plt_bnd.offset = s->size;
2440
2441 /* Make room for this entry in the .plt.bnd section. */
2442 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2443 }
2444
2445 return TRUE;
2446 }
2447 else
2448 return FALSE;
2449 }
2450 else if (htab->elf.dynamic_sections_created
2451 && h->plt.refcount > 0)
2452 {
2453 /* Make sure this symbol is output as a dynamic symbol.
2454 Undefined weak syms won't yet be marked as dynamic. */
2455 if (h->dynindx == -1
2456 && !h->forced_local)
2457 {
2458 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2459 return FALSE;
2460 }
2461
2462 if (info->shared
2463 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2464 {
2465 asection *s = htab->elf.splt;
2466 asection *bnd_s = htab->plt_bnd;
2467
2468 /* If this is the first .plt entry, make room for the special
2469 first entry. */
2470 if (s->size == 0)
2471 s->size = plt_entry_size;
2472
2473 h->plt.offset = s->size;
2474 if (bnd_s)
2475 eh->plt_bnd.offset = bnd_s->size;
2476
2477 /* If this symbol is not defined in a regular file, and we are
2478 not generating a shared library, then set the symbol to this
2479 location in the .plt. This is required to make function
2480 pointers compare as equal between the normal executable and
2481 the shared library. */
2482 if (! info->shared
2483 && !h->def_regular)
2484 {
2485 if (bnd_s)
2486 {
2487 /* We need to make a call to the entry of the second
2488 		     PLT instead of the regular PLT entry.  */
2489 h->root.u.def.section = bnd_s;
2490 h->root.u.def.value = eh->plt_bnd.offset;
2491 }
2492 else
2493 {
2494 h->root.u.def.section = s;
2495 h->root.u.def.value = h->plt.offset;
2496 }
2497 }
2498
2499 /* Make room for this entry. */
2500 s->size += plt_entry_size;
2501 if (bnd_s)
2502 {
2503 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry)
2504 == sizeof (elf_x86_64_legacy_plt2_entry));
2505 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2506 }
2507
2508 /* We also need to make an entry in the .got.plt section, which
2509 will be placed in the .got section by the linker script. */
2510 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2511
2512 /* We also need to make an entry in the .rela.plt section. */
2513 htab->elf.srelplt->size += bed->s->sizeof_rela;
2514 htab->elf.srelplt->reloc_count++;
2515 }
2516 else
2517 {
2518 h->plt.offset = (bfd_vma) -1;
2519 h->needs_plt = 0;
2520 }
2521 }
2522 else
2523 {
2524 h->plt.offset = (bfd_vma) -1;
2525 h->needs_plt = 0;
2526 }
2527
2528 eh->tlsdesc_got = (bfd_vma) -1;
2529
2530 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2531 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2532 if (h->got.refcount > 0
2533 && info->executable
2534 && h->dynindx == -1
2535 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2536 {
2537 h->got.offset = (bfd_vma) -1;
2538 }
2539 else if (h->got.refcount > 0)
2540 {
2541 asection *s;
2542 bfd_boolean dyn;
2543 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2544
2545 /* Make sure this symbol is output as a dynamic symbol.
2546 Undefined weak syms won't yet be marked as dynamic. */
2547 if (h->dynindx == -1
2548 && !h->forced_local)
2549 {
2550 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2551 return FALSE;
2552 }
2553
2554 if (GOT_TLS_GDESC_P (tls_type))
2555 {
2556 eh->tlsdesc_got = htab->elf.sgotplt->size
2557 - elf_x86_64_compute_jump_table_size (htab);
2558 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2559 h->got.offset = (bfd_vma) -2;
2560 }
2561 if (! GOT_TLS_GDESC_P (tls_type)
2562 || GOT_TLS_GD_P (tls_type))
2563 {
2564 s = htab->elf.sgot;
2565 h->got.offset = s->size;
2566 s->size += GOT_ENTRY_SIZE;
2567 if (GOT_TLS_GD_P (tls_type))
2568 s->size += GOT_ENTRY_SIZE;
2569 }
2570 dyn = htab->elf.dynamic_sections_created;
2571 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2572 and two if global.
2573 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2574 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2575 || tls_type == GOT_TLS_IE)
2576 htab->elf.srelgot->size += bed->s->sizeof_rela;
2577 else if (GOT_TLS_GD_P (tls_type))
2578 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2579 else if (! GOT_TLS_GDESC_P (tls_type)
2580 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2581 || h->root.type != bfd_link_hash_undefweak)
2582 && (info->shared
2583 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2584 htab->elf.srelgot->size += bed->s->sizeof_rela;
2585 if (GOT_TLS_GDESC_P (tls_type))
2586 {
2587 htab->elf.srelplt->size += bed->s->sizeof_rela;
2588 htab->tlsdesc_plt = (bfd_vma) -1;
2589 }
2590 }
2591 else
2592 h->got.offset = (bfd_vma) -1;
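   /* Editorial summary, not part of the original sources, of the space
      reserved above for a global symbol, per GOT TLS model:

 	  GD      2 slots in .got      1 reloc (local) or 2 (global) in .rela.got
 	  GDESC   2 slots in .got.plt  1 reloc in .rela.plt
 	  IE      1 slot in .got       1 reloc in .rela.got
 	  other   1 slot in .got       at most 1 reloc in .rela.got  */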
2593
2594 if (eh->dyn_relocs == NULL)
2595 return TRUE;
2596
2597 /* In the shared -Bsymbolic case, discard space allocated for
2598 dynamic pc-relative relocs against symbols which turn out to be
2599 defined in regular objects. For the normal shared case, discard
2600 space for pc-relative relocs that have become local due to symbol
2601 visibility changes. */
2602
2603 if (info->shared)
2604 {
2605 /* Relocs that use pc_count are those that appear on a call
2606 	 insn, or certain REL relocs that can be generated via assembly.
2607 We want calls to protected symbols to resolve directly to the
2608 function rather than going via the plt. If people want
2609 function pointer comparisons to work as expected then they
2610 should avoid writing weird assembly. */
2611 if (SYMBOL_CALLS_LOCAL (info, h))
2612 {
2613 struct elf_dyn_relocs **pp;
2614
2615 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2616 {
2617 p->count -= p->pc_count;
2618 p->pc_count = 0;
2619 if (p->count == 0)
2620 *pp = p->next;
2621 else
2622 pp = &p->next;
2623 }
2624 }
2625
2626 /* Also discard relocs on undefined weak syms with non-default
2627 visibility. */
2628 if (eh->dyn_relocs != NULL
2629 && h->root.type == bfd_link_hash_undefweak)
2630 {
2631 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2632 eh->dyn_relocs = NULL;
2633
2634 	  /* Make sure undefined weak symbols are output as dynamic
2635 	     symbols in PIEs.  */
2636 else if (h->dynindx == -1
2637 && ! h->forced_local
2638 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2639 return FALSE;
2640 }
2641
2642 }
2643 else if (ELIMINATE_COPY_RELOCS)
2644 {
2645 /* For the non-shared case, discard space for relocs against
2646 symbols which turn out to need copy relocs or are not
2647 dynamic. */
2648
2649 if (!h->non_got_ref
2650 && ((h->def_dynamic
2651 && !h->def_regular)
2652 || (htab->elf.dynamic_sections_created
2653 && (h->root.type == bfd_link_hash_undefweak
2654 || h->root.type == bfd_link_hash_undefined))))
2655 {
2656 /* Make sure this symbol is output as a dynamic symbol.
2657 Undefined weak syms won't yet be marked as dynamic. */
2658 if (h->dynindx == -1
2659 && ! h->forced_local
2660 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2661 return FALSE;
2662
2663 /* If that succeeded, we know we'll be keeping all the
2664 relocs. */
2665 if (h->dynindx != -1)
2666 goto keep;
2667 }
2668
2669 eh->dyn_relocs = NULL;
2670
2671 keep: ;
2672 }
2673
2674 /* Finally, allocate space. */
2675 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2676 {
2677 asection * sreloc;
2678
2679 sreloc = elf_section_data (p->sec)->sreloc;
2680
2681 BFD_ASSERT (sreloc != NULL);
2682
2683 sreloc->size += p->count * bed->s->sizeof_rela;
2684 }
2685
2686 return TRUE;
2687 }
2688
2689 /* Allocate space in .plt, .got and associated reloc sections for
2690 local dynamic relocs. */
2691
2692 static bfd_boolean
2693 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2694 {
2695 struct elf_link_hash_entry *h
2696 = (struct elf_link_hash_entry *) *slot;
2697
2698 if (h->type != STT_GNU_IFUNC
2699 || !h->def_regular
2700 || !h->ref_regular
2701 || !h->forced_local
2702 || h->root.type != bfd_link_hash_defined)
2703 abort ();
2704
2705 return elf_x86_64_allocate_dynrelocs (h, inf);
2706 }
2707
2708 /* Find any dynamic relocs that apply to read-only sections. */
2709
2710 static bfd_boolean
2711 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2712 void * inf)
2713 {
2714 struct elf_x86_64_link_hash_entry *eh;
2715 struct elf_dyn_relocs *p;
2716
2717 /* Skip local IFUNC symbols. */
2718 if (h->forced_local && h->type == STT_GNU_IFUNC)
2719 return TRUE;
2720
2721 eh = (struct elf_x86_64_link_hash_entry *) h;
2722 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2723 {
2724 asection *s = p->sec->output_section;
2725
2726 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2727 {
2728 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2729
2730 info->flags |= DF_TEXTREL;
2731
2732 if (info->warn_shared_textrel && info->shared)
2733 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2734 p->sec->owner, h->root.root.string,
2735 p->sec);
2736
2737 /* Not an error, just cut short the traversal. */
2738 return FALSE;
2739 }
2740 }
2741 return TRUE;
2742 }
2743
2744 /* Convert
2745 mov foo@GOTPCREL(%rip), %reg
2746 to
2747 lea foo(%rip), %reg
2748 with the local symbol, foo. */
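/* Editorial sketch, not part of the original sources: with the usual
   REX.W-prefixed encodings the two forms are

       48 8b 05 xx xx xx xx	mov  foo@GOTPCREL(%rip), %rax
       48 8d 05 xx xx xx xx	lea  foo(%rip), %rax

   i.e. only the opcode byte differs (0x8b vs 0x8d).  That is why the
   function below checks the byte at r_offset - 2 for 0x8b, rewrites it
   to 0x8d, and retargets the relocation from R_X86_64_GOTPCREL to
   R_X86_64_PC32.  */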
2749
2750 static bfd_boolean
2751 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2752 struct bfd_link_info *link_info)
2753 {
2754 Elf_Internal_Shdr *symtab_hdr;
2755 Elf_Internal_Rela *internal_relocs;
2756 Elf_Internal_Rela *irel, *irelend;
2757 bfd_byte *contents;
2758 struct elf_x86_64_link_hash_table *htab;
2759 bfd_boolean changed_contents;
2760 bfd_boolean changed_relocs;
2761 bfd_signed_vma *local_got_refcounts;
2762
2763 /* Don't even try to convert non-ELF outputs. */
2764 if (!is_elf_hash_table (link_info->hash))
2765 return FALSE;
2766
2767   /* Nothing to do if there is no code, no relocations or no output. */
2768 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2769 || sec->reloc_count == 0
2770 || bfd_is_abs_section (sec->output_section))
2771 return TRUE;
2772
2773 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2774
2775 /* Load the relocations for this section. */
2776 internal_relocs = (_bfd_elf_link_read_relocs
2777 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2778 link_info->keep_memory));
2779 if (internal_relocs == NULL)
2780 return FALSE;
2781
2782 htab = elf_x86_64_hash_table (link_info);
2783 changed_contents = FALSE;
2784 changed_relocs = FALSE;
2785 local_got_refcounts = elf_local_got_refcounts (abfd);
2786
2787 /* Get the section contents. */
2788 if (elf_section_data (sec)->this_hdr.contents != NULL)
2789 contents = elf_section_data (sec)->this_hdr.contents;
2790 else
2791 {
2792 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2793 goto error_return;
2794 }
2795
2796 irelend = internal_relocs + sec->reloc_count;
2797 for (irel = internal_relocs; irel < irelend; irel++)
2798 {
2799 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2800 unsigned int r_symndx = htab->r_sym (irel->r_info);
2801 unsigned int indx;
2802 struct elf_link_hash_entry *h;
2803
2804 if (r_type != R_X86_64_GOTPCREL)
2805 continue;
2806
2807 /* Get the symbol referred to by the reloc. */
2808 if (r_symndx < symtab_hdr->sh_info)
2809 {
2810 Elf_Internal_Sym *isym;
2811
2812 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2813 abfd, r_symndx);
2814
2815 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2816 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2817 && irel->r_offset >= 2
2818 && bfd_get_8 (input_bfd,
2819 contents + irel->r_offset - 2) == 0x8b)
2820 {
2821 bfd_put_8 (output_bfd, 0x8d,
2822 contents + irel->r_offset - 2);
2823 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2824 if (local_got_refcounts != NULL
2825 && local_got_refcounts[r_symndx] > 0)
2826 local_got_refcounts[r_symndx] -= 1;
2827 changed_contents = TRUE;
2828 changed_relocs = TRUE;
2829 }
2830 continue;
2831 }
2832
2833 indx = r_symndx - symtab_hdr->sh_info;
2834 h = elf_sym_hashes (abfd)[indx];
2835 BFD_ASSERT (h != NULL);
2836
2837 while (h->root.type == bfd_link_hash_indirect
2838 || h->root.type == bfd_link_hash_warning)
2839 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2840
2841 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2842 avoid optimizing _DYNAMIC since ld.so may use its link-time
2843 address. */
2844 if (h->def_regular
2845 && h->type != STT_GNU_IFUNC
2846 && h != htab->elf.hdynamic
2847 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2848 && irel->r_offset >= 2
2849 && bfd_get_8 (input_bfd,
2850 contents + irel->r_offset - 2) == 0x8b)
2851 {
2852 bfd_put_8 (output_bfd, 0x8d,
2853 contents + irel->r_offset - 2);
2854 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2855 if (h->got.refcount > 0)
2856 h->got.refcount -= 1;
2857 changed_contents = TRUE;
2858 changed_relocs = TRUE;
2859 }
2860 }
2861
2862 if (contents != NULL
2863 && elf_section_data (sec)->this_hdr.contents != contents)
2864 {
2865 if (!changed_contents && !link_info->keep_memory)
2866 free (contents);
2867 else
2868 {
2869 /* Cache the section contents for elf_link_input_bfd. */
2870 elf_section_data (sec)->this_hdr.contents = contents;
2871 }
2872 }
2873
2874 if (elf_section_data (sec)->relocs != internal_relocs)
2875 {
2876 if (!changed_relocs)
2877 free (internal_relocs);
2878 else
2879 elf_section_data (sec)->relocs = internal_relocs;
2880 }
2881
2882 return TRUE;
2883
2884 error_return:
2885 if (contents != NULL
2886 && elf_section_data (sec)->this_hdr.contents != contents)
2887 free (contents);
2888 if (internal_relocs != NULL
2889 && elf_section_data (sec)->relocs != internal_relocs)
2890 free (internal_relocs);
2891 return FALSE;
2892 }
2893
2894 /* Set the sizes of the dynamic sections. */
2895
2896 static bfd_boolean
2897 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
2898 struct bfd_link_info *info)
2899 {
2900 struct elf_x86_64_link_hash_table *htab;
2901 bfd *dynobj;
2902 asection *s;
2903 bfd_boolean relocs;
2904 bfd *ibfd;
2905 const struct elf_backend_data *bed;
2906
2907 htab = elf_x86_64_hash_table (info);
2908 if (htab == NULL)
2909 return FALSE;
2910 bed = get_elf_backend_data (output_bfd);
2911
2912 dynobj = htab->elf.dynobj;
2913 if (dynobj == NULL)
2914 abort ();
2915
2916 if (htab->elf.dynamic_sections_created)
2917 {
2918 /* Set the contents of the .interp section to the interpreter. */
2919 if (info->executable)
2920 {
2921 s = bfd_get_linker_section (dynobj, ".interp");
2922 if (s == NULL)
2923 abort ();
2924 s->size = htab->dynamic_interpreter_size;
2925 s->contents = (unsigned char *) htab->dynamic_interpreter;
2926 }
2927 }
2928
2929 /* Set up .got offsets for local syms, and space for local dynamic
2930 relocs. */
2931 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
2932 {
2933 bfd_signed_vma *local_got;
2934 bfd_signed_vma *end_local_got;
2935 char *local_tls_type;
2936 bfd_vma *local_tlsdesc_gotent;
2937 bfd_size_type locsymcount;
2938 Elf_Internal_Shdr *symtab_hdr;
2939 asection *srel;
2940
2941 if (! is_x86_64_elf (ibfd))
2942 continue;
2943
2944 for (s = ibfd->sections; s != NULL; s = s->next)
2945 {
2946 struct elf_dyn_relocs *p;
2947
2948 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
2949 return FALSE;
2950
2951 for (p = (struct elf_dyn_relocs *)
2952 (elf_section_data (s)->local_dynrel);
2953 p != NULL;
2954 p = p->next)
2955 {
2956 if (!bfd_is_abs_section (p->sec)
2957 && bfd_is_abs_section (p->sec->output_section))
2958 {
2959 /* Input section has been discarded, either because
2960 it is a copy of a linkonce section or due to
2961 linker script /DISCARD/, so we'll be discarding
2962 the relocs too. */
2963 }
2964 else if (p->count != 0)
2965 {
2966 srel = elf_section_data (p->sec)->sreloc;
2967 srel->size += p->count * bed->s->sizeof_rela;
2968 if ((p->sec->output_section->flags & SEC_READONLY) != 0
2969 && (info->flags & DF_TEXTREL) == 0)
2970 {
2971 info->flags |= DF_TEXTREL;
2972 if (info->warn_shared_textrel && info->shared)
2973 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
2974 p->sec->owner, p->sec);
2975 }
2976 }
2977 }
2978 }
2979
2980 local_got = elf_local_got_refcounts (ibfd);
2981 if (!local_got)
2982 continue;
2983
2984 symtab_hdr = &elf_symtab_hdr (ibfd);
2985 locsymcount = symtab_hdr->sh_info;
2986 end_local_got = local_got + locsymcount;
2987 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
2988 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
2989 s = htab->elf.sgot;
2990 srel = htab->elf.srelgot;
2991 for (; local_got < end_local_got;
2992 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
2993 {
2994 *local_tlsdesc_gotent = (bfd_vma) -1;
2995 if (*local_got > 0)
2996 {
2997 if (GOT_TLS_GDESC_P (*local_tls_type))
2998 {
2999 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3000 - elf_x86_64_compute_jump_table_size (htab);
3001 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3002 *local_got = (bfd_vma) -2;
3003 }
3004 if (! GOT_TLS_GDESC_P (*local_tls_type)
3005 || GOT_TLS_GD_P (*local_tls_type))
3006 {
3007 *local_got = s->size;
3008 s->size += GOT_ENTRY_SIZE;
3009 if (GOT_TLS_GD_P (*local_tls_type))
3010 s->size += GOT_ENTRY_SIZE;
3011 }
3012 if (info->shared
3013 || GOT_TLS_GD_ANY_P (*local_tls_type)
3014 || *local_tls_type == GOT_TLS_IE)
3015 {
3016 if (GOT_TLS_GDESC_P (*local_tls_type))
3017 {
3018 htab->elf.srelplt->size
3019 += bed->s->sizeof_rela;
3020 htab->tlsdesc_plt = (bfd_vma) -1;
3021 }
3022 if (! GOT_TLS_GDESC_P (*local_tls_type)
3023 || GOT_TLS_GD_P (*local_tls_type))
3024 srel->size += bed->s->sizeof_rela;
3025 }
3026 }
3027 else
3028 *local_got = (bfd_vma) -1;
3029 }
3030 }
3031
3032 if (htab->tls_ld_got.refcount > 0)
3033 {
3034 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3035 relocs. */
3036 htab->tls_ld_got.offset = htab->elf.sgot->size;
3037 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3038 htab->elf.srelgot->size += bed->s->sizeof_rela;
3039 }
3040 else
3041 htab->tls_ld_got.offset = -1;
3042
3043 /* Allocate global sym .plt and .got entries, and space for global
3044 sym dynamic relocs. */
3045 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3046 info);
3047
3048 /* Allocate .plt and .got entries, and space for local symbols. */
3049 htab_traverse (htab->loc_hash_table,
3050 elf_x86_64_allocate_local_dynrelocs,
3051 info);
3052
3053 /* For every jump slot reserved in the sgotplt, reloc_count is
3054 incremented. However, when we reserve space for TLS descriptors,
3055 it's not incremented, so in order to compute the space reserved
3056 for them, it suffices to multiply the reloc count by the jump
3057 slot size.
3058
3059 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3060 so that R_X86_64_IRELATIVE entries come last. */
3061 if (htab->elf.srelplt)
3062 {
3063 htab->sgotplt_jump_table_size
3064 = elf_x86_64_compute_jump_table_size (htab);
3065 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3066 }
3067 else if (htab->elf.irelplt)
3068 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3069
3070 if (htab->tlsdesc_plt)
3071 {
3072 /* If we're not using lazy TLS relocations, don't generate the
3073 PLT and GOT entries they require. */
3074 if ((info->flags & DF_BIND_NOW))
3075 htab->tlsdesc_plt = 0;
3076 else
3077 {
3078 htab->tlsdesc_got = htab->elf.sgot->size;
3079 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3080 /* Reserve room for the initial entry.
3081 FIXME: we could probably do away with it in this case. */
3082 if (htab->elf.splt->size == 0)
3083 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3084 htab->tlsdesc_plt = htab->elf.splt->size;
3085 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3086 }
3087 }
3088
3089 if (htab->elf.sgotplt)
3090 {
3091       /* Don't allocate the .got.plt section if there are no GOT or PLT
3092          entries and there is no reference to _GLOBAL_OFFSET_TABLE_.  */
3093 if ((htab->elf.hgot == NULL
3094 || !htab->elf.hgot->ref_regular_nonweak)
3095 && (htab->elf.sgotplt->size
3096 == get_elf_backend_data (output_bfd)->got_header_size)
3097 && (htab->elf.splt == NULL
3098 || htab->elf.splt->size == 0)
3099 && (htab->elf.sgot == NULL
3100 || htab->elf.sgot->size == 0)
3101 && (htab->elf.iplt == NULL
3102 || htab->elf.iplt->size == 0)
3103 && (htab->elf.igotplt == NULL
3104 || htab->elf.igotplt->size == 0))
3105 htab->elf.sgotplt->size = 0;
3106 }
3107
3108 if (htab->plt_eh_frame != NULL
3109 && htab->elf.splt != NULL
3110 && htab->elf.splt->size != 0
3111 && !bfd_is_abs_section (htab->elf.splt->output_section)
3112 && _bfd_elf_eh_frame_present (info))
3113 {
3114 const struct elf_x86_64_backend_data *arch_data
3115 = get_elf_x86_64_arch_data (bed);
3116 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3117 }
3118
3119   /* We have now determined the sizes of the various dynamic sections.
3120 Allocate memory for them. */
3121 relocs = FALSE;
3122 for (s = dynobj->sections; s != NULL; s = s->next)
3123 {
3124 if ((s->flags & SEC_LINKER_CREATED) == 0)
3125 continue;
3126
3127 if (s == htab->elf.splt
3128 || s == htab->elf.sgot
3129 || s == htab->elf.sgotplt
3130 || s == htab->elf.iplt
3131 || s == htab->elf.igotplt
3132 || s == htab->plt_bnd
3133 || s == htab->plt_eh_frame
3134 || s == htab->sdynbss)
3135 {
3136 /* Strip this section if we don't need it; see the
3137 comment below. */
3138 }
3139 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3140 {
3141 if (s->size != 0 && s != htab->elf.srelplt)
3142 relocs = TRUE;
3143
3144 /* We use the reloc_count field as a counter if we need
3145 to copy relocs into the output file. */
3146 if (s != htab->elf.srelplt)
3147 s->reloc_count = 0;
3148 }
3149 else
3150 {
3151 /* It's not one of our sections, so don't allocate space. */
3152 continue;
3153 }
3154
3155 if (s->size == 0)
3156 {
3157 /* If we don't need this section, strip it from the
3158 output file. This is mostly to handle .rela.bss and
3159 .rela.plt. We must create both sections in
3160 create_dynamic_sections, because they must be created
3161 before the linker maps input sections to output
3162 sections. The linker does that before
3163 adjust_dynamic_symbol is called, and it is that
3164 function which decides whether anything needs to go
3165 into these sections. */
3166
3167 s->flags |= SEC_EXCLUDE;
3168 continue;
3169 }
3170
3171 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3172 continue;
3173
3174 /* Allocate memory for the section contents. We use bfd_zalloc
3175 here in case unused entries are not reclaimed before the
3176 section's contents are written out. This should not happen,
3177 	 but this way, if it does, we get an R_X86_64_NONE reloc instead
3178 of garbage. */
3179 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3180 if (s->contents == NULL)
3181 return FALSE;
3182 }
3183
3184 if (htab->plt_eh_frame != NULL
3185 && htab->plt_eh_frame->contents != NULL)
3186 {
3187 const struct elf_x86_64_backend_data *arch_data
3188 = get_elf_x86_64_arch_data (bed);
3189
3190 memcpy (htab->plt_eh_frame->contents,
3191 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3192 bfd_put_32 (dynobj, htab->elf.splt->size,
3193 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3194 }
3195
3196 if (htab->elf.dynamic_sections_created)
3197 {
3198 /* Add some entries to the .dynamic section. We fill in the
3199 values later, in elf_x86_64_finish_dynamic_sections, but we
3200 must add the entries now so that we get the correct size for
3201 the .dynamic section. The DT_DEBUG entry is filled in by the
3202 dynamic linker and used by the debugger. */
3203 #define add_dynamic_entry(TAG, VAL) \
3204 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3205
3206 if (info->executable)
3207 {
3208 if (!add_dynamic_entry (DT_DEBUG, 0))
3209 return FALSE;
3210 }
3211
3212 if (htab->elf.splt->size != 0)
3213 {
3214 if (!add_dynamic_entry (DT_PLTGOT, 0)
3215 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3216 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3217 || !add_dynamic_entry (DT_JMPREL, 0))
3218 return FALSE;
3219
3220 if (htab->tlsdesc_plt
3221 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3222 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3223 return FALSE;
3224 }
3225
3226 if (relocs)
3227 {
3228 if (!add_dynamic_entry (DT_RELA, 0)
3229 || !add_dynamic_entry (DT_RELASZ, 0)
3230 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3231 return FALSE;
3232
3233 /* If any dynamic relocs apply to a read-only section,
3234 then we need a DT_TEXTREL entry. */
3235 if ((info->flags & DF_TEXTREL) == 0)
3236 elf_link_hash_traverse (&htab->elf,
3237 elf_x86_64_readonly_dynrelocs,
3238 info);
3239
3240 if ((info->flags & DF_TEXTREL) != 0)
3241 {
3242 if (!add_dynamic_entry (DT_TEXTREL, 0))
3243 return FALSE;
3244 }
3245 }
3246 }
3247 #undef add_dynamic_entry
3248
3249 return TRUE;
3250 }
3251
3252 static bfd_boolean
3253 elf_x86_64_always_size_sections (bfd *output_bfd,
3254 struct bfd_link_info *info)
3255 {
3256 asection *tls_sec = elf_hash_table (info)->tls_sec;
3257
3258 if (tls_sec)
3259 {
3260 struct elf_link_hash_entry *tlsbase;
3261
3262 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3263 "_TLS_MODULE_BASE_",
3264 FALSE, FALSE, FALSE);
3265
3266 if (tlsbase && tlsbase->type == STT_TLS)
3267 {
3268 struct elf_x86_64_link_hash_table *htab;
3269 struct bfd_link_hash_entry *bh = NULL;
3270 const struct elf_backend_data *bed
3271 = get_elf_backend_data (output_bfd);
3272
3273 htab = elf_x86_64_hash_table (info);
3274 if (htab == NULL)
3275 return FALSE;
3276
3277 if (!(_bfd_generic_link_add_one_symbol
3278 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3279 tls_sec, 0, NULL, FALSE,
3280 bed->collect, &bh)))
3281 return FALSE;
3282
3283 htab->tls_module_base = bh;
3284
3285 tlsbase = (struct elf_link_hash_entry *)bh;
3286 tlsbase->def_regular = 1;
3287 tlsbase->other = STV_HIDDEN;
3288 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3289 }
3290 }
3291
3292 return TRUE;
3293 }
3294
3295 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3296    executables.  Rather than setting it to the beginning of the TLS
3297    section, we have to set it to the end.  This function may be called
3298    multiple times; it is idempotent.  */
3299
3300 static void
3301 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3302 {
3303 struct elf_x86_64_link_hash_table *htab;
3304 struct bfd_link_hash_entry *base;
3305
3306 if (!info->executable)
3307 return;
3308
3309 htab = elf_x86_64_hash_table (info);
3310 if (htab == NULL)
3311 return;
3312
3313 base = htab->tls_module_base;
3314 if (base == NULL)
3315 return;
3316
3317 base->u.def.value = htab->elf.tls_size;
3318 }
3319
3320 /* Return the base VMA address which should be subtracted from real addresses
3321 when resolving @dtpoff relocation.
3322 This is PT_TLS segment p_vaddr. */
3323
3324 static bfd_vma
3325 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3326 {
3327 /* If tls_sec is NULL, we should have signalled an error already. */
3328 if (elf_hash_table (info)->tls_sec == NULL)
3329 return 0;
3330 return elf_hash_table (info)->tls_sec->vma;
3331 }
3332
3333 /* Return the relocation value for @tpoff relocation
3334 if STT_TLS virtual address is ADDRESS. */
3335
3336 static bfd_vma
3337 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3338 {
3339 struct elf_link_hash_table *htab = elf_hash_table (info);
3340 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3341 bfd_vma static_tls_size;
3342
3343   /* If tls_sec is NULL, we should have signalled an error already.  */
3344 if (htab->tls_sec == NULL)
3345 return 0;
3346
3347 /* Consider special static TLS alignment requirements. */
3348 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3349 return address - static_tls_size - htab->tls_sec->vma;
3350 }
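/* Editorial example with hypothetical numbers, not from the original
   sources: with tls_sec->vma = 0x601000, tls_size = 0x18 and a 16-byte
   static TLS alignment, static_tls_size is 0x20, so a TLS symbol at
   address 0x601008 yields a @tpoff of 0x601008 - 0x20 - 0x601000,
   i.e. -0x18.  The value is negative because on x86-64 the static TLS
   block lives just below the thread pointer.  */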
3351
3352 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3353 branch? */
3354
3355 static bfd_boolean
3356 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3357 {
3358 /* Opcode Instruction
3359 0xe8 call
3360 0xe9 jump
3361 0x0f 0x8x conditional jump */
3362 return ((offset > 0
3363 && (contents [offset - 1] == 0xe8
3364 || contents [offset - 1] == 0xe9))
3365 || (offset > 1
3366 && contents [offset - 2] == 0x0f
3367 && (contents [offset - 1] & 0xf0) == 0x80));
3368 }
3369
3370 /* Relocate an x86_64 ELF section. */
3371
3372 static bfd_boolean
3373 elf_x86_64_relocate_section (bfd *output_bfd,
3374 struct bfd_link_info *info,
3375 bfd *input_bfd,
3376 asection *input_section,
3377 bfd_byte *contents,
3378 Elf_Internal_Rela *relocs,
3379 Elf_Internal_Sym *local_syms,
3380 asection **local_sections)
3381 {
3382 struct elf_x86_64_link_hash_table *htab;
3383 Elf_Internal_Shdr *symtab_hdr;
3384 struct elf_link_hash_entry **sym_hashes;
3385 bfd_vma *local_got_offsets;
3386 bfd_vma *local_tlsdesc_gotents;
3387 Elf_Internal_Rela *rel;
3388 Elf_Internal_Rela *relend;
3389 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3390
3391 BFD_ASSERT (is_x86_64_elf (input_bfd));
3392
3393 htab = elf_x86_64_hash_table (info);
3394 if (htab == NULL)
3395 return FALSE;
3396 symtab_hdr = &elf_symtab_hdr (input_bfd);
3397 sym_hashes = elf_sym_hashes (input_bfd);
3398 local_got_offsets = elf_local_got_offsets (input_bfd);
3399 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3400
3401 elf_x86_64_set_tls_module_base (info);
3402
3403 rel = relocs;
3404 relend = relocs + input_section->reloc_count;
3405 for (; rel < relend; rel++)
3406 {
3407 unsigned int r_type;
3408 reloc_howto_type *howto;
3409 unsigned long r_symndx;
3410 struct elf_link_hash_entry *h;
3411 struct elf_x86_64_link_hash_entry *eh;
3412 Elf_Internal_Sym *sym;
3413 asection *sec;
3414 bfd_vma off, offplt, plt_offset;
3415 bfd_vma relocation;
3416 bfd_boolean unresolved_reloc;
3417 bfd_reloc_status_type r;
3418 int tls_type;
3419 asection *base_got, *resolved_plt;
3420 bfd_vma st_size;
3421
3422 r_type = ELF32_R_TYPE (rel->r_info);
3423 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3424 || r_type == (int) R_X86_64_GNU_VTENTRY)
3425 continue;
3426
3427 if (r_type >= (int) R_X86_64_standard)
3428 {
3429 (*_bfd_error_handler)
3430 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3431 input_bfd, input_section, r_type);
3432 bfd_set_error (bfd_error_bad_value);
3433 return FALSE;
3434 }
3435
3436 if (r_type != (int) R_X86_64_32
3437 || ABI_64_P (output_bfd))
3438 howto = x86_64_elf_howto_table + r_type;
3439 else
3440 howto = (x86_64_elf_howto_table
3441 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3442 r_symndx = htab->r_sym (rel->r_info);
3443 h = NULL;
3444 sym = NULL;
3445 sec = NULL;
3446 unresolved_reloc = FALSE;
3447 if (r_symndx < symtab_hdr->sh_info)
3448 {
3449 sym = local_syms + r_symndx;
3450 sec = local_sections[r_symndx];
3451
3452 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3453 &sec, rel);
3454 st_size = sym->st_size;
3455
3456 /* Relocate against local STT_GNU_IFUNC symbol. */
3457 if (!info->relocatable
3458 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3459 {
3460 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3461 rel, FALSE);
3462 if (h == NULL)
3463 abort ();
3464
3465 /* Set STT_GNU_IFUNC symbol value. */
3466 h->root.u.def.value = sym->st_value;
3467 h->root.u.def.section = sec;
3468 }
3469 }
3470 else
3471 {
3472 bfd_boolean warned ATTRIBUTE_UNUSED;
3473 bfd_boolean ignored ATTRIBUTE_UNUSED;
3474
3475 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3476 r_symndx, symtab_hdr, sym_hashes,
3477 h, sec, relocation,
3478 unresolved_reloc, warned, ignored);
3479 st_size = h->size;
3480 }
3481
3482 if (sec != NULL && discarded_section (sec))
3483 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3484 rel, 1, relend, howto, 0, contents);
3485
3486 if (info->relocatable)
3487 continue;
3488
3489 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3490 {
3491 if (r_type == R_X86_64_64)
3492 {
3493 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3494 zero-extend it to 64bit if addend is zero. */
3495 r_type = R_X86_64_32;
3496 memset (contents + rel->r_offset + 4, 0, 4);
3497 }
3498 else if (r_type == R_X86_64_SIZE64)
3499 {
3500 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3501 zero-extend it to 64bit if addend is zero. */
3502 r_type = R_X86_64_SIZE32;
3503 memset (contents + rel->r_offset + 4, 0, 4);
3504 }
3505 }
3506
3507 eh = (struct elf_x86_64_link_hash_entry *) h;
3508
3509 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3510 it here if it is defined in a non-shared object. */
3511 if (h != NULL
3512 && h->type == STT_GNU_IFUNC
3513 && h->def_regular)
3514 {
3515 bfd_vma plt_index;
3516 const char *name;
3517
3518 if ((input_section->flags & SEC_ALLOC) == 0
3519 || h->plt.offset == (bfd_vma) -1)
3520 abort ();
3521
3522 /* STT_GNU_IFUNC symbol must go through PLT. */
3523 if (htab->elf.splt != NULL)
3524 {
3525 if (htab->plt_bnd != NULL)
3526 {
3527 resolved_plt = htab->plt_bnd;
3528 plt_offset = eh->plt_bnd.offset;
3529 }
3530 else
3531 {
3532 resolved_plt = htab->elf.splt;
3533 plt_offset = h->plt.offset;
3534 }
3535 }
3536 else
3537 {
3538 resolved_plt = htab->elf.iplt;
3539 plt_offset = h->plt.offset;
3540 }
3541
3542 relocation = (resolved_plt->output_section->vma
3543 + resolved_plt->output_offset + plt_offset);
3544
3545 switch (r_type)
3546 {
3547 default:
3548 if (h->root.root.string)
3549 name = h->root.root.string;
3550 else
3551 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3552 NULL);
3553 (*_bfd_error_handler)
3554 (_("%B: relocation %s against STT_GNU_IFUNC "
3555 "symbol `%s' isn't handled by %s"), input_bfd,
3556 x86_64_elf_howto_table[r_type].name,
3557 name, __FUNCTION__);
3558 bfd_set_error (bfd_error_bad_value);
3559 return FALSE;
3560
3561 case R_X86_64_32S:
3562 if (info->shared)
3563 abort ();
3564 goto do_relocation;
3565
3566 case R_X86_64_32:
3567 if (ABI_64_P (output_bfd))
3568 goto do_relocation;
3569 /* FALLTHROUGH */
3570 case R_X86_64_64:
3571 if (rel->r_addend != 0)
3572 {
3573 if (h->root.root.string)
3574 name = h->root.root.string;
3575 else
3576 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3577 sym, NULL);
3578 (*_bfd_error_handler)
3579 (_("%B: relocation %s against STT_GNU_IFUNC "
3580 "symbol `%s' has non-zero addend: %d"),
3581 input_bfd, x86_64_elf_howto_table[r_type].name,
3582 name, rel->r_addend);
3583 bfd_set_error (bfd_error_bad_value);
3584 return FALSE;
3585 }
3586
3587 	      /* Generate a dynamic relocation only when there is a
3588 non-GOT reference in a shared object. */
3589 if (info->shared && h->non_got_ref)
3590 {
3591 Elf_Internal_Rela outrel;
3592 asection *sreloc;
3593
3594 /* Need a dynamic relocation to get the real function
3595 address. */
3596 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3597 info,
3598 input_section,
3599 rel->r_offset);
3600 if (outrel.r_offset == (bfd_vma) -1
3601 || outrel.r_offset == (bfd_vma) -2)
3602 abort ();
3603
3604 outrel.r_offset += (input_section->output_section->vma
3605 + input_section->output_offset);
3606
3607 if (h->dynindx == -1
3608 || h->forced_local
3609 || info->executable)
3610 {
3611 /* This symbol is resolved locally. */
3612 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3613 outrel.r_addend = (h->root.u.def.value
3614 + h->root.u.def.section->output_section->vma
3615 + h->root.u.def.section->output_offset);
3616 }
3617 else
3618 {
3619 outrel.r_info = htab->r_info (h->dynindx, r_type);
3620 outrel.r_addend = 0;
3621 }
3622
3623 sreloc = htab->elf.irelifunc;
3624 elf_append_rela (output_bfd, sreloc, &outrel);
3625
3626 /* If this reloc is against an external symbol, we
3627 do not want to fiddle with the addend. Otherwise,
3628 we need to include the symbol value so that it
3629 becomes an addend for the dynamic reloc. For an
3630 		 internal symbol, we have already updated the addend.  */
3631 continue;
3632 }
3633 /* FALLTHROUGH */
3634 case R_X86_64_PC32:
3635 case R_X86_64_PC32_BND:
3636 case R_X86_64_PC64:
3637 case R_X86_64_PLT32:
3638 case R_X86_64_PLT32_BND:
3639 goto do_relocation;
3640
3641 case R_X86_64_GOTPCREL:
3642 case R_X86_64_GOTPCREL64:
3643 base_got = htab->elf.sgot;
3644 off = h->got.offset;
3645
3646 if (base_got == NULL)
3647 abort ();
3648
3649 if (off == (bfd_vma) -1)
3650 {
3651 /* We can't use h->got.offset here to save state, or
3652 even just remember the offset, as finish_dynamic_symbol
3653 would use that as offset into .got. */
3654
3655 if (htab->elf.splt != NULL)
3656 {
3657 plt_index = h->plt.offset / plt_entry_size - 1;
3658 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3659 base_got = htab->elf.sgotplt;
3660 }
3661 else
3662 {
3663 plt_index = h->plt.offset / plt_entry_size;
3664 off = plt_index * GOT_ENTRY_SIZE;
3665 base_got = htab->elf.igotplt;
3666 }
3667
3668 if (h->dynindx == -1
3669 || h->forced_local
3670 || info->symbolic)
3671 {
3672 		  /* This references the local definition.  We must
3673 initialize this entry in the global offset table.
3674 Since the offset must always be a multiple of 8,
3675 we use the least significant bit to record
3676 whether we have initialized it already.
3677
3678 When doing a dynamic link, we create a .rela.got
3679 relocation entry to initialize the value. This
3680 is done in the finish_dynamic_symbol routine. */
3681 if ((off & 1) != 0)
3682 off &= ~1;
3683 else
3684 {
3685 bfd_put_64 (output_bfd, relocation,
3686 base_got->contents + off);
3687 /* Note that this is harmless for the GOTPLT64
3688 case, as -1 | 1 still is -1. */
3689 h->got.offset |= 1;
3690 }
3691 }
3692 }
3693
3694 relocation = (base_got->output_section->vma
3695 + base_got->output_offset + off);
3696
3697 goto do_relocation;
3698 }
3699 }
3700
3701 /* When generating a shared object, the relocations handled here are
3702 copied into the output file to be resolved at run time. */
3703 switch (r_type)
3704 {
3705 case R_X86_64_GOT32:
3706 case R_X86_64_GOT64:
3707 /* Relocation is to the entry for this symbol in the global
3708 offset table. */
3709 case R_X86_64_GOTPCREL:
3710 case R_X86_64_GOTPCREL64:
3711 /* Use global offset table entry as symbol value. */
3712 case R_X86_64_GOTPLT64:
3713 /* This is obsolete and treated the same as GOT64. */
3714 base_got = htab->elf.sgot;
3715
3716 if (htab->elf.sgot == NULL)
3717 abort ();
3718
3719 if (h != NULL)
3720 {
3721 bfd_boolean dyn;
3722
3723 off = h->got.offset;
3724 if (h->needs_plt
3725 && h->plt.offset != (bfd_vma)-1
3726 && off == (bfd_vma)-1)
3727 {
3728 /* We can't use h->got.offset here to save
3729 state, or even just remember the offset, as
3730 finish_dynamic_symbol would use that as offset into
3731 .got. */
3732 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3733 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3734 base_got = htab->elf.sgotplt;
3735 }
3736
3737 dyn = htab->elf.dynamic_sections_created;
3738
3739 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3740 || (info->shared
3741 && SYMBOL_REFERENCES_LOCAL (info, h))
3742 || (ELF_ST_VISIBILITY (h->other)
3743 && h->root.type == bfd_link_hash_undefweak))
3744 {
3745 /* This is actually a static link, or it is a -Bsymbolic
3746 link and the symbol is defined locally, or the symbol
3747 was forced to be local because of a version file. We
3748 must initialize this entry in the global offset table.
3749 Since the offset must always be a multiple of 8, we
3750 use the least significant bit to record whether we
3751 have initialized it already.
3752
3753 When doing a dynamic link, we create a .rela.got
3754 relocation entry to initialize the value. This is
3755 done in the finish_dynamic_symbol routine. */
3756 if ((off & 1) != 0)
3757 off &= ~1;
3758 else
3759 {
3760 bfd_put_64 (output_bfd, relocation,
3761 base_got->contents + off);
3762 /* Note that this is harmless for the GOTPLT64 case,
3763 as -1 | 1 still is -1. */
3764 h->got.offset |= 1;
3765 }
3766 }
3767 else
3768 unresolved_reloc = FALSE;
3769 }
3770 else
3771 {
3772 if (local_got_offsets == NULL)
3773 abort ();
3774
3775 off = local_got_offsets[r_symndx];
3776
3777 /* The offset must always be a multiple of 8. We use
3778 the least significant bit to record whether we have
3779 already generated the necessary reloc. */
3780 if ((off & 1) != 0)
3781 off &= ~1;
3782 else
3783 {
3784 bfd_put_64 (output_bfd, relocation,
3785 base_got->contents + off);
3786
3787 if (info->shared)
3788 {
3789 asection *s;
3790 Elf_Internal_Rela outrel;
3791
3792 /* We need to generate a R_X86_64_RELATIVE reloc
3793 for the dynamic linker. */
3794 s = htab->elf.srelgot;
3795 if (s == NULL)
3796 abort ();
3797
3798 outrel.r_offset = (base_got->output_section->vma
3799 + base_got->output_offset
3800 + off);
3801 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3802 outrel.r_addend = relocation;
3803 elf_append_rela (output_bfd, s, &outrel);
3804 }
3805
3806 local_got_offsets[r_symndx] |= 1;
3807 }
3808 }
3809
3810 if (off >= (bfd_vma) -2)
3811 abort ();
3812
3813 relocation = base_got->output_section->vma
3814 + base_got->output_offset + off;
3815 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3816 relocation -= htab->elf.sgotplt->output_section->vma
3817 - htab->elf.sgotplt->output_offset;
3818
3819 break;
3820
3821 case R_X86_64_GOTOFF64:
3822 /* Relocation is relative to the start of the global offset
3823 table. */
3824
3825 /* Check to make sure it isn't a protected function symbol
3826 for a shared library, since it may not be local when used
3827 as a function address. */
3828 if (!info->executable
3829 && h
3830 && !SYMBOLIC_BIND (info, h)
3831 && h->def_regular
3832 && h->type == STT_FUNC
3833 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3834 {
3835 (*_bfd_error_handler)
3836 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3837 input_bfd, h->root.root.string);
3838 bfd_set_error (bfd_error_bad_value);
3839 return FALSE;
3840 }
3841
3842 /* Note that sgot is not involved in this
3843 calculation. We always want the start of .got.plt. If we
3844 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3845 permitted by the ABI, we might have to change this
3846 calculation. */
3847 relocation -= htab->elf.sgotplt->output_section->vma
3848 + htab->elf.sgotplt->output_offset;
3849 break;
3850
3851 case R_X86_64_GOTPC32:
3852 case R_X86_64_GOTPC64:
3853 /* Use global offset table as symbol value. */
3854 relocation = htab->elf.sgotplt->output_section->vma
3855 + htab->elf.sgotplt->output_offset;
3856 unresolved_reloc = FALSE;
3857 break;
3858
3859 case R_X86_64_PLTOFF64:
3860 /* Relocation is PLT entry relative to GOT. For local
3861 symbols it's the symbol itself relative to GOT. */
3862 if (h != NULL
3863 /* See PLT32 handling. */
3864 && h->plt.offset != (bfd_vma) -1
3865 && htab->elf.splt != NULL)
3866 {
3867 if (htab->plt_bnd != NULL)
3868 {
3869 resolved_plt = htab->plt_bnd;
3870 plt_offset = eh->plt_bnd.offset;
3871 }
3872 else
3873 {
3874 resolved_plt = htab->elf.splt;
3875 plt_offset = h->plt.offset;
3876 }
3877
3878 relocation = (resolved_plt->output_section->vma
3879 + resolved_plt->output_offset
3880 + plt_offset);
3881 unresolved_reloc = FALSE;
3882 }
3883
3884 relocation -= htab->elf.sgotplt->output_section->vma
3885 + htab->elf.sgotplt->output_offset;
3886 break;
3887
3888 case R_X86_64_PLT32:
3889 case R_X86_64_PLT32_BND:
3890 /* Relocation is to the entry for this symbol in the
3891 procedure linkage table. */
3892
3893 /* Resolve a PLT32 reloc against a local symbol directly,
3894 without using the procedure linkage table. */
3895 if (h == NULL)
3896 break;
3897
3898 if (h->plt.offset == (bfd_vma) -1
3899 || htab->elf.splt == NULL)
3900 {
3901 /* We didn't make a PLT entry for this symbol. This
3902 happens when statically linking PIC code, or when
3903 using -Bsymbolic. */
3904 break;
3905 }
3906
3907 if (htab->plt_bnd != NULL)
3908 {
3909 resolved_plt = htab->plt_bnd;
3910 plt_offset = eh->plt_bnd.offset;
3911 }
3912 else
3913 {
3914 resolved_plt = htab->elf.splt;
3915 plt_offset = h->plt.offset;
3916 }
3917
3918 relocation = (resolved_plt->output_section->vma
3919 + resolved_plt->output_offset
3920 + plt_offset);
3921 unresolved_reloc = FALSE;
3922 break;
3923
3924 case R_X86_64_SIZE32:
3925 case R_X86_64_SIZE64:
3926 /* Set to symbol size. */
3927 relocation = st_size;
3928 goto direct;
3929
3930 case R_X86_64_PC8:
3931 case R_X86_64_PC16:
3932 case R_X86_64_PC32:
3933 case R_X86_64_PC32_BND:
3934 if (info->shared
3935 && (input_section->flags & SEC_ALLOC) != 0
3936 && (input_section->flags & SEC_READONLY) != 0
3937 && h != NULL)
3938 {
3939 bfd_boolean fail = FALSE;
3940 bfd_boolean branch
3941 = ((r_type == R_X86_64_PC32
3942 || r_type == R_X86_64_PC32_BND)
3943 && is_32bit_relative_branch (contents, rel->r_offset));
3944
3945 if (SYMBOL_REFERENCES_LOCAL (info, h))
3946 {
3947 /* Symbol is referenced locally. Make sure it is
3948 defined locally, or that this is a branch. */
3949 fail = !h->def_regular && !branch;
3950 }
3951 else
3952 {
3953 /* Symbol isn't referenced locally. We only allow a
3954 branch to a symbol with non-default visibility. */
3955 fail = (!branch
3956 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3957 }
3958
3959 if (fail)
3960 {
3961 const char *fmt;
3962 const char *v;
3963 const char *pic = "";
3964
3965 switch (ELF_ST_VISIBILITY (h->other))
3966 {
3967 case STV_HIDDEN:
3968 v = _("hidden symbol");
3969 break;
3970 case STV_INTERNAL:
3971 v = _("internal symbol");
3972 break;
3973 case STV_PROTECTED:
3974 v = _("protected symbol");
3975 break;
3976 default:
3977 v = _("symbol");
3978 pic = _("; recompile with -fPIC");
3979 break;
3980 }
3981
3982 if (h->def_regular)
3983 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
3984 else
3985 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
3986
3987 (*_bfd_error_handler) (fmt, input_bfd,
3988 x86_64_elf_howto_table[r_type].name,
3989 v, h->root.root.string, pic);
3990 bfd_set_error (bfd_error_bad_value);
3991 return FALSE;
3992 }
3993 }
3994 /* Fall through. */
3995
3996 case R_X86_64_8:
3997 case R_X86_64_16:
3998 case R_X86_64_32:
3999 case R_X86_64_PC64:
4000 case R_X86_64_64:
4001 /* FIXME: The ABI says the linker should make sure the value is
4002 the same when it's zero-extended to 64 bits. */
4003
4004 direct:
4005 if ((input_section->flags & SEC_ALLOC) == 0)
4006 break;
4007
4008 if ((info->shared
4009 && (h == NULL
4010 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4011 || h->root.type != bfd_link_hash_undefweak)
4012 && ((! IS_X86_64_PCREL_TYPE (r_type)
4013 && r_type != R_X86_64_SIZE32
4014 && r_type != R_X86_64_SIZE64)
4015 || ! SYMBOL_CALLS_LOCAL (info, h)))
4016 || (ELIMINATE_COPY_RELOCS
4017 && !info->shared
4018 && h != NULL
4019 && h->dynindx != -1
4020 && !h->non_got_ref
4021 && ((h->def_dynamic
4022 && !h->def_regular)
4023 || h->root.type == bfd_link_hash_undefweak
4024 || h->root.type == bfd_link_hash_undefined)))
4025 {
4026 Elf_Internal_Rela outrel;
4027 bfd_boolean skip, relocate;
4028 asection *sreloc;
4029
4030 /* When generating a shared object, these relocations
4031 are copied into the output file to be resolved at run
4032 time. */
4033 skip = FALSE;
4034 relocate = FALSE;
4035
4036 outrel.r_offset =
4037 _bfd_elf_section_offset (output_bfd, info, input_section,
4038 rel->r_offset);
4039 if (outrel.r_offset == (bfd_vma) -1)
4040 skip = TRUE;
4041 else if (outrel.r_offset == (bfd_vma) -2)
4042 skip = TRUE, relocate = TRUE;
4043
4044 outrel.r_offset += (input_section->output_section->vma
4045 + input_section->output_offset);
4046
4047 if (skip)
4048 memset (&outrel, 0, sizeof outrel);
4049
4050 /* h->dynindx may be -1 if this symbol was marked to
4051 become local. */
4052 else if (h != NULL
4053 && h->dynindx != -1
4054 && (IS_X86_64_PCREL_TYPE (r_type)
4055 || ! info->shared
4056 || ! SYMBOLIC_BIND (info, h)
4057 || ! h->def_regular))
4058 {
4059 outrel.r_info = htab->r_info (h->dynindx, r_type);
4060 outrel.r_addend = rel->r_addend;
4061 }
4062 else
4063 {
4064 /* This symbol is local, or marked to become local. */
4065 if (r_type == htab->pointer_r_type)
4066 {
4067 relocate = TRUE;
4068 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4069 outrel.r_addend = relocation + rel->r_addend;
4070 }
4071 else if (r_type == R_X86_64_64
4072 && !ABI_64_P (output_bfd))
4073 {
4074 relocate = TRUE;
4075 outrel.r_info = htab->r_info (0,
4076 R_X86_64_RELATIVE64);
4077 outrel.r_addend = relocation + rel->r_addend;
4078 /* Check addend overflow. */
4079 if ((outrel.r_addend & 0x80000000)
4080 != (rel->r_addend & 0x80000000))
4081 {
4082 const char *name;
4083 int addend = rel->r_addend;
4084 if (h && h->root.root.string)
4085 name = h->root.root.string;
4086 else
4087 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4088 sym, NULL);
4089 if (addend < 0)
4090 (*_bfd_error_handler)
4091 (_("%B: addend -0x%x in relocation %s against "
4092 "symbol `%s' at 0x%lx in section `%A' is "
4093 "out of range"),
4094 input_bfd, input_section, addend,
4095 x86_64_elf_howto_table[r_type].name,
4096 name, (unsigned long) rel->r_offset);
4097 else
4098 (*_bfd_error_handler)
4099 (_("%B: addend 0x%x in relocation %s against "
4100 "symbol `%s' at 0x%lx in section `%A' is "
4101 "out of range"),
4102 input_bfd, input_section, addend,
4103 x86_64_elf_howto_table[r_type].name,
4104 name, (unsigned long) rel->r_offset);
4105 bfd_set_error (bfd_error_bad_value);
4106 return FALSE;
4107 }
4108 }
4109 else
4110 {
4111 long sindx;
4112
4113 if (bfd_is_abs_section (sec))
4114 sindx = 0;
4115 else if (sec == NULL || sec->owner == NULL)
4116 {
4117 bfd_set_error (bfd_error_bad_value);
4118 return FALSE;
4119 }
4120 else
4121 {
4122 asection *osec;
4123
4124 /* We are turning this relocation into one
4125 against a section symbol. It would be
4126 proper to subtract the symbol's value,
4127 osec->vma, from the emitted reloc addend,
4128 but ld.so expects buggy relocs. */
4129 osec = sec->output_section;
4130 sindx = elf_section_data (osec)->dynindx;
4131 if (sindx == 0)
4132 {
4133 asection *oi = htab->elf.text_index_section;
4134 sindx = elf_section_data (oi)->dynindx;
4135 }
4136 BFD_ASSERT (sindx != 0);
4137 }
4138
4139 outrel.r_info = htab->r_info (sindx, r_type);
4140 outrel.r_addend = relocation + rel->r_addend;
4141 }
4142 }
4143
4144 sreloc = elf_section_data (input_section)->sreloc;
4145
4146 if (sreloc == NULL || sreloc->contents == NULL)
4147 {
4148 r = bfd_reloc_notsupported;
4149 goto check_relocation_error;
4150 }
4151
4152 elf_append_rela (output_bfd, sreloc, &outrel);
4153
4154 /* If this reloc is against an external symbol, we do
4155 not want to fiddle with the addend. Otherwise, we
4156 need to include the symbol value so that it becomes
4157 an addend for the dynamic reloc. */
4158 if (! relocate)
4159 continue;
4160 }
4161
4162 break;
4163
4164 case R_X86_64_TLSGD:
4165 case R_X86_64_GOTPC32_TLSDESC:
4166 case R_X86_64_TLSDESC_CALL:
4167 case R_X86_64_GOTTPOFF:
4168 tls_type = GOT_UNKNOWN;
4169 if (h == NULL && local_got_offsets)
4170 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4171 else if (h != NULL)
4172 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4173
4174 if (! elf_x86_64_tls_transition (info, input_bfd,
4175 input_section, contents,
4176 symtab_hdr, sym_hashes,
4177 &r_type, tls_type, rel,
4178 relend, h, r_symndx))
4179 return FALSE;
4180
4181 if (r_type == R_X86_64_TPOFF32)
4182 {
4183 bfd_vma roff = rel->r_offset;
4184
4185 BFD_ASSERT (! unresolved_reloc);
4186
4187 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4188 {
4189 /* GD->LE transition. For 64bit, change
4190 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4191 .word 0x6666; rex64; call __tls_get_addr
4192 into:
4193 movq %fs:0, %rax
4194 leaq foo@tpoff(%rax), %rax
4195 For 32bit, change
4196 leaq foo@tlsgd(%rip), %rdi
4197 .word 0x6666; rex64; call __tls_get_addr
4198 into:
4199 movl %fs:0, %eax
4200 leaq foo@tpoff(%rax), %rax
4201 For largepic, change:
4202 leaq foo@tlsgd(%rip), %rdi
4203 movabsq $__tls_get_addr@pltoff, %rax
4204 addq %rbx, %rax
4205 call *%rax
4206 into:
4207 movq %fs:0, %rax
4208 leaq foo@tpoff(%rax), %rax
4209 nopw 0x0(%rax,%rax,1) */
4210 int largepic = 0;
4211 if (ABI_64_P (output_bfd)
4212 && contents[roff + 5] == (bfd_byte) '\xb8')
4213 {
4214 memcpy (contents + roff - 3,
4215 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4216 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4217 largepic = 1;
4218 }
4219 else if (ABI_64_P (output_bfd))
4220 memcpy (contents + roff - 4,
4221 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4222 16);
4223 else
4224 memcpy (contents + roff - 3,
4225 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4226 15);
4227 bfd_put_32 (output_bfd,
4228 elf_x86_64_tpoff (info, relocation),
4229 contents + roff + 8 + largepic);
4230 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4231 rel++;
4232 continue;
4233 }
4234 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4235 {
4236 /* GDesc -> LE transition.
4237 It's originally something like:
4238 leaq x@tlsdesc(%rip), %rax
4239
4240 Change it to:
4241 movl $x@tpoff, %rax. */
4242
4243 unsigned int val, type;
4244
4245 type = bfd_get_8 (input_bfd, contents + roff - 3);
4246 val = bfd_get_8 (input_bfd, contents + roff - 1);
4247 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4248 contents + roff - 3);
4249 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4250 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4251 contents + roff - 1);
4252 bfd_put_32 (output_bfd,
4253 elf_x86_64_tpoff (info, relocation),
4254 contents + roff);
4255 continue;
4256 }
4257 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4258 {
4259 /* GDesc -> LE transition.
4260 It's originally:
4261 call *(%rax)
4262 Turn it into:
4263 xchg %ax,%ax. */
4264 bfd_put_8 (output_bfd, 0x66, contents + roff);
4265 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4266 continue;
4267 }
4268 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4269 {
4270 /* IE->LE transition:
4271 For 64bit, originally it can be one of:
4272 movq foo@gottpoff(%rip), %reg
4273 addq foo@gottpoff(%rip), %reg
4274 We change it into:
4275 movq $foo, %reg
4276 leaq foo(%reg), %reg
4277 addq $foo, %reg.
4278 For 32bit, originally it can be one of:
4279 movq foo@gottpoff(%rip), %reg
4280 addl foo@gottpoff(%rip), %reg
4281 We change it into:
4282 movq $foo, %reg
4283 leal foo(%reg), %reg
4284 addl $foo, %reg. */
4285
4286 unsigned int val, type, reg;
4287
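 /* Rough decoding note: VAL is the REX prefix byte in front of the
    instruction (0 when there is none), TYPE is the opcode byte (0x8b
    for movq, otherwise an add), and REG is the destination register
    taken from the reg field of the ModRM byte. */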
4288 if (roff >= 3)
4289 val = bfd_get_8 (input_bfd, contents + roff - 3);
4290 else
4291 val = 0;
4292 type = bfd_get_8 (input_bfd, contents + roff - 2);
4293 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4294 reg >>= 3;
4295 if (type == 0x8b)
4296 {
4297 /* movq */
4298 if (val == 0x4c)
4299 bfd_put_8 (output_bfd, 0x49,
4300 contents + roff - 3);
4301 else if (!ABI_64_P (output_bfd) && val == 0x44)
4302 bfd_put_8 (output_bfd, 0x41,
4303 contents + roff - 3);
4304 bfd_put_8 (output_bfd, 0xc7,
4305 contents + roff - 2);
4306 bfd_put_8 (output_bfd, 0xc0 | reg,
4307 contents + roff - 1);
4308 }
4309 else if (reg == 4)
4310 {
4311 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4312 is special */
4313 if (val == 0x4c)
4314 bfd_put_8 (output_bfd, 0x49,
4315 contents + roff - 3);
4316 else if (!ABI_64_P (output_bfd) && val == 0x44)
4317 bfd_put_8 (output_bfd, 0x41,
4318 contents + roff - 3);
4319 bfd_put_8 (output_bfd, 0x81,
4320 contents + roff - 2);
4321 bfd_put_8 (output_bfd, 0xc0 | reg,
4322 contents + roff - 1);
4323 }
4324 else
4325 {
4326 /* addq/addl -> leaq/leal */
4327 if (val == 0x4c)
4328 bfd_put_8 (output_bfd, 0x4d,
4329 contents + roff - 3);
4330 else if (!ABI_64_P (output_bfd) && val == 0x44)
4331 bfd_put_8 (output_bfd, 0x45,
4332 contents + roff - 3);
4333 bfd_put_8 (output_bfd, 0x8d,
4334 contents + roff - 2);
4335 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4336 contents + roff - 1);
4337 }
4338 bfd_put_32 (output_bfd,
4339 elf_x86_64_tpoff (info, relocation),
4340 contents + roff);
4341 continue;
4342 }
4343 else
4344 BFD_ASSERT (FALSE);
4345 }
4346
4347 if (htab->elf.sgot == NULL)
4348 abort ();
4349
4350 if (h != NULL)
4351 {
4352 off = h->got.offset;
4353 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4354 }
4355 else
4356 {
4357 if (local_got_offsets == NULL)
4358 abort ();
4359
4360 off = local_got_offsets[r_symndx];
4361 offplt = local_tlsdesc_gotents[r_symndx];
4362 }
4363
4364 if ((off & 1) != 0)
4365 off &= ~1;
4366 else
4367 {
4368 Elf_Internal_Rela outrel;
4369 int dr_type, indx;
4370 asection *sreloc;
4371
4372 if (htab->elf.srelgot == NULL)
4373 abort ();
4374
4375 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4376
4377 if (GOT_TLS_GDESC_P (tls_type))
4378 {
4379 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4380 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4381 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4382 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4383 + htab->elf.sgotplt->output_offset
4384 + offplt
4385 + htab->sgotplt_jump_table_size);
4386 sreloc = htab->elf.srelplt;
4387 if (indx == 0)
4388 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4389 else
4390 outrel.r_addend = 0;
4391 elf_append_rela (output_bfd, sreloc, &outrel);
4392 }
4393
4394 sreloc = htab->elf.srelgot;
4395
4396 outrel.r_offset = (htab->elf.sgot->output_section->vma
4397 + htab->elf.sgot->output_offset + off);
4398
4399 if (GOT_TLS_GD_P (tls_type))
4400 dr_type = R_X86_64_DTPMOD64;
4401 else if (GOT_TLS_GDESC_P (tls_type))
4402 goto dr_done;
4403 else
4404 dr_type = R_X86_64_TPOFF64;
4405
4406 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4407 outrel.r_addend = 0;
4408 if ((dr_type == R_X86_64_TPOFF64
4409 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4410 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4411 outrel.r_info = htab->r_info (indx, dr_type);
4412
4413 elf_append_rela (output_bfd, sreloc, &outrel);
4414
4415 if (GOT_TLS_GD_P (tls_type))
4416 {
4417 if (indx == 0)
4418 {
4419 BFD_ASSERT (! unresolved_reloc);
4420 bfd_put_64 (output_bfd,
4421 relocation - elf_x86_64_dtpoff_base (info),
4422 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4423 }
4424 else
4425 {
4426 bfd_put_64 (output_bfd, 0,
4427 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4428 outrel.r_info = htab->r_info (indx,
4429 R_X86_64_DTPOFF64);
4430 outrel.r_offset += GOT_ENTRY_SIZE;
4431 elf_append_rela (output_bfd, sreloc,
4432 &outrel);
4433 }
4434 }
4435
4436 dr_done:
4437 if (h != NULL)
4438 h->got.offset |= 1;
4439 else
4440 local_got_offsets[r_symndx] |= 1;
4441 }
4442
4443 if (off >= (bfd_vma) -2
4444 && ! GOT_TLS_GDESC_P (tls_type))
4445 abort ();
4446 if (r_type == ELF32_R_TYPE (rel->r_info))
4447 {
4448 if (r_type == R_X86_64_GOTPC32_TLSDESC
4449 || r_type == R_X86_64_TLSDESC_CALL)
4450 relocation = htab->elf.sgotplt->output_section->vma
4451 + htab->elf.sgotplt->output_offset
4452 + offplt + htab->sgotplt_jump_table_size;
4453 else
4454 relocation = htab->elf.sgot->output_section->vma
4455 + htab->elf.sgot->output_offset + off;
4456 unresolved_reloc = FALSE;
4457 }
4458 else
4459 {
4460 bfd_vma roff = rel->r_offset;
4461
4462 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4463 {
4464 /* GD->IE transition. For 64bit, change
4465 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4466 .word 0x6666; rex64; call __tls_get_addr@plt
4467 into:
4468 movq %fs:0, %rax
4469 addq foo@gottpoff(%rip), %rax
4470 For 32bit, change
4471 leaq foo@tlsgd(%rip), %rdi
4472 .word 0x6666; rex64; call __tls_get_addr@plt
4473 into:
4474 movl %fs:0, %eax
4475 addq foo@gottpoff(%rip), %rax
4476 For largepic, change:
4477 leaq foo@tlsgd(%rip), %rdi
4478 movabsq $__tls_get_addr@pltoff, %rax
4479 addq %rbx, %rax
4480 call *%rax
4481 into:
4482 movq %fs:0, %rax
4483 addq foo@gottpoff(%rip), %rax
4484 nopw 0x0(%rax,%rax,1) */
4485 int largepic = 0;
4486 if (ABI_64_P (output_bfd)
4487 && contents[roff + 5] == (bfd_byte) '\xb8')
4488 {
4489 memcpy (contents + roff - 3,
4490 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4491 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4492 largepic = 1;
4493 }
4494 else if (ABI_64_P (output_bfd))
4495 memcpy (contents + roff - 4,
4496 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4497 16);
4498 else
4499 memcpy (contents + roff - 3,
4500 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4501 15);
4502
4503 relocation = (htab->elf.sgot->output_section->vma
4504 + htab->elf.sgot->output_offset + off
4505 - roff
4506 - largepic
4507 - input_section->output_section->vma
4508 - input_section->output_offset
4509 - 12);
4510 bfd_put_32 (output_bfd, relocation,
4511 contents + roff + 8 + largepic);
4512 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4513 rel++;
4514 continue;
4515 }
4516 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4517 {
4518 /* GDesc -> IE transition.
4519 It's originally something like:
4520 leaq x@tlsdesc(%rip), %rax
4521
4522 Change it to:
4523 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4524
4525 /* Now modify the instruction as appropriate. To
4526 turn a leaq into a movq in the form we use it, it
4527 suffices to change the second byte from 0x8d to
4528 0x8b. */
4529 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4530
4531 bfd_put_32 (output_bfd,
4532 htab->elf.sgot->output_section->vma
4533 + htab->elf.sgot->output_offset + off
4534 - rel->r_offset
4535 - input_section->output_section->vma
4536 - input_section->output_offset
4537 - 4,
4538 contents + roff);
4539 continue;
4540 }
4541 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4542 {
4543 /* GDesc -> IE transition.
4544 It's originally:
4545 call *(%rax)
4546
4547 Change it to:
4548 xchg %ax, %ax. */
4549
4550 bfd_put_8 (output_bfd, 0x66, contents + roff);
4551 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4552 continue;
4553 }
4554 else
4555 BFD_ASSERT (FALSE);
4556 }
4557 break;
4558
4559 case R_X86_64_TLSLD:
4560 if (! elf_x86_64_tls_transition (info, input_bfd,
4561 input_section, contents,
4562 symtab_hdr, sym_hashes,
4563 &r_type, GOT_UNKNOWN,
4564 rel, relend, h, r_symndx))
4565 return FALSE;
4566
4567 if (r_type != R_X86_64_TLSLD)
4568 {
4569 /* LD->LE transition:
4570 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4571 For 64bit, we change it into:
4572 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4573 For 32bit, we change it into:
4574 nopl 0x0(%rax); movl %fs:0, %eax.
4575 For largepic, change:
4576 leaq foo@tlsgd(%rip), %rdi
4577 movabsq $__tls_get_addr@pltoff, %rax
4578 addq %rbx, %rax
4579 call *%rax
4580 into:
4581 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4582 movq %fs:0, %rax */
4583
4584 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4585 if (ABI_64_P (output_bfd)
4586 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4587 memcpy (contents + rel->r_offset - 3,
4588 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4589 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4590 else if (ABI_64_P (output_bfd))
4591 memcpy (contents + rel->r_offset - 3,
4592 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4593 else
4594 memcpy (contents + rel->r_offset - 3,
4595 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4596 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4597 rel++;
4598 continue;
4599 }
4600
4601 if (htab->elf.sgot == NULL)
4602 abort ();
4603
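 /* The local-dynamic GOT entry is shared by every R_X86_64_TLSLD in
    the output: it gets a single R_X86_64_DTPMOD64 relocation (the
    module ID is filled in at run time) and a zero offset word. */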
4604 off = htab->tls_ld_got.offset;
4605 if (off & 1)
4606 off &= ~1;
4607 else
4608 {
4609 Elf_Internal_Rela outrel;
4610
4611 if (htab->elf.srelgot == NULL)
4612 abort ();
4613
4614 outrel.r_offset = (htab->elf.sgot->output_section->vma
4615 + htab->elf.sgot->output_offset + off);
4616
4617 bfd_put_64 (output_bfd, 0,
4618 htab->elf.sgot->contents + off);
4619 bfd_put_64 (output_bfd, 0,
4620 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4621 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4622 outrel.r_addend = 0;
4623 elf_append_rela (output_bfd, htab->elf.srelgot,
4624 &outrel);
4625 htab->tls_ld_got.offset |= 1;
4626 }
4627 relocation = htab->elf.sgot->output_section->vma
4628 + htab->elf.sgot->output_offset + off;
4629 unresolved_reloc = FALSE;
4630 break;
4631
4632 case R_X86_64_DTPOFF32:
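 /* Presumably a DTPOFF32 found in a code section of an executable
    follows an LD->LE transition (see the R_X86_64_TLSLD handling
    above), so it is resolved as an offset from the thread pointer;
    otherwise it remains an offset from the DTV base. */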
4633 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4634 relocation -= elf_x86_64_dtpoff_base (info);
4635 else
4636 relocation = elf_x86_64_tpoff (info, relocation);
4637 break;
4638
4639 case R_X86_64_TPOFF32:
4640 case R_X86_64_TPOFF64:
4641 BFD_ASSERT (info->executable);
4642 relocation = elf_x86_64_tpoff (info, relocation);
4643 break;
4644
4645 case R_X86_64_DTPOFF64:
4646 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4647 relocation -= elf_x86_64_dtpoff_base (info);
4648 break;
4649
4650 default:
4651 break;
4652 }
4653
4654 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4655 because such sections are not SEC_ALLOC and thus ld.so will
4656 not process them. */
4657 if (unresolved_reloc
4658 && !((input_section->flags & SEC_DEBUGGING) != 0
4659 && h->def_dynamic)
4660 && _bfd_elf_section_offset (output_bfd, info, input_section,
4661 rel->r_offset) != (bfd_vma) -1)
4662 {
4663 (*_bfd_error_handler)
4664 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4665 input_bfd,
4666 input_section,
4667 (long) rel->r_offset,
4668 howto->name,
4669 h->root.root.string);
4670 return FALSE;
4671 }
4672
4673 do_relocation:
4674 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4675 contents, rel->r_offset,
4676 relocation, rel->r_addend);
4677
4678 check_relocation_error:
4679 if (r != bfd_reloc_ok)
4680 {
4681 const char *name;
4682
4683 if (h != NULL)
4684 name = h->root.root.string;
4685 else
4686 {
4687 name = bfd_elf_string_from_elf_section (input_bfd,
4688 symtab_hdr->sh_link,
4689 sym->st_name);
4690 if (name == NULL)
4691 return FALSE;
4692 if (*name == '\0')
4693 name = bfd_section_name (input_bfd, sec);
4694 }
4695
4696 if (r == bfd_reloc_overflow)
4697 {
4698 if (! ((*info->callbacks->reloc_overflow)
4699 (info, (h ? &h->root : NULL), name, howto->name,
4700 (bfd_vma) 0, input_bfd, input_section,
4701 rel->r_offset)))
4702 return FALSE;
4703 }
4704 else
4705 {
4706 (*_bfd_error_handler)
4707 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4708 input_bfd, input_section,
4709 (long) rel->r_offset, name, (int) r);
4710 return FALSE;
4711 }
4712 }
4713 }
4714
4715 return TRUE;
4716 }
4717
4718 /* Finish up dynamic symbol handling. We set the contents of various
4719 dynamic sections here. */
4720
4721 static bfd_boolean
4722 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4723 struct bfd_link_info *info,
4724 struct elf_link_hash_entry *h,
4725 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4726 {
4727 struct elf_x86_64_link_hash_table *htab;
4728 const struct elf_x86_64_backend_data *abed;
4729 bfd_boolean use_plt_bnd;
4730
4731 htab = elf_x86_64_hash_table (info);
4732 if (htab == NULL)
4733 return FALSE;
4734
4735 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4736 section only if there is .plt section. */
4737 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4738 abed = (use_plt_bnd
4739 ? &elf_x86_64_bnd_arch_bed
4740 : get_elf_x86_64_backend_data (output_bfd));
4741
4742 if (h->plt.offset != (bfd_vma) -1)
4743 {
4744 bfd_vma plt_index;
4745 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4746 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4747 Elf_Internal_Rela rela;
4748 bfd_byte *loc;
4749 asection *plt, *gotplt, *relplt, *resolved_plt;
4750 const struct elf_backend_data *bed;
4751
4752 /* When building a static executable, use .iplt, .igot.plt and
4753 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4754 if (htab->elf.splt != NULL)
4755 {
4756 plt = htab->elf.splt;
4757 gotplt = htab->elf.sgotplt;
4758 relplt = htab->elf.srelplt;
4759 }
4760 else
4761 {
4762 plt = htab->elf.iplt;
4763 gotplt = htab->elf.igotplt;
4764 relplt = htab->elf.irelplt;
4765 }
4766
4767 /* This symbol has an entry in the procedure linkage table. Set
4768 it up. */
4769 if ((h->dynindx == -1
4770 && !((h->forced_local || info->executable)
4771 && h->def_regular
4772 && h->type == STT_GNU_IFUNC))
4773 || plt == NULL
4774 || gotplt == NULL
4775 || relplt == NULL)
4776 abort ();
4777
4778 /* Get the index in the procedure linkage table which
4779 corresponds to this symbol. This is the index of this symbol
4780 in all the symbols for which we are making plt entries. The
4781 first entry in the procedure linkage table is reserved.
4782
4783 Get the offset into the .got table of the entry that
4784 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4785 bytes. The first three are reserved for the dynamic linker.
4786
4787 For static executables, we don't reserve anything. */
4788
4789 if (plt == htab->elf.splt)
4790 {
4791 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4792 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4793 }
4794 else
4795 {
4796 got_offset = h->plt.offset / abed->plt_entry_size;
4797 got_offset = got_offset * GOT_ENTRY_SIZE;
4798 }
4799
4800 plt_plt_insn_end = abed->plt_plt_insn_end;
4801 plt_plt_offset = abed->plt_plt_offset;
4802 plt_got_insn_size = abed->plt_got_insn_size;
4803 plt_got_offset = abed->plt_got_offset;
4804 if (use_plt_bnd)
4805 {
4806 /* Use the second PLT with BND relocations. */
4807 const bfd_byte *plt_entry, *plt2_entry;
4808 struct elf_x86_64_link_hash_entry *eh
4809 = (struct elf_x86_64_link_hash_entry *) h;
4810
4811 if (eh->has_bnd_reloc)
4812 {
4813 plt_entry = elf_x86_64_bnd_plt_entry;
4814 plt2_entry = elf_x86_64_bnd_plt2_entry;
4815 }
4816 else
4817 {
4818 plt_entry = elf_x86_64_legacy_plt_entry;
4819 plt2_entry = elf_x86_64_legacy_plt2_entry;
4820
4821 /* Subtract 1 since there is no BND prefix. */
4822 plt_plt_insn_end -= 1;
4823 plt_plt_offset -= 1;
4824 plt_got_insn_size -= 1;
4825 plt_got_offset -= 1;
4826 }
4827
4828 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4829 == sizeof (elf_x86_64_legacy_plt_entry));
4830
4831 /* Fill in the entry in the procedure linkage table. */
4832 memcpy (plt->contents + h->plt.offset,
4833 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4834 /* Fill in the entry in the second PLT. */
4835 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4836 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4837
4838 resolved_plt = htab->plt_bnd;
4839 plt_offset = eh->plt_bnd.offset;
4840 }
4841 else
4842 {
4843 /* Fill in the entry in the procedure linkage table. */
4844 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4845 abed->plt_entry_size);
4846
4847 resolved_plt = plt;
4848 plt_offset = h->plt.offset;
4849 }
4850
4851 /* Insert the relocation positions of the plt section. */
4852
4853 /* Put the offset of the PC-relative instruction referring to the
4854 GOT entry, subtracting the size of that instruction. */
4855 bfd_put_32 (output_bfd,
4856 (gotplt->output_section->vma
4857 + gotplt->output_offset
4858 + got_offset
4859 - resolved_plt->output_section->vma
4860 - resolved_plt->output_offset
4861 - plt_offset
4862 - plt_got_insn_size),
4863 resolved_plt->contents + plt_offset + plt_got_offset);
4864
4865 /* Fill in the entry in the global offset table, initially this
4866 points to the second part of the PLT entry. */
4867 bfd_put_64 (output_bfd, (plt->output_section->vma
4868 + plt->output_offset
4869 + h->plt.offset + abed->plt_lazy_offset),
4870 gotplt->contents + got_offset);
4871
4872 /* Fill in the entry in the .rela.plt section. */
4873 rela.r_offset = (gotplt->output_section->vma
4874 + gotplt->output_offset
4875 + got_offset);
4876 if (h->dynindx == -1
4877 || ((info->executable
4878 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
4879 && h->def_regular
4880 && h->type == STT_GNU_IFUNC))
4881 {
4882 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4883 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4884 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4885 rela.r_addend = (h->root.u.def.value
4886 + h->root.u.def.section->output_section->vma
4887 + h->root.u.def.section->output_offset);
4888 /* R_X86_64_IRELATIVE comes last. */
4889 plt_index = htab->next_irelative_index--;
4890 }
4891 else
4892 {
4893 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4894 rela.r_addend = 0;
4895 plt_index = htab->next_jump_slot_index++;
4896 }
4897
4898 /* Don't fill PLT entry for static executables. */
4899 if (plt == htab->elf.splt)
4900 {
4901 /* Put relocation index. */
4902 bfd_put_32 (output_bfd, plt_index,
4903 plt->contents + h->plt.offset + abed->plt_reloc_offset);
4904 /* Put offset for jmp .PLT0. */
4905 bfd_put_32 (output_bfd, - (h->plt.offset + plt_plt_insn_end),
4906 plt->contents + h->plt.offset + plt_plt_offset);
4907 }
4908
4909 bed = get_elf_backend_data (output_bfd);
4910 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4911 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4912
4913 if (!h->def_regular)
4914 {
4915 /* Mark the symbol as undefined, rather than as defined in
4916 the .plt section. Leave the value if there were any
4917 relocations where pointer equality matters (this is a clue
4918 for the dynamic linker, to make function pointer
4919 comparisons work between an application and shared
4920 library), otherwise set it to zero. If a function is only
4921 called from a binary, there is no need to slow down
4922 shared libraries because of that. */
4923 sym->st_shndx = SHN_UNDEF;
4924 if (!h->pointer_equality_needed)
4925 sym->st_value = 0;
4926 }
4927 }
4928
4929 if (h->got.offset != (bfd_vma) -1
4930 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
4931 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
4932 {
4933 Elf_Internal_Rela rela;
4934
4935 /* This symbol has an entry in the global offset table. Set it
4936 up. */
4937 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4938 abort ();
4939
4940 rela.r_offset = (htab->elf.sgot->output_section->vma
4941 + htab->elf.sgot->output_offset
4942 + (h->got.offset &~ (bfd_vma) 1));
4943
4944 /* If this is a static link, or it is a -Bsymbolic link and the
4945 symbol is defined locally or was forced to be local because
4946 of a version file, we just want to emit a RELATIVE reloc.
4947 The entry in the global offset table will already have been
4948 initialized in the relocate_section function. */
4949 if (h->def_regular
4950 && h->type == STT_GNU_IFUNC)
4951 {
4952 if (info->shared)
4953 {
4954 /* Generate R_X86_64_GLOB_DAT. */
4955 goto do_glob_dat;
4956 }
4957 else
4958 {
4959 asection *plt;
4960
4961 if (!h->pointer_equality_needed)
4962 abort ();
4963
4964 /* For a non-shared object, we can't use .got.plt, which
4965 contains the real function address, if we need pointer
4966 equality. We load the GOT entry with the PLT entry. */
4967 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4968 bfd_put_64 (output_bfd, (plt->output_section->vma
4969 + plt->output_offset
4970 + h->plt.offset),
4971 htab->elf.sgot->contents + h->got.offset);
4972 return TRUE;
4973 }
4974 }
4975 else if (info->shared
4976 && SYMBOL_REFERENCES_LOCAL (info, h))
4977 {
4978 if (!h->def_regular)
4979 return FALSE;
4980 BFD_ASSERT ((h->got.offset & 1) != 0);
4981 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4982 rela.r_addend = (h->root.u.def.value
4983 + h->root.u.def.section->output_section->vma
4984 + h->root.u.def.section->output_offset);
4985 }
4986 else
4987 {
4988 BFD_ASSERT ((h->got.offset & 1) == 0);
4989 do_glob_dat:
4990 bfd_put_64 (output_bfd, (bfd_vma) 0,
4991 htab->elf.sgot->contents + h->got.offset);
4992 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4993 rela.r_addend = 0;
4994 }
4995
4996 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
4997 }
4998
4999 if (h->needs_copy)
5000 {
5001 Elf_Internal_Rela rela;
5002
5003 /* This symbol needs a copy reloc. Set it up. */
5004
5005 if (h->dynindx == -1
5006 || (h->root.type != bfd_link_hash_defined
5007 && h->root.type != bfd_link_hash_defweak)
5008 || htab->srelbss == NULL)
5009 abort ();
5010
5011 rela.r_offset = (h->root.u.def.value
5012 + h->root.u.def.section->output_section->vma
5013 + h->root.u.def.section->output_offset);
5014 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5015 rela.r_addend = 0;
5016 elf_append_rela (output_bfd, htab->srelbss, &rela);
5017 }
5018
5019 return TRUE;
5020 }
5021
5022 /* Finish up local dynamic symbol handling. We set the contents of
5023 various dynamic sections here. */
5024
5025 static bfd_boolean
5026 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5027 {
5028 struct elf_link_hash_entry *h
5029 = (struct elf_link_hash_entry *) *slot;
5030 struct bfd_link_info *info
5031 = (struct bfd_link_info *) inf;
5032
5033 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5034 info, h, NULL);
5035 }
5036
5037 /* Used to decide how to sort relocs in an optimal manner for the
5038 dynamic linker, before writing them out. */
5039
5040 static enum elf_reloc_type_class
5041 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5042 const asection *rel_sec ATTRIBUTE_UNUSED,
5043 const Elf_Internal_Rela *rela)
5044 {
5045 switch ((int) ELF32_R_TYPE (rela->r_info))
5046 {
5047 case R_X86_64_RELATIVE:
5048 case R_X86_64_RELATIVE64:
5049 return reloc_class_relative;
5050 case R_X86_64_JUMP_SLOT:
5051 return reloc_class_plt;
5052 case R_X86_64_COPY:
5053 return reloc_class_copy;
5054 default:
5055 return reloc_class_normal;
5056 }
5057 }
5058
5059 /* Finish up the dynamic sections. */
5060
5061 static bfd_boolean
5062 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5063 struct bfd_link_info *info)
5064 {
5065 struct elf_x86_64_link_hash_table *htab;
5066 bfd *dynobj;
5067 asection *sdyn;
5068 const struct elf_x86_64_backend_data *abed;
5069
5070 htab = elf_x86_64_hash_table (info);
5071 if (htab == NULL)
5072 return FALSE;
5073
5074 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5075 section only if there is .plt section. */
5076 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5077 ? &elf_x86_64_bnd_arch_bed
5078 : get_elf_x86_64_backend_data (output_bfd));
5079
5080 dynobj = htab->elf.dynobj;
5081 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5082
5083 if (htab->elf.dynamic_sections_created)
5084 {
5085 bfd_byte *dyncon, *dynconend;
5086 const struct elf_backend_data *bed;
5087 bfd_size_type sizeof_dyn;
5088
5089 if (sdyn == NULL || htab->elf.sgot == NULL)
5090 abort ();
5091
5092 bed = get_elf_backend_data (dynobj);
5093 sizeof_dyn = bed->s->sizeof_dyn;
5094 dyncon = sdyn->contents;
5095 dynconend = sdyn->contents + sdyn->size;
5096 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5097 {
5098 Elf_Internal_Dyn dyn;
5099 asection *s;
5100
5101 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5102
5103 switch (dyn.d_tag)
5104 {
5105 default:
5106 continue;
5107
5108 case DT_PLTGOT:
5109 s = htab->elf.sgotplt;
5110 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5111 break;
5112
5113 case DT_JMPREL:
5114 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5115 break;
5116
5117 case DT_PLTRELSZ:
5118 s = htab->elf.srelplt->output_section;
5119 dyn.d_un.d_val = s->size;
5120 break;
5121
5122 case DT_RELASZ:
5123 /* The procedure linkage table relocs (DT_JMPREL) should
5124 not be included in the overall relocs (DT_RELA).
5125 Therefore, we override the DT_RELASZ entry here to
5126 make it not include the JMPREL relocs. Since the
5127 linker script arranges for .rela.plt to follow all
5128 other relocation sections, we don't have to worry
5129 about changing the DT_RELA entry. */
5130 if (htab->elf.srelplt != NULL)
5131 {
5132 s = htab->elf.srelplt->output_section;
5133 dyn.d_un.d_val -= s->size;
5134 }
5135 break;
5136
5137 case DT_TLSDESC_PLT:
5138 s = htab->elf.splt;
5139 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5140 + htab->tlsdesc_plt;
5141 break;
5142
5143 case DT_TLSDESC_GOT:
5144 s = htab->elf.sgot;
5145 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5146 + htab->tlsdesc_got;
5147 break;
5148 }
5149
5150 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5151 }
5152
5153 /* Fill in the special first entry in the procedure linkage table. */
5154 if (htab->elf.splt && htab->elf.splt->size > 0)
5155 {
5156 /* Fill in the first entry in the procedure linkage table. */
5157 memcpy (htab->elf.splt->contents,
5158 abed->plt0_entry, abed->plt_entry_size);
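 /* PLT0 conventionally looks like
      pushq GOT+8(%rip)
      jmpq *GOT+16(%rip)
    (the exact bytes come from abed->plt0_entry), which is why the two
    fixups below are computed against GOT+8 and GOT+16. */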
5159 /* Add the offset for pushq GOT+8(%rip); since the instruction
5160 uses 6 bytes, subtract this value. */
5161 bfd_put_32 (output_bfd,
5162 (htab->elf.sgotplt->output_section->vma
5163 + htab->elf.sgotplt->output_offset
5164 + 8
5165 - htab->elf.splt->output_section->vma
5166 - htab->elf.splt->output_offset
5167 - 6),
5168 htab->elf.splt->contents + abed->plt0_got1_offset);
5169 /* Add offset for the PC-relative instruction accessing GOT+16,
5170 subtracting the offset to the end of that instruction. */
5171 bfd_put_32 (output_bfd,
5172 (htab->elf.sgotplt->output_section->vma
5173 + htab->elf.sgotplt->output_offset
5174 + 16
5175 - htab->elf.splt->output_section->vma
5176 - htab->elf.splt->output_offset
5177 - abed->plt0_got2_insn_end),
5178 htab->elf.splt->contents + abed->plt0_got2_offset);
5179
5180 elf_section_data (htab->elf.splt->output_section)
5181 ->this_hdr.sh_entsize = abed->plt_entry_size;
5182
5183 if (htab->tlsdesc_plt)
5184 {
5185 bfd_put_64 (output_bfd, (bfd_vma) 0,
5186 htab->elf.sgot->contents + htab->tlsdesc_got);
5187
5188 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5189 abed->plt0_entry, abed->plt_entry_size);
5190
5191 /* Add the offset for pushq GOT+8(%rip); since the
5192 instruction uses 6 bytes, subtract this value. */
5193 bfd_put_32 (output_bfd,
5194 (htab->elf.sgotplt->output_section->vma
5195 + htab->elf.sgotplt->output_offset
5196 + 8
5197 - htab->elf.splt->output_section->vma
5198 - htab->elf.splt->output_offset
5199 - htab->tlsdesc_plt
5200 - 6),
5201 htab->elf.splt->contents
5202 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5203 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
5204 where TDG stands for htab->tlsdesc_got, subtracting the offset
5205 to the end of that instruction. */
5206 bfd_put_32 (output_bfd,
5207 (htab->elf.sgot->output_section->vma
5208 + htab->elf.sgot->output_offset
5209 + htab->tlsdesc_got
5210 - htab->elf.splt->output_section->vma
5211 - htab->elf.splt->output_offset
5212 - htab->tlsdesc_plt
5213 - abed->plt0_got2_insn_end),
5214 htab->elf.splt->contents
5215 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5216 }
5217 }
5218 }
5219
5220 if (htab->plt_bnd != NULL)
5221 elf_section_data (htab->plt_bnd->output_section)
5222 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5223
5224 if (htab->elf.sgotplt)
5225 {
5226 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5227 {
5228 (*_bfd_error_handler)
5229 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5230 return FALSE;
5231 }
5232
5233 /* Fill in the first three entries in the global offset table. */
5234 if (htab->elf.sgotplt->size > 0)
5235 {
5236 /* Set the first entry in the global offset table to the address of
5237 the dynamic section. */
5238 if (sdyn == NULL)
5239 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5240 else
5241 bfd_put_64 (output_bfd,
5242 sdyn->output_section->vma + sdyn->output_offset,
5243 htab->elf.sgotplt->contents);
5244 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5245 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5246 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5247 }
5248
5249 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5250 GOT_ENTRY_SIZE;
5251 }
5252
5253 /* Adjust .eh_frame for .plt section. */
5254 if (htab->plt_eh_frame != NULL
5255 && htab->plt_eh_frame->contents != NULL)
5256 {
5257 if (htab->elf.splt != NULL
5258 && htab->elf.splt->size != 0
5259 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5260 && htab->elf.splt->output_section != NULL
5261 && htab->plt_eh_frame->output_section != NULL)
5262 {
5263 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5264 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5265 + htab->plt_eh_frame->output_offset
5266 + PLT_FDE_START_OFFSET;
5267 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5268 htab->plt_eh_frame->contents
5269 + PLT_FDE_START_OFFSET);
5270 }
5271 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5272 {
5273 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5274 htab->plt_eh_frame,
5275 htab->plt_eh_frame->contents))
5276 return FALSE;
5277 }
5278 }
5279
5280 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5281 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5282 = GOT_ENTRY_SIZE;
5283
5284 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5285 htab_traverse (htab->loc_hash_table,
5286 elf_x86_64_finish_local_dynamic_symbol,
5287 info);
5288
5289 return TRUE;
5290 }
5291
5292 /* Return address in section PLT for the Ith GOTPLT relocation, for
5293 relocation REL or (bfd_vma) -1 if it should not be included. */
5294
5295 static bfd_vma
5296 elf_x86_64_plt_sym_val (bfd_vma i, const asection *plt,
5297 const arelent *rel)
5298 {
5299 bfd *abfd;
5300 const struct elf_x86_64_backend_data *bed;
5301 bfd_vma plt_offset;
5302
5303 /* Only match R_X86_64_JUMP_SLOT and R_X86_64_IRELATIVE. */
5304 if (rel->howto->type != R_X86_64_JUMP_SLOT
5305 && rel->howto->type != R_X86_64_IRELATIVE)
5306 return (bfd_vma) -1;
5307
5308 abfd = plt->owner;
5309 bed = get_elf_x86_64_backend_data (abfd);
5310 plt_offset = bed->plt_entry_size;
5311
5312 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5313 return plt->vma + (i + 1) * plt_offset;
5314
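 /* For ELFOSABI_GNU objects the PLT entries need not be laid out in
    relocation order (R_X86_64_IRELATIVE entries are emitted last, see
    elf_x86_64_finish_dynamic_symbol), so scan each PLT entry for the
    relocation index it pushes and match it against I. */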
5315 while (plt_offset < plt->size)
5316 {
5317 bfd_vma reloc_index;
5318 bfd_byte reloc_index_raw[4];
5319
5320 if (!bfd_get_section_contents (abfd, (asection *) plt,
5321 reloc_index_raw,
5322 plt_offset + bed->plt_reloc_offset,
5323 sizeof (reloc_index_raw)))
5324 return (bfd_vma) -1;
5325
5326 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5327 if (reloc_index == i)
5328 return plt->vma + plt_offset;
5329 plt_offset += bed->plt_entry_size;
5330 }
5331
5332 abort ();
5333 }
5334
5335 /* Return offset in .plt.bnd section for the Ith GOTPLT relocation with
5336 PLT section, or (bfd_vma) -1 if it should not be included. */
5337
5338 static bfd_vma
5339 elf_x86_64_plt_sym_val_offset_plt_bnd (bfd_vma i, const asection *plt)
5340 {
5341 const struct elf_x86_64_backend_data *bed = &elf_x86_64_bnd_arch_bed;
5342 bfd *abfd = plt->owner;
5343 bfd_vma plt_offset = bed->plt_entry_size;
5344
5345 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5346 return i * sizeof (elf_x86_64_legacy_plt2_entry);
5347
5348 while (plt_offset < plt->size)
5349 {
5350 bfd_vma reloc_index;
5351 bfd_byte reloc_index_raw[4];
5352
5353 if (!bfd_get_section_contents (abfd, (asection *) plt,
5354 reloc_index_raw,
5355 plt_offset + bed->plt_reloc_offset,
5356 sizeof (reloc_index_raw)))
5357 return (bfd_vma) -1;
5358
5359 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5360 if (reloc_index == i)
5361 {
5362 /* This is the index in .plt section. */
5363 long plt_index = plt_offset / bed->plt_entry_size;
5364 /* Return the offset in .plt.bnd section. */
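 /* The reserved first .plt entry has no counterpart in .plt.bnd,
    which is presumably why the index is biased by one here. */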
5365 return (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry);
5366 }
5367 plt_offset += bed->plt_entry_size;
5368 }
5369
5370 abort ();
5371 }
5372
5373 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5374 support. */
5375
5376 static long
5377 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5378 long symcount,
5379 asymbol **syms,
5380 long dynsymcount,
5381 asymbol **dynsyms,
5382 asymbol **ret)
5383 {
5384 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5385 asection *relplt;
5386 asymbol *s;
5387 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5388 arelent *p;
5389 long count, i, n;
5390 size_t size;
5391 Elf_Internal_Shdr *hdr;
5392 char *names;
5393 asection *plt, *plt_push;
5394
5395 plt_push = bfd_get_section_by_name (abfd, ".plt");
5396 if (plt_push == NULL)
5397 return 0;
5398
5399 plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5400 /* Use the generic ELF version if there is no .plt.bnd section. */
5401 if (plt == NULL)
5402 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
5403 dynsymcount, dynsyms, ret);
5404
5405 *ret = NULL;
5406
5407 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
5408 return 0;
5409
5410 if (dynsymcount <= 0)
5411 return 0;
5412
5413 relplt = bfd_get_section_by_name (abfd, ".rela.plt");
5414 if (relplt == NULL)
5415 return 0;
5416
5417 hdr = &elf_section_data (relplt)->this_hdr;
5418 if (hdr->sh_link != elf_dynsymtab (abfd)
5419 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
5420 return 0;
5421
5422 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5423 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5424 return -1;
5425
5426 count = relplt->size / hdr->sh_entsize;
5427 size = count * sizeof (asymbol);
5428 p = relplt->relocation;
5429 for (i = 0; i < count; i++, p += bed->s->int_rels_per_ext_rel)
5430 {
5431 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
5432 if (p->addend != 0)
5433 size += sizeof ("+0x") - 1 + 8 + 8;
5434 }
5435
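 /* SIZE now covers one asymbol per relocation plus the "<name>@plt"
    strings, with room for a "+0x" suffix and up to 16 hex digits when
    an addend has to be appended. */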
5436 s = *ret = (asymbol *) bfd_malloc (size);
5437 if (s == NULL)
5438 return -1;
5439
5440 names = (char *) (s + count);
5441 p = relplt->relocation;
5442 n = 0;
5443 for (i = 0; i < count; i++, p++)
5444 {
5445 bfd_vma offset;
5446 size_t len;
5447
5448 if (p->howto->type != R_X86_64_JUMP_SLOT
5449 && p->howto->type != R_X86_64_IRELATIVE)
5450 continue;
5451
5452 offset = elf_x86_64_plt_sym_val_offset_plt_bnd (i, plt_push);
5453
5454 *s = **p->sym_ptr_ptr;
5455 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
5456 we are defining a symbol, ensure one of them is set. */
5457 if ((s->flags & BSF_LOCAL) == 0)
5458 s->flags |= BSF_GLOBAL;
5459 s->flags |= BSF_SYNTHETIC;
5460 s->section = plt;
5461 s->value = offset;
5462 s->name = names;
5463 s->udata.p = NULL;
5464 len = strlen ((*p->sym_ptr_ptr)->name);
5465 memcpy (names, (*p->sym_ptr_ptr)->name, len);
5466 names += len;
5467 if (p->addend != 0)
5468 {
5469 char buf[30], *a;
5470
5471 memcpy (names, "+0x", sizeof ("+0x") - 1);
5472 names += sizeof ("+0x") - 1;
5473 bfd_sprintf_vma (abfd, buf, p->addend);
5474 for (a = buf; *a == '0'; ++a)
5475 ;
5476 len = strlen (a);
5477 memcpy (names, a, len);
5478 names += len;
5479 }
5480 memcpy (names, "@plt", sizeof ("@plt"));
5481 names += sizeof ("@plt");
5482 ++s, ++n;
5483 }
5484
5485 return n;
5486 }
5487
5488 /* Handle an x86-64 specific section when reading an object file. This
5489 is called when elfcode.h finds a section with an unknown type. */
5490
5491 static bfd_boolean
5492 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5493 const char *name, int shindex)
5494 {
5495 if (hdr->sh_type != SHT_X86_64_UNWIND)
5496 return FALSE;
5497
5498 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5499 return FALSE;
5500
5501 return TRUE;
5502 }
5503
5504 /* Hook called by the linker routine which adds symbols from an object
5505 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5506 of .bss. */
5507
5508 static bfd_boolean
5509 elf_x86_64_add_symbol_hook (bfd *abfd,
5510 struct bfd_link_info *info,
5511 Elf_Internal_Sym *sym,
5512 const char **namep ATTRIBUTE_UNUSED,
5513 flagword *flagsp ATTRIBUTE_UNUSED,
5514 asection **secp,
5515 bfd_vma *valp)
5516 {
5517 asection *lcomm;
5518
5519 switch (sym->st_shndx)
5520 {
5521 case SHN_X86_64_LCOMMON:
5522 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5523 if (lcomm == NULL)
5524 {
5525 lcomm = bfd_make_section_with_flags (abfd,
5526 "LARGE_COMMON",
5527 (SEC_ALLOC
5528 | SEC_IS_COMMON
5529 | SEC_LINKER_CREATED));
5530 if (lcomm == NULL)
5531 return FALSE;
5532 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5533 }
5534 *secp = lcomm;
5535 *valp = sym->st_size;
5536 return TRUE;
5537 }
5538
5539 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5540 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5541 && (abfd->flags & DYNAMIC) == 0
5542 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5543 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5544
5545 return TRUE;
5546 }
5547
5548
5549 /* Given a BFD section, try to locate the corresponding ELF section
5550 index. */
5551
5552 static bfd_boolean
5553 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5554 asection *sec, int *index_return)
5555 {
5556 if (sec == &_bfd_elf_large_com_section)
5557 {
5558 *index_return = SHN_X86_64_LCOMMON;
5559 return TRUE;
5560 }
5561 return FALSE;
5562 }
5563
5564 /* Process a symbol. */
5565
5566 static void
5567 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5568 asymbol *asym)
5569 {
5570 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5571
5572 switch (elfsym->internal_elf_sym.st_shndx)
5573 {
5574 case SHN_X86_64_LCOMMON:
5575 asym->section = &_bfd_elf_large_com_section;
5576 asym->value = elfsym->internal_elf_sym.st_size;
5577 /* Common symbol doesn't set BSF_GLOBAL. */
5578 asym->flags &= ~BSF_GLOBAL;
5579 break;
5580 }
5581 }
5582
5583 static bfd_boolean
5584 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5585 {
5586 return (sym->st_shndx == SHN_COMMON
5587 || sym->st_shndx == SHN_X86_64_LCOMMON);
5588 }
5589
5590 static unsigned int
5591 elf_x86_64_common_section_index (asection *sec)
5592 {
5593 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5594 return SHN_COMMON;
5595 else
5596 return SHN_X86_64_LCOMMON;
5597 }
5598
5599 static asection *
5600 elf_x86_64_common_section (asection *sec)
5601 {
5602 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5603 return bfd_com_section_ptr;
5604 else
5605 return &_bfd_elf_large_com_section;
5606 }
5607
5608 static bfd_boolean
5609 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5610 const Elf_Internal_Sym *sym,
5611 asection **psec,
5612 bfd_boolean newdef,
5613 bfd_boolean olddef,
5614 bfd *oldbfd,
5615 const asection *oldsec)
5616 {
5617 /* A normal common symbol and a large common symbol result in a
5618 normal common symbol. We turn the large common symbol into a
5619 normal one. */
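  /* For example, if the first definition seen was a large common
     (SHF_X86_64_LARGE set on OLDSEC) and the new symbol is a normal
     SHN_COMMON, the existing entry is moved to an ordinary COMMON
     section; in the opposite case the new symbol's section is demoted
     to the normal common section.  */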
5620 if (!olddef
5621 && h->root.type == bfd_link_hash_common
5622 && !newdef
5623 && bfd_is_com_section (*psec)
5624 && oldsec != *psec)
5625 {
5626 if (sym->st_shndx == SHN_COMMON
5627 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5628 {
5629 h->root.u.c.p->section
5630 = bfd_make_section_old_way (oldbfd, "COMMON");
5631 h->root.u.c.p->section->flags = SEC_ALLOC;
5632 }
5633 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5634 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5635 *psec = bfd_com_section_ptr;
5636 }
5637
5638 return TRUE;
5639 }
5640
5641 static int
5642 elf_x86_64_additional_program_headers (bfd *abfd,
5643 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5644 {
5645 asection *s;
5646 int count = 0;
5647
5648 /* Check to see if we need a large readonly segment. */
5649 s = bfd_get_section_by_name (abfd, ".lrodata");
5650 if (s && (s->flags & SEC_LOAD))
5651 count++;
5652
5653 /* Check to see if we need a large data segment. Since the .lbss
5654 section is placed right after the .bss section, there should be no
5655 need for a large data segment just because of .lbss. */
5656 s = bfd_get_section_by_name (abfd, ".ldata");
5657 if (s && (s->flags & SEC_LOAD))
5658 count++;
5659
5660 return count;
5661 }
5662
5663 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5664
5665 static bfd_boolean
5666 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5667 {
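  /* A symbol that only has a PLT entry, is not defined by a regular
     object and never has its address taken is only ever reached
     through the PLT, so it does not need to be findable via
     .gnu.hash.  */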
5668 if (h->plt.offset != (bfd_vma) -1
5669 && !h->def_regular
5670 && !h->pointer_equality_needed)
5671 return FALSE;
5672
5673 return _bfd_elf_hash_symbol (h);
5674 }
5675
5676 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5677
5678 static bfd_boolean
5679 elf_x86_64_relocs_compatible (const bfd_target *input,
5680 const bfd_target *output)
5681 {
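  /* 32-bit (x32) and 64-bit x86-64 share relocation numbers, but an
     ELF32 object is still not considered compatible with an ELF64
     output (and vice versa); require matching ELF classes on top of
     the generic target check.  */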
5682 return ((xvec_get_elf_backend_data (input)->s->elfclass
5683 == xvec_get_elf_backend_data (output)->s->elfclass)
5684 && _bfd_elf_relocs_compatible (input, output));
5685 }
5686
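/* Default types and attributes for the large-model sections.  Sections
   carrying SHF_X86_64_LARGE are not required to fit in the 2 GB range
   addressable with 32-bit displacements, so they are kept apart from
   the ordinary .text/.data/.bss sections.  */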
5687 static const struct bfd_elf_special_section
5688 elf_x86_64_special_sections[]=
5689 {
5690 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5691 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5692 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5693 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5694 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5695 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5696 { NULL, 0, 0, 0, 0 }
5697 };
5698
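/* Target definition for the plain 64-bit x86-64 ELF vector.  Segments
   are aligned to the 2 MB maximum page size, while 4 KB remains the
   minimum and common page size.  */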
5699 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5700 #define TARGET_LITTLE_NAME "elf64-x86-64"
5701 #define ELF_ARCH bfd_arch_i386
5702 #define ELF_TARGET_ID X86_64_ELF_DATA
5703 #define ELF_MACHINE_CODE EM_X86_64
5704 #define ELF_MAXPAGESIZE 0x200000
5705 #define ELF_MINPAGESIZE 0x1000
5706 #define ELF_COMMONPAGESIZE 0x1000
5707
5708 #define elf_backend_can_gc_sections 1
5709 #define elf_backend_can_refcount 1
5710 #define elf_backend_want_got_plt 1
5711 #define elf_backend_plt_readonly 1
5712 #define elf_backend_want_plt_sym 0
5713 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5714 #define elf_backend_rela_normal 1
5715 #define elf_backend_plt_alignment 4
5716
5717 #define elf_info_to_howto elf_x86_64_info_to_howto
5718
5719 #define bfd_elf64_bfd_link_hash_table_create \
5720 elf_x86_64_link_hash_table_create
5721 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5722 #define bfd_elf64_bfd_reloc_name_lookup \
5723 elf_x86_64_reloc_name_lookup
5724
5725 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5726 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5727 #define elf_backend_check_relocs elf_x86_64_check_relocs
5728 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5729 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5730 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5731 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5732 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5733 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5734 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5735 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5736 #ifdef CORE_HEADER
5737 #define elf_backend_write_core_note elf_x86_64_write_core_note
5738 #endif
5739 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5740 #define elf_backend_relocate_section elf_x86_64_relocate_section
5741 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5742 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5743 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5744 #define elf_backend_plt_sym_val elf_x86_64_plt_sym_val
5745 #define elf_backend_object_p elf64_x86_64_elf_object_p
5746 #define bfd_elf64_mkobject elf_x86_64_mkobject
5747 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5748
5749 #define elf_backend_section_from_shdr \
5750 elf_x86_64_section_from_shdr
5751
5752 #define elf_backend_section_from_bfd_section \
5753 elf_x86_64_elf_section_from_bfd_section
5754 #define elf_backend_add_symbol_hook \
5755 elf_x86_64_add_symbol_hook
5756 #define elf_backend_symbol_processing \
5757 elf_x86_64_symbol_processing
5758 #define elf_backend_common_section_index \
5759 elf_x86_64_common_section_index
5760 #define elf_backend_common_section \
5761 elf_x86_64_common_section
5762 #define elf_backend_common_definition \
5763 elf_x86_64_common_definition
5764 #define elf_backend_merge_symbol \
5765 elf_x86_64_merge_symbol
5766 #define elf_backend_special_sections \
5767 elf_x86_64_special_sections
5768 #define elf_backend_additional_program_headers \
5769 elf_x86_64_additional_program_headers
5770 #define elf_backend_hash_symbol \
5771 elf_x86_64_hash_symbol
5772
5773 #include "elf64-target.h"
5774
5775 /* FreeBSD support. */
5776
5777 #undef TARGET_LITTLE_SYM
5778 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5779 #undef TARGET_LITTLE_NAME
5780 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5781
5782 #undef ELF_OSABI
5783 #define ELF_OSABI ELFOSABI_FREEBSD
5784
5785 #undef elf64_bed
5786 #define elf64_bed elf64_x86_64_fbsd_bed
5787
5788 #include "elf64-target.h"
5789
5790 /* Solaris 2 support. */
5791
5792 #undef TARGET_LITTLE_SYM
5793 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5794 #undef TARGET_LITTLE_NAME
5795 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5796
5797 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5798 objects won't be recognized. */
5799 #undef ELF_OSABI
5800
5801 #undef elf64_bed
5802 #define elf64_bed elf64_x86_64_sol2_bed
5803
5804 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5805 boundary. */
5806 #undef elf_backend_static_tls_alignment
5807 #define elf_backend_static_tls_alignment 16
5808
5809 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5810
5811 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5812 File, p.63. */
5813 #undef elf_backend_want_plt_sym
5814 #define elf_backend_want_plt_sym 1
5815
5816 #include "elf64-target.h"
5817
5818 #undef bfd_elf64_get_synthetic_symtab
5819
5820 /* Native Client support. */
5821
5822 static bfd_boolean
5823 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5824 {
5825 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5826 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5827 return TRUE;
5828 }
5829
5830 #undef TARGET_LITTLE_SYM
5831 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5832 #undef TARGET_LITTLE_NAME
5833 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5834 #undef elf64_bed
5835 #define elf64_bed elf64_x86_64_nacl_bed
5836
5837 #undef ELF_MAXPAGESIZE
5838 #undef ELF_MINPAGESIZE
5839 #undef ELF_COMMONPAGESIZE
5840 #define ELF_MAXPAGESIZE 0x10000
5841 #define ELF_MINPAGESIZE 0x10000
5842 #define ELF_COMMONPAGESIZE 0x10000
5843
5844 /* Restore defaults. */
5845 #undef ELF_OSABI
5846 #undef elf_backend_static_tls_alignment
5847 #undef elf_backend_want_plt_sym
5848 #define elf_backend_want_plt_sym 0
5849
5850 /* NaCl uses substantially different PLT entries for the same effects. */
5851
5852 #undef elf_backend_plt_alignment
5853 #define elf_backend_plt_alignment 5
5854 #define NACL_PLT_ENTRY_SIZE 64
5855 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5856
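/* Under NaCl every indirect branch target must be 32-byte aligned and
   inside the sandbox, so instead of jumping through the GOT directly
   the PLT loads the GOT entry into %r11, clears its low five bits with
   NACLMASK (the 32-bit operation also zeroes the upper half of %r11),
   adds the sandbox base kept in %r15, and only then jumps via %r11.  */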
5857 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5858 {
5859 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5860 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5861 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5862 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5863 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5864
5865 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5866 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5867
5868 /* 32 bytes of nop to pad out to the standard size. */
5869 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5870 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5871 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5872 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5873 0x66, /* excess data32 prefix */
5874 0x90 /* nop */
5875 };
5876
5877 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5878 {
5879 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5880 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5881 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5882 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5883
5884 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5885 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5886 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5887
5888 /* Lazy GOT entries point here (32-byte aligned). */
5889 0x68, /* pushq immediate */
5890 0, 0, 0, 0, /* replaced with index into relocation table. */
5891 0xe9, /* jmp relative */
5892 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5893
5894 /* 22 bytes of nop to pad out to the standard size. */
5895 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5896 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5897 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5898 };
5899
5900 /* .eh_frame covering the .plt section. */
5901
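/* Apart from the sizes, the interesting part is the CFA expression at
   the end: the lazy-binding stub at offset 32 of each 64-byte entry
   starts with a 5-byte pushq, so once the expression sees the PC modulo
   64 reach 37 an extra 8-byte word (the relocation index) is on the
   stack and the CFA becomes %rsp + 16 instead of %rsp + 8.  */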
5902 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5903 {
5904 #if (PLT_CIE_LENGTH != 20 \
5905 || PLT_FDE_LENGTH != 36 \
5906 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5907 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5908 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
5909 #endif
5910 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5911 0, 0, 0, 0, /* CIE ID */
5912 1, /* CIE version */
5913 'z', 'R', 0, /* Augmentation string */
5914 1, /* Code alignment factor */
5915 0x78, /* Data alignment factor */
5916 16, /* Return address column */
5917 1, /* Augmentation size */
5918 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5919 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5920 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5921 DW_CFA_nop, DW_CFA_nop,
5922
5923 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5924 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5925 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5926 0, 0, 0, 0, /* .plt size goes here */
5927 0, /* Augmentation size */
5928 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5929 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5930 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5931 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5932 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5933 13, /* Block length */
5934 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5935 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5936 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5937 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5938 DW_CFA_nop, DW_CFA_nop
5939 };
5940
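/* Byte offsets into the NaCl PLT templates above: 3 is the displacement
   of the initial mov in each entry, 33 the pushq immediate that
   receives the relocation index, 38 the jmp displacement back to PLT0,
   and 32 is where lazy GOT entries point within the 64-byte entry.  */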
5941 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
5942 {
5943 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5944 elf_x86_64_nacl_plt_entry, /* plt_entry */
5945 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5946 2, /* plt0_got1_offset */
5947 9, /* plt0_got2_offset */
5948 13, /* plt0_got2_insn_end */
5949 3, /* plt_got_offset */
5950 33, /* plt_reloc_offset */
5951 38, /* plt_plt_offset */
5952 7, /* plt_got_insn_size */
5953 42, /* plt_plt_insn_end */
5954 32, /* plt_lazy_offset */
5955 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5956 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
5957 };
5958
5959 #undef elf_backend_arch_data
5960 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5961
5962 #undef elf_backend_object_p
5963 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5964 #undef elf_backend_modify_segment_map
5965 #define elf_backend_modify_segment_map nacl_modify_segment_map
5966 #undef elf_backend_modify_program_headers
5967 #define elf_backend_modify_program_headers nacl_modify_program_headers
5968 #undef elf_backend_final_write_processing
5969 #define elf_backend_final_write_processing nacl_final_write_processing
5970
5971 #include "elf64-target.h"
5972
5973 /* Native Client x32 support. */
5974
5975 static bfd_boolean
5976 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5977 {
5978 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5979 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5980 return TRUE;
5981 }
5982
5983 #undef TARGET_LITTLE_SYM
5984 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5985 #undef TARGET_LITTLE_NAME
5986 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5987 #undef elf32_bed
5988 #define elf32_bed elf32_x86_64_nacl_bed
5989
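/* The x32 NaCl vector reuses the 64-bit hash table, relocation lookup
   and object creation routines; only the pieces that depend on the ELF
   class, such as elf_backend_size_info, are switched to their 32-bit
   counterparts below.  */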
5990 #define bfd_elf32_bfd_link_hash_table_create \
5991 elf_x86_64_link_hash_table_create
5992 #define bfd_elf32_bfd_reloc_type_lookup \
5993 elf_x86_64_reloc_type_lookup
5994 #define bfd_elf32_bfd_reloc_name_lookup \
5995 elf_x86_64_reloc_name_lookup
5996 #define bfd_elf32_mkobject \
5997 elf_x86_64_mkobject
5998
5999 #undef elf_backend_object_p
6000 #define elf_backend_object_p \
6001 elf32_x86_64_nacl_elf_object_p
6002
6003 #undef elf_backend_bfd_from_remote_memory
6004 #define elf_backend_bfd_from_remote_memory \
6005 _bfd_elf32_bfd_from_remote_memory
6006
6007 #undef elf_backend_size_info
6008 #define elf_backend_size_info \
6009 _bfd_elf32_size_info
6010
6011 #include "elf32-target.h"
6012
6013 /* Restore defaults. */
6014 #undef elf_backend_object_p
6015 #define elf_backend_object_p elf64_x86_64_elf_object_p
6016 #undef elf_backend_bfd_from_remote_memory
6017 #undef elf_backend_size_info
6018 #undef elf_backend_modify_segment_map
6019 #undef elf_backend_modify_program_headers
6020 #undef elf_backend_final_write_processing
6021
6022 /* Intel L1OM support. */
6023
6024 static bfd_boolean
6025 elf64_l1om_elf_object_p (bfd *abfd)
6026 {
6027 /* Set the right machine number for an L1OM ELF64 file. */
6028 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6029 return TRUE;
6030 }
6031
6032 #undef TARGET_LITTLE_SYM
6033 #define TARGET_LITTLE_SYM l1om_elf64_vec
6034 #undef TARGET_LITTLE_NAME
6035 #define TARGET_LITTLE_NAME "elf64-l1om"
6036 #undef ELF_ARCH
6037 #define ELF_ARCH bfd_arch_l1om
6038
6039 #undef ELF_MACHINE_CODE
6040 #define ELF_MACHINE_CODE EM_L1OM
6041
6042 #undef ELF_OSABI
6043
6044 #undef elf64_bed
6045 #define elf64_bed elf64_l1om_bed
6046
6047 #undef elf_backend_object_p
6048 #define elf_backend_object_p elf64_l1om_elf_object_p
6049
6050 /* Restore defaults. */
6051 #undef ELF_MAXPAGESIZE
6052 #undef ELF_MINPAGESIZE
6053 #undef ELF_COMMONPAGESIZE
6054 #define ELF_MAXPAGESIZE 0x200000
6055 #define ELF_MINPAGESIZE 0x1000
6056 #define ELF_COMMONPAGESIZE 0x1000
6057 #undef elf_backend_plt_alignment
6058 #define elf_backend_plt_alignment 4
6059 #undef elf_backend_arch_data
6060 #define elf_backend_arch_data &elf_x86_64_arch_bed
6061
6062 #include "elf64-target.h"
6063
6064 /* FreeBSD L1OM support. */
6065
6066 #undef TARGET_LITTLE_SYM
6067 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6068 #undef TARGET_LITTLE_NAME
6069 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6070
6071 #undef ELF_OSABI
6072 #define ELF_OSABI ELFOSABI_FREEBSD
6073
6074 #undef elf64_bed
6075 #define elf64_bed elf64_l1om_fbsd_bed
6076
6077 #include "elf64-target.h"
6078
6079 /* Intel K1OM support. */
6080
6081 static bfd_boolean
6082 elf64_k1om_elf_object_p (bfd *abfd)
6083 {
6084 /* Set the right machine number for a K1OM ELF64 file. */
6085 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6086 return TRUE;
6087 }
6088
6089 #undef TARGET_LITTLE_SYM
6090 #define TARGET_LITTLE_SYM k1om_elf64_vec
6091 #undef TARGET_LITTLE_NAME
6092 #define TARGET_LITTLE_NAME "elf64-k1om"
6093 #undef ELF_ARCH
6094 #define ELF_ARCH bfd_arch_k1om
6095
6096 #undef ELF_MACHINE_CODE
6097 #define ELF_MACHINE_CODE EM_K1OM
6098
6099 #undef ELF_OSABI
6100
6101 #undef elf64_bed
6102 #define elf64_bed elf64_k1om_bed
6103
6104 #undef elf_backend_object_p
6105 #define elf_backend_object_p elf64_k1om_elf_object_p
6106
6107 #undef elf_backend_static_tls_alignment
6108
6109 #undef elf_backend_want_plt_sym
6110 #define elf_backend_want_plt_sym 0
6111
6112 #include "elf64-target.h"
6113
6114 /* FreeBSD K1OM support. */
6115
6116 #undef TARGET_LITTLE_SYM
6117 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6118 #undef TARGET_LITTLE_NAME
6119 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6120
6121 #undef ELF_OSABI
6122 #define ELF_OSABI ELFOSABI_FREEBSD
6123
6124 #undef elf64_bed
6125 #define elf64_bed elf64_k1om_fbsd_bed
6126
6127 #include "elf64-target.h"
6128
6129 /* 32-bit x86-64 support. */
6130
6131 #undef TARGET_LITTLE_SYM
6132 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6133 #undef TARGET_LITTLE_NAME
6134 #define TARGET_LITTLE_NAME "elf32-x86-64"
6135 #undef elf32_bed
6136
6137 #undef ELF_ARCH
6138 #define ELF_ARCH bfd_arch_i386
6139
6140 #undef ELF_MACHINE_CODE
6141 #define ELF_MACHINE_CODE EM_X86_64
6142
6143 #undef ELF_OSABI
6144
6145 #undef elf_backend_object_p
6146 #define elf_backend_object_p \
6147 elf32_x86_64_elf_object_p
6148
6149 #undef elf_backend_bfd_from_remote_memory
6150 #define elf_backend_bfd_from_remote_memory \
6151 _bfd_elf32_bfd_from_remote_memory
6152
6153 #undef elf_backend_size_info
6154 #define elf_backend_size_info \
6155 _bfd_elf32_size_info
6156
6157 #include "elf32-target.h"