1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since 32-bit and 64-bit x86-64 encode relocation types in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
46 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
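/* As a worked example of the mapping above (a sketch, not code used by
   BFD itself): because R_X86_64_vt_offset is defined so that the two
   vtable types land just past the numbering gap,

     elf_x86_64_rtype_to_howto (abfd, R_X86_64_GNU_VTINHERIT)

   returns the howto at index R_X86_64_standard, and the same call with
   R_X86_64_GNU_VTENTRY returns the entry immediately after it; the
   BFD_ASSERT above then checks that the selected entry's type field
   matches the requested relocation type.  */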
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
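/* Typical use, e.g. when generic BFD code resolves a reloc code through
   bfd_reloc_type_lookup (the caller shown here only for illustration):

     reloc_howto_type *howto
       = elf_x86_64_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);

   walks the map above, finds { BFD_RELOC_32_PCREL, R_X86_64_PC32 } and
   hands back the "R_X86_64_PC32" howto from the table.  */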
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
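/* For reference on the numbers above: in both layouts pr_reg is 216 bytes,
   which (assuming the usual Linux elf_gregset_t) is 27 general-purpose
   registers of 8 bytes each; the offsets 72 and 112 are where that
   register block starts inside the x32 and x86-64 prstatus notes
   respectively.  */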
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
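/* In the usual lazy-binding scheme the 8 and 16 displacements above are
   rewritten at final link time so that, with GOT_ENTRY_SIZE == 8,
   "pushq GOT+8(%rip)" pushes GOT[1] (the link map set up by the dynamic
   linker) and "jmpq *GOT+16(%rip)" jumps through GOT[2] (the dynamic
   linker's lazy resolver); see plt0_got1_offset/plt0_got2_offset in the
   backend data below.  */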
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
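/* Layout check for the macros above: the 4-byte length word plus the
   PLT_CIE_LENGTH (20) bytes of CIE body occupy offsets 0-23, the FDE
   length word and CIE pointer occupy 24-31, so the FDE's initial-location
   word sits at PLT_FDE_START_OFFSET == 4 + 20 + 8 == 32 and its
   address-range word at PLT_FDE_LEN_OFFSET == 36, i.e. exactly the two
   words marked ".plt goes here" and ".plt size goes here" above.  */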
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
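/* Cross-checking elf_x86_64_arch_bed against the templates above:
   plt0_got1_offset (2) and plt0_got2_offset (8) are the displacement
   fields of the "ff 35" pushq and "ff 25" jmpq in elf_x86_64_plt0_entry,
   with plt0_got2_insn_end (12) just past the jmpq; in elf_x86_64_plt_entry,
   plt_got_offset (2) is the jmpq displacement, plt_reloc_offset (7) the
   immediate of the "68" pushq, plt_plt_offset (12) the displacement of the
   final "e9" jmp back to PLT0, and plt_lazy_offset (6) is where the GOT
   entry initially points so that the first call falls into that pushq.  */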
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
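/* For instance, a symbol referenced through both R_X86_64_TLSGD and
   R_X86_64_GOTPC32_TLSDESC ends up with
   tls_type == (GOT_TLS_GD | GOT_TLS_GDESC), for which GOT_TLS_GD_BOTH_P,
   GOT_TLS_GD_P and GOT_TLS_GDESC_P are all true, while an ordinary data
   reference keeps GOT_NORMAL.  */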
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols. We reuse indx and dynstr_index for the local
920 symbol hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from GD access model. For 64bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transition to a different access model. For 32bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transition to a different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq %rbx, %rax
1254 call *%rax. */
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transition to a different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq %rbx, %rax
1297 call *%rax. */
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
1340
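/* For example, "movq foo@gottpoff(%rip), %rax" assembles to
   48 8b 05 <disp32>, with the relocation applied to the disp32: the 0x48
   REX.W prefix is the byte checked three bytes before the reloc offset,
   0x8b (or 0x03 for add) is the opcode checked two bytes before, and the
   ModRM byte 0x05 (mod == 00, r/m == 101, RIP-relative) is what the
   (val & 0xc7) == 5 test at the end of this case verifies.  */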
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
1365 val = bfd_get_8 (abfd, contents + offset - 1);
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
1409
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
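/* In outline, for an executable link the switch above relaxes
   R_X86_64_TLSLD to R_X86_64_TPOFF32, and R_X86_64_TLSGD (and the
   TLSDESC/GOTTPOFF forms) to R_X86_64_TPOFF32 when there is no hash
   entry (a local symbol) or to R_X86_64_GOTTPOFF otherwise, with a
   further tightening at relocate time based on TLS_TYPE and dynindx;
   STT_FUNC/STT_GNU_IFUNC symbols are never transitioned.  */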
1531
1532 /* Rename some of the generic section flags to better document how they
1533 are used here. */
1534 #define need_convert_mov_to_lea sec_flg0
1535
1536 /* Look through the relocs for a section during the first phase, and
1537 calculate needed space in the global offset table, procedure
1538 linkage table, and dynamic reloc sections. */
1539
1540 static bfd_boolean
1541 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1542 asection *sec,
1543 const Elf_Internal_Rela *relocs)
1544 {
1545 struct elf_x86_64_link_hash_table *htab;
1546 Elf_Internal_Shdr *symtab_hdr;
1547 struct elf_link_hash_entry **sym_hashes;
1548 const Elf_Internal_Rela *rel;
1549 const Elf_Internal_Rela *rel_end;
1550 asection *sreloc;
1551 bfd_boolean use_plt_got;
1552
1553 if (info->relocatable)
1554 return TRUE;
1555
1556 BFD_ASSERT (is_x86_64_elf (abfd));
1557
1558 htab = elf_x86_64_hash_table (info);
1559 if (htab == NULL)
1560 return FALSE;
1561
1562 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1563
1564 symtab_hdr = &elf_symtab_hdr (abfd);
1565 sym_hashes = elf_sym_hashes (abfd);
1566
1567 sreloc = NULL;
1568
1569 rel_end = relocs + sec->reloc_count;
1570 for (rel = relocs; rel < rel_end; rel++)
1571 {
1572 unsigned int r_type;
1573 unsigned long r_symndx;
1574 struct elf_link_hash_entry *h;
1575 Elf_Internal_Sym *isym;
1576 const char *name;
1577 bfd_boolean size_reloc;
1578
1579 r_symndx = htab->r_sym (rel->r_info);
1580 r_type = ELF32_R_TYPE (rel->r_info);
1581
1582 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1583 {
1584 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1585 abfd, r_symndx);
1586 return FALSE;
1587 }
1588
1589 if (r_symndx < symtab_hdr->sh_info)
1590 {
1591 /* A local symbol. */
1592 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1593 abfd, r_symndx);
1594 if (isym == NULL)
1595 return FALSE;
1596
1597 /* Check relocation against local STT_GNU_IFUNC symbol. */
1598 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1599 {
1600 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1601 TRUE);
1602 if (h == NULL)
1603 return FALSE;
1604
1605 /* Fake a STT_GNU_IFUNC symbol. */
1606 h->type = STT_GNU_IFUNC;
1607 h->def_regular = 1;
1608 h->ref_regular = 1;
1609 h->forced_local = 1;
1610 h->root.type = bfd_link_hash_defined;
1611 }
1612 else
1613 h = NULL;
1614 }
1615 else
1616 {
1617 isym = NULL;
1618 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1619 while (h->root.type == bfd_link_hash_indirect
1620 || h->root.type == bfd_link_hash_warning)
1621 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1622 }
1623
1624 /* Check invalid x32 relocations. */
1625 if (!ABI_64_P (abfd))
1626 switch (r_type)
1627 {
1628 default:
1629 break;
1630
1631 case R_X86_64_DTPOFF64:
1632 case R_X86_64_TPOFF64:
1633 case R_X86_64_PC64:
1634 case R_X86_64_GOTOFF64:
1635 case R_X86_64_GOT64:
1636 case R_X86_64_GOTPCREL64:
1637 case R_X86_64_GOTPC64:
1638 case R_X86_64_GOTPLT64:
1639 case R_X86_64_PLTOFF64:
1640 {
1641 if (h)
1642 name = h->root.root.string;
1643 else
1644 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1645 NULL);
1646 (*_bfd_error_handler)
1647 (_("%B: relocation %s against symbol `%s' isn't "
1648 "supported in x32 mode"), abfd,
1649 x86_64_elf_howto_table[r_type].name, name);
1650 bfd_set_error (bfd_error_bad_value);
1651 return FALSE;
1652 }
1653 break;
1654 }
1655
1656 if (h != NULL)
1657 {
1658 /* Create the ifunc sections for static executables. If we
1659 never see an indirect function symbol and are not building
1660 a static executable, those sections will be empty and
1661 won't appear in the output. */
1662 switch (r_type)
1663 {
1664 default:
1665 break;
1666
1667 case R_X86_64_PC32_BND:
1668 case R_X86_64_PLT32_BND:
1669 case R_X86_64_PC32:
1670 case R_X86_64_PLT32:
1671 case R_X86_64_32:
1672 case R_X86_64_64:
1673 /* MPX PLT is supported only if elf_x86_64_arch_bed
1674 is used in 64-bit mode. */
1675 if (ABI_64_P (abfd)
1676 && info->bndplt
1677 && (get_elf_x86_64_backend_data (abfd)
1678 == &elf_x86_64_arch_bed))
1679 {
1680 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1681
1682 /* Create the second PLT for Intel MPX support. */
1683 if (htab->plt_bnd == NULL)
1684 {
1685 unsigned int plt_bnd_align;
1686 const struct elf_backend_data *bed;
1687
1688 bed = get_elf_backend_data (info->output_bfd);
1689 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1690 && (sizeof (elf_x86_64_bnd_plt2_entry)
1691 == sizeof (elf_x86_64_legacy_plt2_entry)));
1692 plt_bnd_align = 3;
1693
1694 if (htab->elf.dynobj == NULL)
1695 htab->elf.dynobj = abfd;
1696 htab->plt_bnd
1697 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1698 ".plt.bnd",
1699 (bed->dynamic_sec_flags
1700 | SEC_ALLOC
1701 | SEC_CODE
1702 | SEC_LOAD
1703 | SEC_READONLY));
1704 if (htab->plt_bnd == NULL
1705 || !bfd_set_section_alignment (htab->elf.dynobj,
1706 htab->plt_bnd,
1707 plt_bnd_align))
1708 return FALSE;
1709 }
1710 }
1711
1712 case R_X86_64_32S:
1713 case R_X86_64_PC64:
1714 case R_X86_64_GOTPCREL:
1715 case R_X86_64_GOTPCREL64:
1716 if (htab->elf.dynobj == NULL)
1717 htab->elf.dynobj = abfd;
1718 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1719 return FALSE;
1720 break;
1721 }
1722
1723 /* It is referenced by a non-shared object. */
1724 h->ref_regular = 1;
1725 h->root.non_ir_ref = 1;
1726 }
1727
1728 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1729 symtab_hdr, sym_hashes,
1730 &r_type, GOT_UNKNOWN,
1731 rel, rel_end, h, r_symndx))
1732 return FALSE;
1733
1734 switch (r_type)
1735 {
1736 case R_X86_64_TLSLD:
1737 htab->tls_ld_got.refcount += 1;
1738 goto create_got;
1739
1740 case R_X86_64_TPOFF32:
1741 if (!info->executable && ABI_64_P (abfd))
1742 {
1743 if (h)
1744 name = h->root.root.string;
1745 else
1746 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1747 NULL);
1748 (*_bfd_error_handler)
1749 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1750 abfd,
1751 x86_64_elf_howto_table[r_type].name, name);
1752 bfd_set_error (bfd_error_bad_value);
1753 return FALSE;
1754 }
1755 break;
1756
1757 case R_X86_64_GOTTPOFF:
1758 if (!info->executable)
1759 info->flags |= DF_STATIC_TLS;
1760 /* Fall through */
1761
1762 case R_X86_64_GOT32:
1763 case R_X86_64_GOTPCREL:
1764 case R_X86_64_TLSGD:
1765 case R_X86_64_GOT64:
1766 case R_X86_64_GOTPCREL64:
1767 case R_X86_64_GOTPLT64:
1768 case R_X86_64_GOTPC32_TLSDESC:
1769 case R_X86_64_TLSDESC_CALL:
1770 /* This symbol requires a global offset table entry. */
1771 {
1772 int tls_type, old_tls_type;
1773
1774 switch (r_type)
1775 {
1776 default: tls_type = GOT_NORMAL; break;
1777 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1778 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1779 case R_X86_64_GOTPC32_TLSDESC:
1780 case R_X86_64_TLSDESC_CALL:
1781 tls_type = GOT_TLS_GDESC; break;
1782 }
1783
1784 if (h != NULL)
1785 {
1786 h->got.refcount += 1;
1787 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1788 }
1789 else
1790 {
1791 bfd_signed_vma *local_got_refcounts;
1792
1793 /* This is a global offset table entry for a local symbol. */
1794 local_got_refcounts = elf_local_got_refcounts (abfd);
1795 if (local_got_refcounts == NULL)
1796 {
1797 bfd_size_type size;
1798
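		    /* One allocation provides three arrays indexed by local
		       symbol number: GOT reference counts, local TLSDESC GOT
		       offsets, and per-symbol GOT TLS types (see the pointer
		       setup below).  */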
1799 size = symtab_hdr->sh_info;
1800 size *= sizeof (bfd_signed_vma)
1801 + sizeof (bfd_vma) + sizeof (char);
1802 local_got_refcounts = ((bfd_signed_vma *)
1803 bfd_zalloc (abfd, size));
1804 if (local_got_refcounts == NULL)
1805 return FALSE;
1806 elf_local_got_refcounts (abfd) = local_got_refcounts;
1807 elf_x86_64_local_tlsdesc_gotent (abfd)
1808 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1809 elf_x86_64_local_got_tls_type (abfd)
1810 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1811 }
1812 local_got_refcounts[r_symndx] += 1;
1813 old_tls_type
1814 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1815 }
1816
1817 /* If a TLS symbol is accessed using IE at least once,
1818 	       there is no point in using a dynamic model for it.  */
1819 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1820 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1821 || tls_type != GOT_TLS_IE))
1822 {
1823 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1824 tls_type = old_tls_type;
1825 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1826 && GOT_TLS_GD_ANY_P (tls_type))
1827 tls_type |= old_tls_type;
1828 else
1829 {
1830 if (h)
1831 name = h->root.root.string;
1832 else
1833 name = bfd_elf_sym_name (abfd, symtab_hdr,
1834 isym, NULL);
1835 (*_bfd_error_handler)
1836 (_("%B: '%s' accessed both as normal and thread local symbol"),
1837 abfd, name);
1838 bfd_set_error (bfd_error_bad_value);
1839 return FALSE;
1840 }
1841 }
1842
1843 if (old_tls_type != tls_type)
1844 {
1845 if (h != NULL)
1846 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1847 else
1848 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1849 }
1850 }
1851 /* Fall through */
1852
1853 case R_X86_64_GOTOFF64:
1854 case R_X86_64_GOTPC32:
1855 case R_X86_64_GOTPC64:
1856 create_got:
1857 if (htab->elf.sgot == NULL)
1858 {
1859 if (htab->elf.dynobj == NULL)
1860 htab->elf.dynobj = abfd;
1861 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1862 info))
1863 return FALSE;
1864 }
1865 break;
1866
1867 case R_X86_64_PLT32:
1868 case R_X86_64_PLT32_BND:
1869 /* This symbol requires a procedure linkage table entry. We
1870 actually build the entry in adjust_dynamic_symbol,
1871 because this might be a case of linking PIC code which is
1872 never referenced by a dynamic object, in which case we
1873 don't need to generate a procedure linkage table entry
1874 after all. */
1875
1876 /* If this is a local symbol, we resolve it directly without
1877 creating a procedure linkage table entry. */
1878 if (h == NULL)
1879 continue;
1880
1881 h->needs_plt = 1;
1882 h->plt.refcount += 1;
1883 break;
1884
1885 case R_X86_64_PLTOFF64:
1886 /* This tries to form the 'address' of a function relative
1887 	     to the GOT.  For global symbols we need a PLT entry.  */
1888 if (h != NULL)
1889 {
1890 h->needs_plt = 1;
1891 h->plt.refcount += 1;
1892 }
1893 goto create_got;
1894
1895 case R_X86_64_SIZE32:
1896 case R_X86_64_SIZE64:
1897 size_reloc = TRUE;
1898 goto do_size;
1899
1900 case R_X86_64_32:
1901 if (!ABI_64_P (abfd))
1902 goto pointer;
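	  /* Fall through for 64-bit objects.  */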
1903 case R_X86_64_8:
1904 case R_X86_64_16:
1905 case R_X86_64_32S:
1906 /* Let's help debug shared library creation. These relocs
1907 cannot be used in shared libs. Don't error out for
1908 sections we don't care about, such as debug sections or
1909 non-constant sections. */
1910 if (info->shared
1911 && (sec->flags & SEC_ALLOC) != 0
1912 && (sec->flags & SEC_READONLY) != 0)
1913 {
1914 if (h)
1915 name = h->root.root.string;
1916 else
1917 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1918 (*_bfd_error_handler)
1919 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1920 abfd, x86_64_elf_howto_table[r_type].name, name);
1921 bfd_set_error (bfd_error_bad_value);
1922 return FALSE;
1923 }
1924 /* Fall through. */
1925
1926 case R_X86_64_PC8:
1927 case R_X86_64_PC16:
1928 case R_X86_64_PC32:
1929 case R_X86_64_PC32_BND:
1930 case R_X86_64_PC64:
1931 case R_X86_64_64:
1932 pointer:
1933 if (h != NULL && info->executable)
1934 {
1935 /* If this reloc is in a read-only section, we might
1936 need a copy reloc. We can't check reliably at this
1937 stage whether the section is read-only, as input
1938 sections have not yet been mapped to output sections.
1939 Tentatively set the flag for now, and correct in
1940 adjust_dynamic_symbol. */
1941 h->non_got_ref = 1;
1942
1943 /* We may need a .plt entry if the function this reloc
1944 refers to is in a shared lib. */
1945 h->plt.refcount += 1;
1946 if (r_type != R_X86_64_PC32
1947 && r_type != R_X86_64_PC32_BND
1948 && r_type != R_X86_64_PC64)
1949 h->pointer_equality_needed = 1;
1950 }
1951
1952 size_reloc = FALSE;
1953 do_size:
1954 /* If we are creating a shared library, and this is a reloc
1955 against a global symbol, or a non PC relative reloc
1956 against a local symbol, then we need to copy the reloc
1957 into the shared library. However, if we are linking with
1958 -Bsymbolic, we do not need to copy a reloc against a
1959 global symbol which is defined in an object we are
1960 including in the link (i.e., DEF_REGULAR is set). At
1961 this point we have not seen all the input files, so it is
1962 possible that DEF_REGULAR is not set now but will be set
1963 later (it is never cleared). In case of a weak definition,
1964 DEF_REGULAR may be cleared later by a strong definition in
1965 a shared library. We account for that possibility below by
1966 storing information in the relocs_copied field of the hash
1967 table entry. A similar situation occurs when creating
1968 shared libraries and symbol visibility changes render the
1969 symbol local.
1970
1971 If on the other hand, we are creating an executable, we
1972 may need to keep relocations for symbols satisfied by a
1973 dynamic library if we manage to avoid copy relocs for the
1974 symbol. */
1975 if ((info->shared
1976 && (sec->flags & SEC_ALLOC) != 0
1977 && (! IS_X86_64_PCREL_TYPE (r_type)
1978 || (h != NULL
1979 && (! SYMBOLIC_BIND (info, h)
1980 || h->root.type == bfd_link_hash_defweak
1981 || !h->def_regular))))
1982 || (ELIMINATE_COPY_RELOCS
1983 && !info->shared
1984 && (sec->flags & SEC_ALLOC) != 0
1985 && h != NULL
1986 && (h->root.type == bfd_link_hash_defweak
1987 || !h->def_regular)))
1988 {
1989 struct elf_dyn_relocs *p;
1990 struct elf_dyn_relocs **head;
1991
1992 /* We must copy these reloc types into the output file.
1993 Create a reloc section in dynobj and make room for
1994 this reloc. */
1995 if (sreloc == NULL)
1996 {
1997 if (htab->elf.dynobj == NULL)
1998 htab->elf.dynobj = abfd;
1999
2000 sreloc = _bfd_elf_make_dynamic_reloc_section
2001 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2002 abfd, /*rela?*/ TRUE);
2003
2004 if (sreloc == NULL)
2005 return FALSE;
2006 }
2007
2008 /* If this is a global symbol, we count the number of
2009 relocations we need for this symbol. */
2010 if (h != NULL)
2011 {
2012 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2013 }
2014 else
2015 {
2016 /* Track dynamic relocs needed for local syms too.
2017 We really need local syms available to do this
2018 easily. Oh well. */
2019 asection *s;
2020 void **vpp;
2021
2022 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2023 abfd, r_symndx);
2024 if (isym == NULL)
2025 return FALSE;
2026
2027 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2028 if (s == NULL)
2029 s = sec;
2030
2031 	      /* Beware of type-punned pointers vs strict aliasing
2032 rules. */
2033 vpp = &(elf_section_data (s)->local_dynrel);
2034 head = (struct elf_dyn_relocs **)vpp;
2035 }
2036
2037 p = *head;
2038 if (p == NULL || p->sec != sec)
2039 {
2040 bfd_size_type amt = sizeof *p;
2041
2042 p = ((struct elf_dyn_relocs *)
2043 bfd_alloc (htab->elf.dynobj, amt));
2044 if (p == NULL)
2045 return FALSE;
2046 p->next = *head;
2047 *head = p;
2048 p->sec = sec;
2049 p->count = 0;
2050 p->pc_count = 0;
2051 }
2052
2053 p->count += 1;
2054 /* Count size relocation as PC-relative relocation. */
2055 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2056 p->pc_count += 1;
2057 }
2058 break;
2059
2060 /* This relocation describes the C++ object vtable hierarchy.
2061 Reconstruct it for later use during GC. */
2062 case R_X86_64_GNU_VTINHERIT:
2063 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2064 return FALSE;
2065 break;
2066
2067 /* This relocation describes which C++ vtable entries are actually
2068 used. Record for later use during GC. */
2069 case R_X86_64_GNU_VTENTRY:
2070 BFD_ASSERT (h != NULL);
2071 if (h != NULL
2072 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2073 return FALSE;
2074 break;
2075
2076 default:
2077 break;
2078 }
2079
2080 if (use_plt_got
2081 && h != NULL
2082 && h->plt.refcount > 0
2083 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2084 || h->got.refcount > 0)
2085 && htab->plt_got == NULL)
2086 {
2087 /* Create the GOT procedure linkage table. */
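	  /* Entries in .plt.got branch indirectly through the symbol's
	     regular GOT slot, so no lazy .got.plt slot or JUMP_SLOT
	     relocation is needed.  This is used when the symbol also has
	     a GOT reference, or with -z now when pointer equality is not
	     required.  */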
2088 unsigned int plt_got_align;
2089 const struct elf_backend_data *bed;
2090
2091 bed = get_elf_backend_data (info->output_bfd);
2092 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2093 && (sizeof (elf_x86_64_bnd_plt2_entry)
2094 == sizeof (elf_x86_64_legacy_plt2_entry)));
2095 plt_got_align = 3;
2096
2097 if (htab->elf.dynobj == NULL)
2098 htab->elf.dynobj = abfd;
2099 htab->plt_got
2100 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2101 ".plt.got",
2102 (bed->dynamic_sec_flags
2103 | SEC_ALLOC
2104 | SEC_CODE
2105 | SEC_LOAD
2106 | SEC_READONLY));
2107 if (htab->plt_got == NULL
2108 || !bfd_set_section_alignment (htab->elf.dynobj,
2109 htab->plt_got,
2110 plt_got_align))
2111 return FALSE;
2112 }
2113
2114 if (r_type == R_X86_64_GOTPCREL
2115 && (h == NULL || h->type != STT_GNU_IFUNC))
2116 sec->need_convert_mov_to_lea = 1;
2117 }
2118
2119 return TRUE;
2120 }
2121
2122 /* Return the section that should be marked against GC for a given
2123 relocation. */
2124
2125 static asection *
2126 elf_x86_64_gc_mark_hook (asection *sec,
2127 struct bfd_link_info *info,
2128 Elf_Internal_Rela *rel,
2129 struct elf_link_hash_entry *h,
2130 Elf_Internal_Sym *sym)
2131 {
2132 if (h != NULL)
2133 switch (ELF32_R_TYPE (rel->r_info))
2134 {
2135 case R_X86_64_GNU_VTINHERIT:
2136 case R_X86_64_GNU_VTENTRY:
2137 return NULL;
2138 }
2139
2140 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2141 }
2142
2143 /* Update the got entry reference counts for the section being removed. */
2144
2145 static bfd_boolean
2146 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2147 asection *sec,
2148 const Elf_Internal_Rela *relocs)
2149 {
2150 struct elf_x86_64_link_hash_table *htab;
2151 Elf_Internal_Shdr *symtab_hdr;
2152 struct elf_link_hash_entry **sym_hashes;
2153 bfd_signed_vma *local_got_refcounts;
2154 const Elf_Internal_Rela *rel, *relend;
2155
2156 if (info->relocatable)
2157 return TRUE;
2158
2159 htab = elf_x86_64_hash_table (info);
2160 if (htab == NULL)
2161 return FALSE;
2162
2163 elf_section_data (sec)->local_dynrel = NULL;
2164
2165 symtab_hdr = &elf_symtab_hdr (abfd);
2166 sym_hashes = elf_sym_hashes (abfd);
2167 local_got_refcounts = elf_local_got_refcounts (abfd);
2168
2170 relend = relocs + sec->reloc_count;
2171 for (rel = relocs; rel < relend; rel++)
2172 {
2173 unsigned long r_symndx;
2174 unsigned int r_type;
2175 struct elf_link_hash_entry *h = NULL;
2176
2177 r_symndx = htab->r_sym (rel->r_info);
2178 if (r_symndx >= symtab_hdr->sh_info)
2179 {
2180 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2181 while (h->root.type == bfd_link_hash_indirect
2182 || h->root.type == bfd_link_hash_warning)
2183 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2184 }
2185 else
2186 {
2187 /* A local symbol. */
2188 Elf_Internal_Sym *isym;
2189
2190 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2191 abfd, r_symndx);
2192
2193 /* Check relocation against local STT_GNU_IFUNC symbol. */
2194 if (isym != NULL
2195 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2196 {
2197 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2198 if (h == NULL)
2199 abort ();
2200 }
2201 }
2202
2203 if (h)
2204 {
2205 struct elf_x86_64_link_hash_entry *eh;
2206 struct elf_dyn_relocs **pp;
2207 struct elf_dyn_relocs *p;
2208
2209 eh = (struct elf_x86_64_link_hash_entry *) h;
2210
2211 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2212 if (p->sec == sec)
2213 {
2214 /* Everything must go for SEC. */
2215 *pp = p->next;
2216 break;
2217 }
2218 }
2219
2220 r_type = ELF32_R_TYPE (rel->r_info);
2221 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2222 symtab_hdr, sym_hashes,
2223 &r_type, GOT_UNKNOWN,
2224 rel, relend, h, r_symndx))
2225 return FALSE;
2226
2227 switch (r_type)
2228 {
2229 case R_X86_64_TLSLD:
2230 if (htab->tls_ld_got.refcount > 0)
2231 htab->tls_ld_got.refcount -= 1;
2232 break;
2233
2234 case R_X86_64_TLSGD:
2235 case R_X86_64_GOTPC32_TLSDESC:
2236 case R_X86_64_TLSDESC_CALL:
2237 case R_X86_64_GOTTPOFF:
2238 case R_X86_64_GOT32:
2239 case R_X86_64_GOTPCREL:
2240 case R_X86_64_GOT64:
2241 case R_X86_64_GOTPCREL64:
2242 case R_X86_64_GOTPLT64:
2243 if (h != NULL)
2244 {
2245 if (h->got.refcount > 0)
2246 h->got.refcount -= 1;
2247 if (h->type == STT_GNU_IFUNC)
2248 {
2249 if (h->plt.refcount > 0)
2250 h->plt.refcount -= 1;
2251 }
2252 }
2253 else if (local_got_refcounts != NULL)
2254 {
2255 if (local_got_refcounts[r_symndx] > 0)
2256 local_got_refcounts[r_symndx] -= 1;
2257 }
2258 break;
2259
2260 case R_X86_64_8:
2261 case R_X86_64_16:
2262 case R_X86_64_32:
2263 case R_X86_64_64:
2264 case R_X86_64_32S:
2265 case R_X86_64_PC8:
2266 case R_X86_64_PC16:
2267 case R_X86_64_PC32:
2268 case R_X86_64_PC32_BND:
2269 case R_X86_64_PC64:
2270 case R_X86_64_SIZE32:
2271 case R_X86_64_SIZE64:
2272 if (info->shared
2273 && (h == NULL || h->type != STT_GNU_IFUNC))
2274 break;
2275 /* Fall thru */
2276
2277 case R_X86_64_PLT32:
2278 case R_X86_64_PLT32_BND:
2279 case R_X86_64_PLTOFF64:
2280 if (h != NULL)
2281 {
2282 if (h->plt.refcount > 0)
2283 h->plt.refcount -= 1;
2284 }
2285 break;
2286
2287 default:
2288 break;
2289 }
2290 }
2291
2292 return TRUE;
2293 }
2294
2295 /* Adjust a symbol defined by a dynamic object and referenced by a
2296 regular object. The current definition is in some section of the
2297 dynamic object, but we're not including those sections. We have to
2298 change the definition to something the rest of the link can
2299 understand. */
2300
2301 static bfd_boolean
2302 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2303 struct elf_link_hash_entry *h)
2304 {
2305 struct elf_x86_64_link_hash_table *htab;
2306 asection *s;
2307 struct elf_x86_64_link_hash_entry *eh;
2308 struct elf_dyn_relocs *p;
2309
2310 /* STT_GNU_IFUNC symbol must go through PLT. */
2311 if (h->type == STT_GNU_IFUNC)
2312 {
2313       /* All local STT_GNU_IFUNC references must be treated as local
2314 calls via local PLT. */
2315 if (h->ref_regular
2316 && SYMBOL_CALLS_LOCAL (info, h))
2317 {
2318 bfd_size_type pc_count = 0, count = 0;
2319 struct elf_dyn_relocs **pp;
2320
2321 eh = (struct elf_x86_64_link_hash_entry *) h;
2322 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2323 {
2324 pc_count += p->pc_count;
2325 p->count -= p->pc_count;
2326 p->pc_count = 0;
2327 count += p->count;
2328 if (p->count == 0)
2329 *pp = p->next;
2330 else
2331 pp = &p->next;
2332 }
2333
2334 if (pc_count || count)
2335 {
2336 h->needs_plt = 1;
2337 h->non_got_ref = 1;
2338 if (h->plt.refcount <= 0)
2339 h->plt.refcount = 1;
2340 else
2341 h->plt.refcount += 1;
2342 }
2343 }
2344
2345 if (h->plt.refcount <= 0)
2346 {
2347 h->plt.offset = (bfd_vma) -1;
2348 h->needs_plt = 0;
2349 }
2350 return TRUE;
2351 }
2352
2353 /* If this is a function, put it in the procedure linkage table. We
2354 will fill in the contents of the procedure linkage table later,
2355 when we know the address of the .got section. */
2356 if (h->type == STT_FUNC
2357 || h->needs_plt)
2358 {
2359 if (h->plt.refcount <= 0
2360 || SYMBOL_CALLS_LOCAL (info, h)
2361 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2362 && h->root.type == bfd_link_hash_undefweak))
2363 {
2364 /* This case can occur if we saw a PLT32 reloc in an input
2365 file, but the symbol was never referred to by a dynamic
2366 object, or if all references were garbage collected. In
2367 such a case, we don't actually need to build a procedure
2368 linkage table, and we can just do a PC32 reloc instead. */
2369 h->plt.offset = (bfd_vma) -1;
2370 h->needs_plt = 0;
2371 }
2372
2373 return TRUE;
2374 }
2375 else
2376 /* It's possible that we incorrectly decided a .plt reloc was
2377 needed for an R_X86_64_PC32 reloc to a non-function sym in
2378 check_relocs. We can't decide accurately between function and
2379        non-function syms in check_relocs; objects loaded later in
2380 the link may change h->type. So fix it now. */
2381 h->plt.offset = (bfd_vma) -1;
2382
2383 /* If this is a weak symbol, and there is a real definition, the
2384 processor independent code will have arranged for us to see the
2385 real definition first, and we can just use the same value. */
2386 if (h->u.weakdef != NULL)
2387 {
2388 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2389 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2390 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2391 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2392 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2393 {
2394 eh = (struct elf_x86_64_link_hash_entry *) h;
2395 h->non_got_ref = h->u.weakdef->non_got_ref;
2396 eh->needs_copy = h->u.weakdef->needs_copy;
2397 }
2398 return TRUE;
2399 }
2400
2401 /* This is a reference to a symbol defined by a dynamic object which
2402 is not a function. */
2403
2404 /* If we are creating a shared library, we must presume that the
2405 only references to the symbol are via the global offset table.
2406 For such cases we need not do anything here; the relocations will
2407 be handled correctly by relocate_section. */
2408 if (!info->executable)
2409 return TRUE;
2410
2411 /* If there are no references to this symbol that do not use the
2412 GOT, we don't need to generate a copy reloc. */
2413 if (!h->non_got_ref)
2414 return TRUE;
2415
2416 /* If -z nocopyreloc was given, we won't generate them either. */
2417 if (info->nocopyreloc)
2418 {
2419 h->non_got_ref = 0;
2420 return TRUE;
2421 }
2422
2423 if (ELIMINATE_COPY_RELOCS)
2424 {
2425 eh = (struct elf_x86_64_link_hash_entry *) h;
2426 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2427 {
2428 s = p->sec->output_section;
2429 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2430 break;
2431 }
2432
2433 /* If we didn't find any dynamic relocs in read-only sections, then
2434 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2435 if (p == NULL)
2436 {
2437 h->non_got_ref = 0;
2438 return TRUE;
2439 }
2440 }
2441
2442 /* We must allocate the symbol in our .dynbss section, which will
2443 become part of the .bss section of the executable. There will be
2444 an entry for this symbol in the .dynsym section. The dynamic
2445 object will contain position independent code, so all references
2446 from the dynamic object to this symbol will go through the global
2447 offset table. The dynamic linker will use the .dynsym entry to
2448 determine the address it must put in the global offset table, so
2449 both the dynamic object and the regular object will refer to the
2450 same memory location for the variable. */
2451
2452 htab = elf_x86_64_hash_table (info);
2453 if (htab == NULL)
2454 return FALSE;
2455
2456 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2457 to copy the initial value out of the dynamic object and into the
2458 runtime process image. */
2459 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2460 {
2461 const struct elf_backend_data *bed;
2462 bed = get_elf_backend_data (info->output_bfd);
2463 htab->srelbss->size += bed->s->sizeof_rela;
2464 h->needs_copy = 1;
2465 }
2466
2467 s = htab->sdynbss;
2468
2469 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2470 }
2471
2472 /* Allocate space in .plt, .got and associated reloc sections for
2473 dynamic relocs. */
2474
2475 static bfd_boolean
2476 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2477 {
2478 struct bfd_link_info *info;
2479 struct elf_x86_64_link_hash_table *htab;
2480 struct elf_x86_64_link_hash_entry *eh;
2481 struct elf_dyn_relocs *p;
2482 const struct elf_backend_data *bed;
2483 unsigned int plt_entry_size;
2484
2485 if (h->root.type == bfd_link_hash_indirect)
2486 return TRUE;
2487
2488 eh = (struct elf_x86_64_link_hash_entry *) h;
2489
2490 info = (struct bfd_link_info *) inf;
2491 htab = elf_x86_64_hash_table (info);
2492 if (htab == NULL)
2493 return FALSE;
2494 bed = get_elf_backend_data (info->output_bfd);
2495 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2496
2497 /* We can't use the GOT PLT if pointer equality is needed since
2498      finish_dynamic_symbol won't clear the symbol value and the dynamic
2499 linker won't update the GOT slot. We will get into an infinite
2500 loop at run-time. */
2501 if (htab->plt_got != NULL
2502 && h->type != STT_GNU_IFUNC
2503 && !h->pointer_equality_needed
2504 && h->plt.refcount > 0
2505 && h->got.refcount > 0)
2506 {
2507 /* Don't use the regular PLT if there are both GOT and GOTPLT
2508 	 relocations.  */
2509 h->plt.offset = (bfd_vma) -1;
2510
2511 /* Use the GOT PLT. */
2512 eh->plt_got.refcount = 1;
2513 }
2514
2515 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2516 here if it is defined and referenced in a non-shared object. */
2517 if (h->type == STT_GNU_IFUNC
2518 && h->def_regular)
2519 {
2520 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2521 &eh->dyn_relocs,
2522 plt_entry_size,
2523 plt_entry_size,
2524 GOT_ENTRY_SIZE))
2525 {
2526 asection *s = htab->plt_bnd;
2527 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2528 {
2529 /* Use the .plt.bnd section if it is created. */
2530 eh->plt_bnd.offset = s->size;
2531
2532 /* Make room for this entry in the .plt.bnd section. */
2533 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2534 }
2535
2536 return TRUE;
2537 }
2538 else
2539 return FALSE;
2540 }
2541 else if (htab->elf.dynamic_sections_created
2542 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2543 {
2544 bfd_boolean use_plt_got;
2545
2546 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2547 {
2548 /* Don't use the regular PLT for DF_BIND_NOW. */
2549 h->plt.offset = (bfd_vma) -1;
2550
2551 /* Use the GOT PLT. */
2552 h->got.refcount = 1;
2553 eh->plt_got.refcount = 1;
2554 }
2555
2556 use_plt_got = eh->plt_got.refcount > 0;
2557
2558 /* Make sure this symbol is output as a dynamic symbol.
2559 Undefined weak syms won't yet be marked as dynamic. */
2560 if (h->dynindx == -1
2561 && !h->forced_local)
2562 {
2563 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2564 return FALSE;
2565 }
2566
2567 if (info->shared
2568 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2569 {
2570 asection *s = htab->elf.splt;
2571 asection *bnd_s = htab->plt_bnd;
2572 asection *got_s = htab->plt_got;
2573
2574 if (use_plt_got)
2575 eh->plt_got.offset = got_s->size;
2576 else
2577 {
2578 /* If this is the first .plt entry, make room for the
2579 special first entry. */
2580 if (s->size == 0)
2581 s->size = plt_entry_size;
2582 h->plt.offset = s->size;
2583 if (bnd_s)
2584 eh->plt_bnd.offset = bnd_s->size;
2585 }
2586
2587 /* If this symbol is not defined in a regular file, and we are
2588 not generating a shared library, then set the symbol to this
2589 location in the .plt. This is required to make function
2590 pointers compare as equal between the normal executable and
2591 the shared library. */
2592 if (! info->shared
2593 && !h->def_regular)
2594 {
2595 if (use_plt_got)
2596 {
2597 /* We need to make a call to the entry of the GOT PLT
2598 instead of regular PLT entry. */
2599 h->root.u.def.section = got_s;
2600 h->root.u.def.value = eh->plt_got.offset;
2601 }
2602 else
2603 {
2604 if (bnd_s)
2605 {
2606 /* We need to make a call to the entry of the second
2607 PLT instead of regular PLT entry. */
2608 h->root.u.def.section = bnd_s;
2609 h->root.u.def.value = eh->plt_bnd.offset;
2610 }
2611 else
2612 {
2613 h->root.u.def.section = s;
2614 h->root.u.def.value = h->plt.offset;
2615 }
2616 }
2617 }
2618
2619 /* Make room for this entry. */
2620 if (use_plt_got)
2621 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2622 else
2623 {
2624 s->size += plt_entry_size;
2625 if (bnd_s)
2626 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2627
2628 /* We also need to make an entry in the .got.plt section,
2629 which will be placed in the .got section by the linker
2630 script. */
2631 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2632
2633 /* We also need to make an entry in the .rela.plt
2634 section. */
2635 htab->elf.srelplt->size += bed->s->sizeof_rela;
2636 htab->elf.srelplt->reloc_count++;
2637 }
2638 }
2639 else
2640 {
2641 h->plt.offset = (bfd_vma) -1;
2642 h->needs_plt = 0;
2643 }
2644 }
2645 else
2646 {
2647 h->plt.offset = (bfd_vma) -1;
2648 h->needs_plt = 0;
2649 }
2650
2651 eh->tlsdesc_got = (bfd_vma) -1;
2652
2653 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2654      make it an R_X86_64_TPOFF32 requiring no GOT entry.  */
2655 if (h->got.refcount > 0
2656 && info->executable
2657 && h->dynindx == -1
2658 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2659 {
2660 h->got.offset = (bfd_vma) -1;
2661 }
2662 else if (h->got.refcount > 0)
2663 {
2664 asection *s;
2665 bfd_boolean dyn;
2666 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2667
2668 /* Make sure this symbol is output as a dynamic symbol.
2669 Undefined weak syms won't yet be marked as dynamic. */
2670 if (h->dynindx == -1
2671 && !h->forced_local)
2672 {
2673 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2674 return FALSE;
2675 }
2676
2677 if (GOT_TLS_GDESC_P (tls_type))
2678 {
2679 eh->tlsdesc_got = htab->elf.sgotplt->size
2680 - elf_x86_64_compute_jump_table_size (htab);
2681 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
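	  /* A got.offset of -2 records that only a TLS descriptor slot
	     (the two .got.plt entries above) has been allocated, as
	     opposed to -1 meaning no GOT entry at all.  */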
2682 h->got.offset = (bfd_vma) -2;
2683 }
2684 if (! GOT_TLS_GDESC_P (tls_type)
2685 || GOT_TLS_GD_P (tls_type))
2686 {
2687 s = htab->elf.sgot;
2688 h->got.offset = s->size;
2689 s->size += GOT_ENTRY_SIZE;
2690 if (GOT_TLS_GD_P (tls_type))
2691 s->size += GOT_ENTRY_SIZE;
2692 }
2693 dyn = htab->elf.dynamic_sections_created;
2694 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2695 and two if global.
2696 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2697 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2698 || tls_type == GOT_TLS_IE)
2699 htab->elf.srelgot->size += bed->s->sizeof_rela;
2700 else if (GOT_TLS_GD_P (tls_type))
2701 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2702 else if (! GOT_TLS_GDESC_P (tls_type)
2703 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2704 || h->root.type != bfd_link_hash_undefweak)
2705 && (info->shared
2706 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2707 htab->elf.srelgot->size += bed->s->sizeof_rela;
2708 if (GOT_TLS_GDESC_P (tls_type))
2709 {
2710 htab->elf.srelplt->size += bed->s->sizeof_rela;
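	  /* -1 only marks that a TLSDESC lazy-resolution trampoline is
	     needed; the actual PLT offset is assigned later in
	     elf_x86_64_size_dynamic_sections.  */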
2711 htab->tlsdesc_plt = (bfd_vma) -1;
2712 }
2713 }
2714 else
2715 h->got.offset = (bfd_vma) -1;
2716
2717 if (eh->dyn_relocs == NULL)
2718 return TRUE;
2719
2720 /* In the shared -Bsymbolic case, discard space allocated for
2721 dynamic pc-relative relocs against symbols which turn out to be
2722 defined in regular objects. For the normal shared case, discard
2723 space for pc-relative relocs that have become local due to symbol
2724 visibility changes. */
2725
2726 if (info->shared)
2727 {
2728 /* Relocs that use pc_count are those that appear on a call
2729 	 insn, or certain REL relocs that can be generated via assembly.
2730 We want calls to protected symbols to resolve directly to the
2731 function rather than going via the plt. If people want
2732 function pointer comparisons to work as expected then they
2733 should avoid writing weird assembly. */
2734 if (SYMBOL_CALLS_LOCAL (info, h))
2735 {
2736 struct elf_dyn_relocs **pp;
2737
2738 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2739 {
2740 p->count -= p->pc_count;
2741 p->pc_count = 0;
2742 if (p->count == 0)
2743 *pp = p->next;
2744 else
2745 pp = &p->next;
2746 }
2747 }
2748
2749 /* Also discard relocs on undefined weak syms with non-default
2750 visibility. */
2751 if (eh->dyn_relocs != NULL)
2752 {
2753 if (h->root.type == bfd_link_hash_undefweak)
2754 {
2755 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2756 eh->dyn_relocs = NULL;
2757
2758 /* Make sure undefined weak symbols are output as a dynamic
2759 symbol in PIEs. */
2760 else if (h->dynindx == -1
2761 && ! h->forced_local
2762 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2763 return FALSE;
2764 }
2765 /* For PIE, discard space for pc-relative relocs against
2766 symbols which turn out to need copy relocs. */
2767 else if (info->executable
2768 && (h->needs_copy || eh->needs_copy)
2769 && h->def_dynamic
2770 && !h->def_regular)
2771 {
2772 struct elf_dyn_relocs **pp;
2773
2774 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2775 {
2776 if (p->pc_count != 0)
2777 *pp = p->next;
2778 else
2779 pp = &p->next;
2780 }
2781 }
2782 }
2783 }
2784 else if (ELIMINATE_COPY_RELOCS)
2785 {
2786 /* For the non-shared case, discard space for relocs against
2787 symbols which turn out to need copy relocs or are not
2788 dynamic. */
2789
2790 if (!h->non_got_ref
2791 && ((h->def_dynamic
2792 && !h->def_regular)
2793 || (htab->elf.dynamic_sections_created
2794 && (h->root.type == bfd_link_hash_undefweak
2795 || h->root.type == bfd_link_hash_undefined))))
2796 {
2797 /* Make sure this symbol is output as a dynamic symbol.
2798 Undefined weak syms won't yet be marked as dynamic. */
2799 if (h->dynindx == -1
2800 && ! h->forced_local
2801 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2802 return FALSE;
2803
2804 /* If that succeeded, we know we'll be keeping all the
2805 relocs. */
2806 if (h->dynindx != -1)
2807 goto keep;
2808 }
2809
2810 eh->dyn_relocs = NULL;
2811
2812 keep: ;
2813 }
2814
2815 /* Finally, allocate space. */
2816 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2817 {
2818 asection * sreloc;
2819
2820 sreloc = elf_section_data (p->sec)->sreloc;
2821
2822 BFD_ASSERT (sreloc != NULL);
2823
2824 sreloc->size += p->count * bed->s->sizeof_rela;
2825 }
2826
2827 return TRUE;
2828 }
2829
2830 /* Allocate space in .plt, .got and associated reloc sections for
2831 local dynamic relocs. */
2832
2833 static bfd_boolean
2834 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2835 {
2836 struct elf_link_hash_entry *h
2837 = (struct elf_link_hash_entry *) *slot;
2838
2839 if (h->type != STT_GNU_IFUNC
2840 || !h->def_regular
2841 || !h->ref_regular
2842 || !h->forced_local
2843 || h->root.type != bfd_link_hash_defined)
2844 abort ();
2845
2846 return elf_x86_64_allocate_dynrelocs (h, inf);
2847 }
2848
2849 /* Find any dynamic relocs that apply to read-only sections. */
2850
2851 static bfd_boolean
2852 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2853 void * inf)
2854 {
2855 struct elf_x86_64_link_hash_entry *eh;
2856 struct elf_dyn_relocs *p;
2857
2858 /* Skip local IFUNC symbols. */
2859 if (h->forced_local && h->type == STT_GNU_IFUNC)
2860 return TRUE;
2861
2862 eh = (struct elf_x86_64_link_hash_entry *) h;
2863 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2864 {
2865 asection *s = p->sec->output_section;
2866
2867 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2868 {
2869 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2870
2871 info->flags |= DF_TEXTREL;
2872
2873 if ((info->warn_shared_textrel && info->shared)
2874 || info->error_textrel)
2875 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2876 p->sec->owner, h->root.root.string,
2877 p->sec);
2878
2879 /* Not an error, just cut short the traversal. */
2880 return FALSE;
2881 }
2882 }
2883 return TRUE;
2884 }
2885
2886 /* Convert
2887 mov foo@GOTPCREL(%rip), %reg
2888 to
2889 lea foo(%rip), %reg
2890 with the local symbol, foo. */
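   For instance, with %rax as the destination register:
     48 8b 05 <disp32>	mov foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 <disp32>	lea foo(%rip), %rax
   i.e. only the opcode byte at r_offset - 2 changes from 0x8b to 0x8d,
   and the relocation is rewritten to R_X86_64_PC32.  */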
2891
2892 static bfd_boolean
2893 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2894 struct bfd_link_info *link_info)
2895 {
2896 Elf_Internal_Shdr *symtab_hdr;
2897 Elf_Internal_Rela *internal_relocs;
2898 Elf_Internal_Rela *irel, *irelend;
2899 bfd_byte *contents;
2900 struct elf_x86_64_link_hash_table *htab;
2901 bfd_boolean changed_contents;
2902 bfd_boolean changed_relocs;
2903 bfd_signed_vma *local_got_refcounts;
2904 bfd_vma maxpagesize;
2905
2906 /* Don't even try to convert non-ELF outputs. */
2907 if (!is_elf_hash_table (link_info->hash))
2908 return FALSE;
2909
2910 /* Nothing to do if there is no need or no output. */
2911 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2912 || sec->need_convert_mov_to_lea == 0
2913 || bfd_is_abs_section (sec->output_section))
2914 return TRUE;
2915
2916 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2917
2918 /* Load the relocations for this section. */
2919 internal_relocs = (_bfd_elf_link_read_relocs
2920 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2921 link_info->keep_memory));
2922 if (internal_relocs == NULL)
2923 return FALSE;
2924
2925 htab = elf_x86_64_hash_table (link_info);
2926 changed_contents = FALSE;
2927 changed_relocs = FALSE;
2928 local_got_refcounts = elf_local_got_refcounts (abfd);
2929 maxpagesize = get_elf_backend_data (abfd)->maxpagesize;
2930
2931 /* Get the section contents. */
2932 if (elf_section_data (sec)->this_hdr.contents != NULL)
2933 contents = elf_section_data (sec)->this_hdr.contents;
2934 else
2935 {
2936 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2937 goto error_return;
2938 }
2939
2940 irelend = internal_relocs + sec->reloc_count;
2941 for (irel = internal_relocs; irel < irelend; irel++)
2942 {
2943 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2944 unsigned int r_symndx = htab->r_sym (irel->r_info);
2945 unsigned int indx;
2946 struct elf_link_hash_entry *h;
2947 asection *tsec;
2948 char symtype;
2949 bfd_vma toff, roff;
2950 enum {
2951 none, local, global
2952 } convert_mov_to_lea;
2953
2954 if (r_type != R_X86_64_GOTPCREL)
2955 continue;
2956
2957 roff = irel->r_offset;
2958
2959        /* Don't convert the R_X86_64_GOTPCREL relocation if it isn't for a mov
2960 instruction. */
2961 if (roff < 2
2962 || bfd_get_8 (abfd, contents + roff - 2) != 0x8b)
2963 continue;
2964
2965 tsec = NULL;
2966 convert_mov_to_lea = none;
2967
2968 /* Get the symbol referred to by the reloc. */
2969 if (r_symndx < symtab_hdr->sh_info)
2970 {
2971 Elf_Internal_Sym *isym;
2972
2973 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2974 abfd, r_symndx);
2975
2976 symtype = ELF_ST_TYPE (isym->st_info);
2977
2978 	  /* STT_GNU_IFUNC must keep its R_X86_64_GOTPCREL relocation; also
2979 	     skip relocations against undefined symbols.  */
2980 if (symtype != STT_GNU_IFUNC && isym->st_shndx != SHN_UNDEF)
2981 {
2982 if (isym->st_shndx == SHN_ABS)
2983 tsec = bfd_abs_section_ptr;
2984 else if (isym->st_shndx == SHN_COMMON)
2985 tsec = bfd_com_section_ptr;
2986 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
2987 tsec = &_bfd_elf_large_com_section;
2988 else
2989 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
2990
2991 toff = isym->st_value;
2992 convert_mov_to_lea = local;
2993 }
2994 }
2995 else
2996 {
2997 indx = r_symndx - symtab_hdr->sh_info;
2998 h = elf_sym_hashes (abfd)[indx];
2999 BFD_ASSERT (h != NULL);
3000
3001 while (h->root.type == bfd_link_hash_indirect
3002 || h->root.type == bfd_link_hash_warning)
3003 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3004
3005 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
3006 avoid optimizing _DYNAMIC since ld.so may use its link-time
3007 address. */
3008 if (h->def_regular
3009 && h->type != STT_GNU_IFUNC
3010 && h != htab->elf.hdynamic
3011 && SYMBOL_REFERENCES_LOCAL (link_info, h))
3012 {
3013 tsec = h->root.u.def.section;
3014 toff = h->root.u.def.value;
3015 symtype = h->type;
3016 convert_mov_to_lea = global;
3017 }
3018 }
3019
3020 if (convert_mov_to_lea == none)
3021 continue;
3022
3023 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
3024 {
3025 /* At this stage in linking, no SEC_MERGE symbol has been
3026 adjusted, so all references to such symbols need to be
3027 passed through _bfd_merged_section_offset. (Later, in
3028 relocate_section, all SEC_MERGE symbols *except* for
3029 section symbols have been adjusted.)
3030
3031 gas may reduce relocations against symbols in SEC_MERGE
3032 sections to a relocation against the section symbol when
3033 the original addend was zero. When the reloc is against
3034 a section symbol we should include the addend in the
3035 offset passed to _bfd_merged_section_offset, since the
3036 location of interest is the original symbol. On the
3037 other hand, an access to "sym+addend" where "sym" is not
3038 	     a section symbol should not include the addend; such an
3039 	     access is presumed to be an offset from "sym"; the
3040 location of interest is just "sym". */
3041 if (symtype == STT_SECTION)
3042 toff += irel->r_addend;
3043
3044 toff = _bfd_merged_section_offset (abfd, &tsec,
3045 elf_section_data (tsec)->sec_info,
3046 toff);
3047
3048 if (symtype != STT_SECTION)
3049 toff += irel->r_addend;
3050 }
3051 else
3052 toff += irel->r_addend;
3053
3054        /* Don't convert if the R_X86_64_PC32 relocation would overflow.  */
3055 if (tsec->output_section == sec->output_section)
3056 {
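	  /* Biasing the displacement by 0x80000000 maps the signed 32-bit
	     range onto [0, 0xffffffff], so the test below (and the similar
	     one further down) rejects anything that would not fit the
	     32-bit PC-relative field.  */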
3057 if ((toff - roff + 0x80000000) > 0xffffffff)
3058 continue;
3059 }
3060 else
3061 {
3062 asection *asect;
3063 bfd_size_type size;
3064
3065 	  /* At this point, we don't know the load addresses of the TSEC
3066 	     section or the SEC section.  We estimate the distance between
3067 SEC and TSEC. */
3068 size = 0;
3069 for (asect = sec->output_section;
3070 asect != NULL && asect != tsec->output_section;
3071 asect = asect->next)
3072 {
3073 asection *i;
3074 for (i = asect->output_section->map_head.s;
3075 i != NULL;
3076 i = i->map_head.s)
3077 {
3078 size = align_power (size, i->alignment_power);
3079 size += i->size;
3080 }
3081 }
3082
3083 /* Don't convert R_X86_64_GOTPCREL if TSEC isn't placed after
3084 SEC. */
3085 if (asect == NULL)
3086 continue;
3087
3088 /* Take PT_GNU_RELRO segment into account by adding
3089 maxpagesize. */
3090 if ((toff + size + maxpagesize - roff + 0x80000000)
3091 > 0xffffffff)
3092 continue;
3093 }
3094
3095 bfd_put_8 (abfd, 0x8d, contents + roff - 2);
3096 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
3097 changed_contents = TRUE;
3098 changed_relocs = TRUE;
3099
3100 if (convert_mov_to_lea == local)
3101 {
3102 if (local_got_refcounts != NULL
3103 && local_got_refcounts[r_symndx] > 0)
3104 local_got_refcounts[r_symndx] -= 1;
3105 }
3106 else
3107 {
3108 if (h->got.refcount > 0)
3109 h->got.refcount -= 1;
3110 }
3111 }
3112
3113 if (contents != NULL
3114 && elf_section_data (sec)->this_hdr.contents != contents)
3115 {
3116 if (!changed_contents && !link_info->keep_memory)
3117 free (contents);
3118 else
3119 {
3120 /* Cache the section contents for elf_link_input_bfd. */
3121 elf_section_data (sec)->this_hdr.contents = contents;
3122 }
3123 }
3124
3125 if (elf_section_data (sec)->relocs != internal_relocs)
3126 {
3127 if (!changed_relocs)
3128 free (internal_relocs);
3129 else
3130 elf_section_data (sec)->relocs = internal_relocs;
3131 }
3132
3133 return TRUE;
3134
3135 error_return:
3136 if (contents != NULL
3137 && elf_section_data (sec)->this_hdr.contents != contents)
3138 free (contents);
3139 if (internal_relocs != NULL
3140 && elf_section_data (sec)->relocs != internal_relocs)
3141 free (internal_relocs);
3142 return FALSE;
3143 }
3144
3145 /* Set the sizes of the dynamic sections. */
3146
3147 static bfd_boolean
3148 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3149 struct bfd_link_info *info)
3150 {
3151 struct elf_x86_64_link_hash_table *htab;
3152 bfd *dynobj;
3153 asection *s;
3154 bfd_boolean relocs;
3155 bfd *ibfd;
3156 const struct elf_backend_data *bed;
3157
3158 htab = elf_x86_64_hash_table (info);
3159 if (htab == NULL)
3160 return FALSE;
3161 bed = get_elf_backend_data (output_bfd);
3162
3163 dynobj = htab->elf.dynobj;
3164 if (dynobj == NULL)
3165 abort ();
3166
3167 if (htab->elf.dynamic_sections_created)
3168 {
3169 /* Set the contents of the .interp section to the interpreter. */
3170 if (info->executable)
3171 {
3172 s = bfd_get_linker_section (dynobj, ".interp");
3173 if (s == NULL)
3174 abort ();
3175 s->size = htab->dynamic_interpreter_size;
3176 s->contents = (unsigned char *) htab->dynamic_interpreter;
3177 }
3178 }
3179
3180 /* Set up .got offsets for local syms, and space for local dynamic
3181 relocs. */
3182 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3183 {
3184 bfd_signed_vma *local_got;
3185 bfd_signed_vma *end_local_got;
3186 char *local_tls_type;
3187 bfd_vma *local_tlsdesc_gotent;
3188 bfd_size_type locsymcount;
3189 Elf_Internal_Shdr *symtab_hdr;
3190 asection *srel;
3191
3192 if (! is_x86_64_elf (ibfd))
3193 continue;
3194
3195 for (s = ibfd->sections; s != NULL; s = s->next)
3196 {
3197 struct elf_dyn_relocs *p;
3198
3199 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3200 return FALSE;
3201
3202 for (p = (struct elf_dyn_relocs *)
3203 (elf_section_data (s)->local_dynrel);
3204 p != NULL;
3205 p = p->next)
3206 {
3207 if (!bfd_is_abs_section (p->sec)
3208 && bfd_is_abs_section (p->sec->output_section))
3209 {
3210 /* Input section has been discarded, either because
3211 it is a copy of a linkonce section or due to
3212 linker script /DISCARD/, so we'll be discarding
3213 the relocs too. */
3214 }
3215 else if (p->count != 0)
3216 {
3217 srel = elf_section_data (p->sec)->sreloc;
3218 srel->size += p->count * bed->s->sizeof_rela;
3219 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3220 && (info->flags & DF_TEXTREL) == 0)
3221 {
3222 info->flags |= DF_TEXTREL;
3223 if ((info->warn_shared_textrel && info->shared)
3224 || info->error_textrel)
3225 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3226 p->sec->owner, p->sec);
3227 }
3228 }
3229 }
3230 }
3231
3232 local_got = elf_local_got_refcounts (ibfd);
3233 if (!local_got)
3234 continue;
3235
3236 symtab_hdr = &elf_symtab_hdr (ibfd);
3237 locsymcount = symtab_hdr->sh_info;
3238 end_local_got = local_got + locsymcount;
3239 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3240 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3241 s = htab->elf.sgot;
3242 srel = htab->elf.srelgot;
3243 for (; local_got < end_local_got;
3244 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3245 {
3246 *local_tlsdesc_gotent = (bfd_vma) -1;
3247 if (*local_got > 0)
3248 {
3249 if (GOT_TLS_GDESC_P (*local_tls_type))
3250 {
3251 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3252 - elf_x86_64_compute_jump_table_size (htab);
3253 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3254 *local_got = (bfd_vma) -2;
3255 }
3256 if (! GOT_TLS_GDESC_P (*local_tls_type)
3257 || GOT_TLS_GD_P (*local_tls_type))
3258 {
3259 *local_got = s->size;
3260 s->size += GOT_ENTRY_SIZE;
3261 if (GOT_TLS_GD_P (*local_tls_type))
3262 s->size += GOT_ENTRY_SIZE;
3263 }
3264 if (info->shared
3265 || GOT_TLS_GD_ANY_P (*local_tls_type)
3266 || *local_tls_type == GOT_TLS_IE)
3267 {
3268 if (GOT_TLS_GDESC_P (*local_tls_type))
3269 {
3270 htab->elf.srelplt->size
3271 += bed->s->sizeof_rela;
3272 htab->tlsdesc_plt = (bfd_vma) -1;
3273 }
3274 if (! GOT_TLS_GDESC_P (*local_tls_type)
3275 || GOT_TLS_GD_P (*local_tls_type))
3276 srel->size += bed->s->sizeof_rela;
3277 }
3278 }
3279 else
3280 *local_got = (bfd_vma) -1;
3281 }
3282 }
3283
3284 if (htab->tls_ld_got.refcount > 0)
3285 {
3286 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3287 relocs. */
3288 htab->tls_ld_got.offset = htab->elf.sgot->size;
3289 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3290 htab->elf.srelgot->size += bed->s->sizeof_rela;
3291 }
3292 else
3293 htab->tls_ld_got.offset = -1;
3294
3295 /* Allocate global sym .plt and .got entries, and space for global
3296 sym dynamic relocs. */
3297 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3298 info);
3299
3300 /* Allocate .plt and .got entries, and space for local symbols. */
3301 htab_traverse (htab->loc_hash_table,
3302 elf_x86_64_allocate_local_dynrelocs,
3303 info);
3304
3305 /* For every jump slot reserved in the sgotplt, reloc_count is
3306 incremented. However, when we reserve space for TLS descriptors,
3307 it's not incremented, so in order to compute the space reserved
3308 for them, it suffices to multiply the reloc count by the jump
3309 slot size.
3310
3311 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3312 so that R_X86_64_IRELATIVE entries come last. */
3313 if (htab->elf.srelplt)
3314 {
3315 htab->sgotplt_jump_table_size
3316 = elf_x86_64_compute_jump_table_size (htab);
3317 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3318 }
3319 else if (htab->elf.irelplt)
3320 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3321
3322 if (htab->tlsdesc_plt)
3323 {
3324 /* If we're not using lazy TLS relocations, don't generate the
3325 PLT and GOT entries they require. */
3326 if ((info->flags & DF_BIND_NOW))
3327 htab->tlsdesc_plt = 0;
3328 else
3329 {
3330 htab->tlsdesc_got = htab->elf.sgot->size;
3331 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3332 /* Reserve room for the initial entry.
3333 FIXME: we could probably do away with it in this case. */
3334 if (htab->elf.splt->size == 0)
3335 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3336 htab->tlsdesc_plt = htab->elf.splt->size;
3337 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3338 }
3339 }
3340
3341 if (htab->elf.sgotplt)
3342 {
3343       /* Don't allocate the .got.plt section if there are neither GOT nor
3344 	 PLT entries and there is no reference to _GLOBAL_OFFSET_TABLE_.  */
3345 if ((htab->elf.hgot == NULL
3346 || !htab->elf.hgot->ref_regular_nonweak)
3347 && (htab->elf.sgotplt->size
3348 == get_elf_backend_data (output_bfd)->got_header_size)
3349 && (htab->elf.splt == NULL
3350 || htab->elf.splt->size == 0)
3351 && (htab->elf.sgot == NULL
3352 || htab->elf.sgot->size == 0)
3353 && (htab->elf.iplt == NULL
3354 || htab->elf.iplt->size == 0)
3355 && (htab->elf.igotplt == NULL
3356 || htab->elf.igotplt->size == 0))
3357 htab->elf.sgotplt->size = 0;
3358 }
3359
3360 if (htab->plt_eh_frame != NULL
3361 && htab->elf.splt != NULL
3362 && htab->elf.splt->size != 0
3363 && !bfd_is_abs_section (htab->elf.splt->output_section)
3364 && _bfd_elf_eh_frame_present (info))
3365 {
3366 const struct elf_x86_64_backend_data *arch_data
3367 = get_elf_x86_64_arch_data (bed);
3368 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3369 }
3370
3371 /* We now have determined the sizes of the various dynamic sections.
3372 Allocate memory for them. */
3373 relocs = FALSE;
3374 for (s = dynobj->sections; s != NULL; s = s->next)
3375 {
3376 if ((s->flags & SEC_LINKER_CREATED) == 0)
3377 continue;
3378
3379 if (s == htab->elf.splt
3380 || s == htab->elf.sgot
3381 || s == htab->elf.sgotplt
3382 || s == htab->elf.iplt
3383 || s == htab->elf.igotplt
3384 || s == htab->plt_bnd
3385 || s == htab->plt_got
3386 || s == htab->plt_eh_frame
3387 || s == htab->sdynbss)
3388 {
3389 /* Strip this section if we don't need it; see the
3390 comment below. */
3391 }
3392 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3393 {
3394 if (s->size != 0 && s != htab->elf.srelplt)
3395 relocs = TRUE;
3396
3397 /* We use the reloc_count field as a counter if we need
3398 to copy relocs into the output file. */
3399 if (s != htab->elf.srelplt)
3400 s->reloc_count = 0;
3401 }
3402 else
3403 {
3404 /* It's not one of our sections, so don't allocate space. */
3405 continue;
3406 }
3407
3408 if (s->size == 0)
3409 {
3410 /* If we don't need this section, strip it from the
3411 output file. This is mostly to handle .rela.bss and
3412 .rela.plt. We must create both sections in
3413 create_dynamic_sections, because they must be created
3414 before the linker maps input sections to output
3415 sections. The linker does that before
3416 adjust_dynamic_symbol is called, and it is that
3417 function which decides whether anything needs to go
3418 into these sections. */
3419
3420 s->flags |= SEC_EXCLUDE;
3421 continue;
3422 }
3423
3424 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3425 continue;
3426
3427 /* Allocate memory for the section contents. We use bfd_zalloc
3428 here in case unused entries are not reclaimed before the
3429 section's contents are written out. This should not happen,
3430 but this way if it does, we get a R_X86_64_NONE reloc instead
3431 of garbage. */
3432 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3433 if (s->contents == NULL)
3434 return FALSE;
3435 }
3436
3437 if (htab->plt_eh_frame != NULL
3438 && htab->plt_eh_frame->contents != NULL)
3439 {
3440 const struct elf_x86_64_backend_data *arch_data
3441 = get_elf_x86_64_arch_data (bed);
3442
3443 memcpy (htab->plt_eh_frame->contents,
3444 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3445 bfd_put_32 (dynobj, htab->elf.splt->size,
3446 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3447 }
3448
3449 if (htab->elf.dynamic_sections_created)
3450 {
3451 /* Add some entries to the .dynamic section. We fill in the
3452 values later, in elf_x86_64_finish_dynamic_sections, but we
3453 must add the entries now so that we get the correct size for
3454 the .dynamic section. The DT_DEBUG entry is filled in by the
3455 dynamic linker and used by the debugger. */
3456 #define add_dynamic_entry(TAG, VAL) \
3457 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3458
3459 if (info->executable)
3460 {
3461 if (!add_dynamic_entry (DT_DEBUG, 0))
3462 return FALSE;
3463 }
3464
3465 if (htab->elf.splt->size != 0)
3466 {
3467 if (!add_dynamic_entry (DT_PLTGOT, 0)
3468 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3469 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3470 || !add_dynamic_entry (DT_JMPREL, 0))
3471 return FALSE;
3472
3473 if (htab->tlsdesc_plt
3474 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3475 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3476 return FALSE;
3477 }
3478
3479 if (relocs)
3480 {
3481 if (!add_dynamic_entry (DT_RELA, 0)
3482 || !add_dynamic_entry (DT_RELASZ, 0)
3483 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3484 return FALSE;
3485
3486 /* If any dynamic relocs apply to a read-only section,
3487 then we need a DT_TEXTREL entry. */
3488 if ((info->flags & DF_TEXTREL) == 0)
3489 elf_link_hash_traverse (&htab->elf,
3490 elf_x86_64_readonly_dynrelocs,
3491 info);
3492
3493 if ((info->flags & DF_TEXTREL) != 0)
3494 {
3495 if (!add_dynamic_entry (DT_TEXTREL, 0))
3496 return FALSE;
3497 }
3498 }
3499 }
3500 #undef add_dynamic_entry
3501
3502 return TRUE;
3503 }
3504
3505 static bfd_boolean
3506 elf_x86_64_always_size_sections (bfd *output_bfd,
3507 struct bfd_link_info *info)
3508 {
3509 asection *tls_sec = elf_hash_table (info)->tls_sec;
3510
3511 if (tls_sec)
3512 {
3513 struct elf_link_hash_entry *tlsbase;
3514
3515 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3516 "_TLS_MODULE_BASE_",
3517 FALSE, FALSE, FALSE);
3518
3519 if (tlsbase && tlsbase->type == STT_TLS)
3520 {
3521 struct elf_x86_64_link_hash_table *htab;
3522 struct bfd_link_hash_entry *bh = NULL;
3523 const struct elf_backend_data *bed
3524 = get_elf_backend_data (output_bfd);
3525
3526 htab = elf_x86_64_hash_table (info);
3527 if (htab == NULL)
3528 return FALSE;
3529
3530 if (!(_bfd_generic_link_add_one_symbol
3531 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3532 tls_sec, 0, NULL, FALSE,
3533 bed->collect, &bh)))
3534 return FALSE;
3535
3536 htab->tls_module_base = bh;
3537
3538 tlsbase = (struct elf_link_hash_entry *)bh;
3539 tlsbase->def_regular = 1;
3540 tlsbase->other = STV_HIDDEN;
3541 tlsbase->root.linker_def = 1;
3542 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3543 }
3544 }
3545
3546 return TRUE;
3547 }
3548
3549 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3550 executables. Rather than setting it to the beginning of the TLS
3551 section, we have to set it to the end. This function may be called
3552    multiple times; it is idempotent.  */
3553
3554 static void
3555 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3556 {
3557 struct elf_x86_64_link_hash_table *htab;
3558 struct bfd_link_hash_entry *base;
3559
3560 if (!info->executable)
3561 return;
3562
3563 htab = elf_x86_64_hash_table (info);
3564 if (htab == NULL)
3565 return;
3566
3567 base = htab->tls_module_base;
3568 if (base == NULL)
3569 return;
3570
3571 base->u.def.value = htab->elf.tls_size;
3572 }
3573
3574 /* Return the base VMA address which should be subtracted from real addresses
3575 when resolving @dtpoff relocation.
3576 This is PT_TLS segment p_vaddr. */
3577
3578 static bfd_vma
3579 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3580 {
3581 /* If tls_sec is NULL, we should have signalled an error already. */
3582 if (elf_hash_table (info)->tls_sec == NULL)
3583 return 0;
3584 return elf_hash_table (info)->tls_sec->vma;
3585 }
3586
3587 /* Return the relocation value for @tpoff relocation
3588 if STT_TLS virtual address is ADDRESS. */
3589
3590 static bfd_vma
3591 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3592 {
3593 struct elf_link_hash_table *htab = elf_hash_table (info);
3594 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3595 bfd_vma static_tls_size;
3596
3597   /* If tls_sec is NULL, we should have signalled an error already.  */
3598 if (htab->tls_sec == NULL)
3599 return 0;
3600
3601 /* Consider special static TLS alignment requirements. */
3602 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
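  /* x86-64 uses TLS Variant II: the static TLS block ends at the thread
     pointer, so the @tpoff value returned here is the (negative) offset
     of ADDRESS from the end of the aligned block.  */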
3603 return address - static_tls_size - htab->tls_sec->vma;
3604 }
3605
3606 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3607 branch? */
3608
3609 static bfd_boolean
3610 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3611 {
3612 /* Opcode Instruction
3613 0xe8 call
3614 0xe9 jump
3615 0x0f 0x8x conditional jump */
3616 return ((offset > 0
3617 && (contents [offset - 1] == 0xe8
3618 || contents [offset - 1] == 0xe9))
3619 || (offset > 1
3620 && contents [offset - 2] == 0x0f
3621 && (contents [offset - 1] & 0xf0) == 0x80));
3622 }
3623
3624 /* Relocate an x86_64 ELF section. */
3625
3626 static bfd_boolean
3627 elf_x86_64_relocate_section (bfd *output_bfd,
3628 struct bfd_link_info *info,
3629 bfd *input_bfd,
3630 asection *input_section,
3631 bfd_byte *contents,
3632 Elf_Internal_Rela *relocs,
3633 Elf_Internal_Sym *local_syms,
3634 asection **local_sections)
3635 {
3636 struct elf_x86_64_link_hash_table *htab;
3637 Elf_Internal_Shdr *symtab_hdr;
3638 struct elf_link_hash_entry **sym_hashes;
3639 bfd_vma *local_got_offsets;
3640 bfd_vma *local_tlsdesc_gotents;
3641 Elf_Internal_Rela *rel;
3642 Elf_Internal_Rela *relend;
3643 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3644
3645 BFD_ASSERT (is_x86_64_elf (input_bfd));
3646
3647 htab = elf_x86_64_hash_table (info);
3648 if (htab == NULL)
3649 return FALSE;
3650 symtab_hdr = &elf_symtab_hdr (input_bfd);
3651 sym_hashes = elf_sym_hashes (input_bfd);
3652 local_got_offsets = elf_local_got_offsets (input_bfd);
3653 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3654
3655 elf_x86_64_set_tls_module_base (info);
3656
3657 rel = relocs;
3658 relend = relocs + input_section->reloc_count;
3659 for (; rel < relend; rel++)
3660 {
3661 unsigned int r_type;
3662 reloc_howto_type *howto;
3663 unsigned long r_symndx;
3664 struct elf_link_hash_entry *h;
3665 struct elf_x86_64_link_hash_entry *eh;
3666 Elf_Internal_Sym *sym;
3667 asection *sec;
3668 bfd_vma off, offplt, plt_offset;
3669 bfd_vma relocation;
3670 bfd_boolean unresolved_reloc;
3671 bfd_reloc_status_type r;
3672 int tls_type;
3673 asection *base_got, *resolved_plt;
3674 bfd_vma st_size;
3675
3676 r_type = ELF32_R_TYPE (rel->r_info);
3677 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3678 || r_type == (int) R_X86_64_GNU_VTENTRY)
3679 continue;
3680
3681 if (r_type >= (int) R_X86_64_standard)
3682 {
3683 (*_bfd_error_handler)
3684 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3685 input_bfd, input_section, r_type);
3686 bfd_set_error (bfd_error_bad_value);
3687 return FALSE;
3688 }
3689
3690 if (r_type != (int) R_X86_64_32
3691 || ABI_64_P (output_bfd))
3692 howto = x86_64_elf_howto_table + r_type;
3693 else
3694 howto = (x86_64_elf_howto_table
3695 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3696 r_symndx = htab->r_sym (rel->r_info);
3697 h = NULL;
3698 sym = NULL;
3699 sec = NULL;
3700 unresolved_reloc = FALSE;
3701 if (r_symndx < symtab_hdr->sh_info)
3702 {
3703 sym = local_syms + r_symndx;
3704 sec = local_sections[r_symndx];
3705
3706 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3707 &sec, rel);
3708 st_size = sym->st_size;
3709
3710 /* Relocate against local STT_GNU_IFUNC symbol. */
3711 if (!info->relocatable
3712 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3713 {
3714 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3715 rel, FALSE);
3716 if (h == NULL)
3717 abort ();
3718
3719 /* Set STT_GNU_IFUNC symbol value. */
3720 h->root.u.def.value = sym->st_value;
3721 h->root.u.def.section = sec;
3722 }
3723 }
3724 else
3725 {
3726 bfd_boolean warned ATTRIBUTE_UNUSED;
3727 bfd_boolean ignored ATTRIBUTE_UNUSED;
3728
3729 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3730 r_symndx, symtab_hdr, sym_hashes,
3731 h, sec, relocation,
3732 unresolved_reloc, warned, ignored);
3733 st_size = h->size;
3734 }
3735
3736 if (sec != NULL && discarded_section (sec))
3737 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3738 rel, 1, relend, howto, 0, contents);
3739
3740 if (info->relocatable)
3741 continue;
3742
3743 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3744 {
3745 if (r_type == R_X86_64_64)
3746 {
3747 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3748 zero-extend it to 64bit if addend is zero. */
3749 r_type = R_X86_64_32;
3750 memset (contents + rel->r_offset + 4, 0, 4);
3751 }
3752 else if (r_type == R_X86_64_SIZE64)
3753 {
3754 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3755 zero-extend it to 64bit if addend is zero. */
3756 r_type = R_X86_64_SIZE32;
3757 memset (contents + rel->r_offset + 4, 0, 4);
3758 }
3759 }
3760
3761 eh = (struct elf_x86_64_link_hash_entry *) h;
3762
3763       /* Since an STT_GNU_IFUNC symbol must go through the PLT, we
3764 	 handle it here if it is defined in a non-shared object.  */
3765 if (h != NULL
3766 && h->type == STT_GNU_IFUNC
3767 && h->def_regular)
3768 {
3769 bfd_vma plt_index;
3770 const char *name;
3771
3772 if ((input_section->flags & SEC_ALLOC) == 0
3773 || h->plt.offset == (bfd_vma) -1)
3774 abort ();
3775
3776 /* STT_GNU_IFUNC symbol must go through PLT. */
3777 if (htab->elf.splt != NULL)
3778 {
3779 if (htab->plt_bnd != NULL)
3780 {
3781 resolved_plt = htab->plt_bnd;
3782 plt_offset = eh->plt_bnd.offset;
3783 }
3784 else
3785 {
3786 resolved_plt = htab->elf.splt;
3787 plt_offset = h->plt.offset;
3788 }
3789 }
3790 else
3791 {
3792 resolved_plt = htab->elf.iplt;
3793 plt_offset = h->plt.offset;
3794 }
3795
3796 relocation = (resolved_plt->output_section->vma
3797 + resolved_plt->output_offset + plt_offset);
3798
3799 switch (r_type)
3800 {
3801 default:
3802 if (h->root.root.string)
3803 name = h->root.root.string;
3804 else
3805 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3806 NULL);
3807 (*_bfd_error_handler)
3808 (_("%B: relocation %s against STT_GNU_IFUNC "
3809 "symbol `%s' isn't handled by %s"), input_bfd,
3810 x86_64_elf_howto_table[r_type].name,
3811 name, __FUNCTION__);
3812 bfd_set_error (bfd_error_bad_value);
3813 return FALSE;
3814
3815 case R_X86_64_32S:
3816 if (info->shared)
3817 abort ();
3818 goto do_relocation;
3819
3820 case R_X86_64_32:
3821 if (ABI_64_P (output_bfd))
3822 goto do_relocation;
3823 /* FALLTHROUGH */
3824 case R_X86_64_64:
3825 if (rel->r_addend != 0)
3826 {
3827 if (h->root.root.string)
3828 name = h->root.root.string;
3829 else
3830 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3831 sym, NULL);
3832 (*_bfd_error_handler)
3833 (_("%B: relocation %s against STT_GNU_IFUNC "
3834 "symbol `%s' has non-zero addend: %d"),
3835 input_bfd, x86_64_elf_howto_table[r_type].name,
3836 name, rel->r_addend);
3837 bfd_set_error (bfd_error_bad_value);
3838 return FALSE;
3839 }
3840
3841 	      /* Generate a dynamic relocation only when there is a
3842 		 non-GOT reference in a shared object.  */
3843 if (info->shared && h->non_got_ref)
3844 {
3845 Elf_Internal_Rela outrel;
3846 asection *sreloc;
3847
3848 /* Need a dynamic relocation to get the real function
3849 address. */
3850 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3851 info,
3852 input_section,
3853 rel->r_offset);
3854 if (outrel.r_offset == (bfd_vma) -1
3855 || outrel.r_offset == (bfd_vma) -2)
3856 abort ();
3857
3858 outrel.r_offset += (input_section->output_section->vma
3859 + input_section->output_offset);
3860
3861 if (h->dynindx == -1
3862 || h->forced_local
3863 || info->executable)
3864 {
3865 /* This symbol is resolved locally. */
3866 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3867 outrel.r_addend = (h->root.u.def.value
3868 + h->root.u.def.section->output_section->vma
3869 + h->root.u.def.section->output_offset);
3870 }
3871 else
3872 {
3873 outrel.r_info = htab->r_info (h->dynindx, r_type);
3874 outrel.r_addend = 0;
3875 }
3876
3877 sreloc = htab->elf.irelifunc;
3878 elf_append_rela (output_bfd, sreloc, &outrel);
3879
3880 /* If this reloc is against an external symbol, we
3881 do not want to fiddle with the addend. Otherwise,
3882 we need to include the symbol value so that it
3883 becomes an addend for the dynamic reloc. For an
3884 		     internal symbol, the addend was set above to the symbol's address.  */
3885 continue;
3886 }
3887 /* FALLTHROUGH */
3888 case R_X86_64_PC32:
3889 case R_X86_64_PC32_BND:
3890 case R_X86_64_PC64:
3891 case R_X86_64_PLT32:
3892 case R_X86_64_PLT32_BND:
3893 goto do_relocation;
3894
3895 case R_X86_64_GOTPCREL:
3896 case R_X86_64_GOTPCREL64:
3897 base_got = htab->elf.sgot;
3898 off = h->got.offset;
3899
3900 if (base_got == NULL)
3901 abort ();
3902
3903 if (off == (bfd_vma) -1)
3904 {
3905 /* We can't use h->got.offset here to save state, or
3906 even just remember the offset, as finish_dynamic_symbol
3907 would use that as offset into .got. */
3908
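		      /* Derive a GOT slot from the PLT entry instead: entries
			 in .plt index .got.plt past its three reserved slots,
			 while .iplt entries (static executables) index
			 .igot.plt directly.  */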
3909 if (htab->elf.splt != NULL)
3910 {
3911 plt_index = h->plt.offset / plt_entry_size - 1;
3912 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3913 base_got = htab->elf.sgotplt;
3914 }
3915 else
3916 {
3917 plt_index = h->plt.offset / plt_entry_size;
3918 off = plt_index * GOT_ENTRY_SIZE;
3919 base_got = htab->elf.igotplt;
3920 }
3921
3922 if (h->dynindx == -1
3923 || h->forced_local
3924 || info->symbolic)
3925 {
3926 			  /* This references the local definition.  We must
3927 initialize this entry in the global offset table.
3928 Since the offset must always be a multiple of 8,
3929 we use the least significant bit to record
3930 whether we have initialized it already.
3931
3932 When doing a dynamic link, we create a .rela.got
3933 relocation entry to initialize the value. This
3934 is done in the finish_dynamic_symbol routine. */
3935 if ((off & 1) != 0)
3936 off &= ~1;
3937 else
3938 {
3939 bfd_put_64 (output_bfd, relocation,
3940 base_got->contents + off);
3941 /* Note that this is harmless for the GOTPLT64
3942 case, as -1 | 1 still is -1. */
3943 h->got.offset |= 1;
3944 }
3945 }
3946 }
3947
3948 relocation = (base_got->output_section->vma
3949 + base_got->output_offset + off);
3950
3951 goto do_relocation;
3952 }
3953 }
3954
3955 /* When generating a shared object, the relocations handled here are
3956 copied into the output file to be resolved at run time. */
3957 switch (r_type)
3958 {
3959 case R_X86_64_GOT32:
3960 case R_X86_64_GOT64:
3961 /* Relocation is to the entry for this symbol in the global
3962 offset table. */
3963 case R_X86_64_GOTPCREL:
3964 case R_X86_64_GOTPCREL64:
3965 /* Use global offset table entry as symbol value. */
3966 case R_X86_64_GOTPLT64:
3967 	  /* This is obsolete and treated the same as GOT64.  */
3968 base_got = htab->elf.sgot;
3969
3970 if (htab->elf.sgot == NULL)
3971 abort ();
3972
3973 if (h != NULL)
3974 {
3975 bfd_boolean dyn;
3976
3977 off = h->got.offset;
3978 if (h->needs_plt
3979 && h->plt.offset != (bfd_vma)-1
3980 && off == (bfd_vma)-1)
3981 {
3982 /* We can't use h->got.offset here to save
3983 state, or even just remember the offset, as
3984 finish_dynamic_symbol would use that as offset into
3985 .got. */
3986 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3987 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3988 base_got = htab->elf.sgotplt;
3989 }
3990
3991 dyn = htab->elf.dynamic_sections_created;
3992
3993 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3994 || (info->shared
3995 && SYMBOL_REFERENCES_LOCAL (info, h))
3996 || (ELF_ST_VISIBILITY (h->other)
3997 && h->root.type == bfd_link_hash_undefweak))
3998 {
3999 /* This is actually a static link, or it is a -Bsymbolic
4000 link and the symbol is defined locally, or the symbol
4001 was forced to be local because of a version file. We
4002 must initialize this entry in the global offset table.
4003 Since the offset must always be a multiple of 8, we
4004 use the least significant bit to record whether we
4005 have initialized it already.
4006
4007 When doing a dynamic link, we create a .rela.got
4008 relocation entry to initialize the value. This is
4009 done in the finish_dynamic_symbol routine. */
4010 if ((off & 1) != 0)
4011 off &= ~1;
4012 else
4013 {
4014 bfd_put_64 (output_bfd, relocation,
4015 base_got->contents + off);
4016 /* Note that this is harmless for the GOTPLT64 case,
4017 as -1 | 1 still is -1. */
4018 h->got.offset |= 1;
4019 }
4020 }
4021 else
4022 unresolved_reloc = FALSE;
4023 }
4024 else
4025 {
4026 if (local_got_offsets == NULL)
4027 abort ();
4028
4029 off = local_got_offsets[r_symndx];
4030
4031 /* The offset must always be a multiple of 8. We use
4032 the least significant bit to record whether we have
4033 already generated the necessary reloc. */
4034 if ((off & 1) != 0)
4035 off &= ~1;
4036 else
4037 {
4038 bfd_put_64 (output_bfd, relocation,
4039 base_got->contents + off);
4040
4041 if (info->shared)
4042 {
4043 asection *s;
4044 Elf_Internal_Rela outrel;
4045
4046 /* We need to generate a R_X86_64_RELATIVE reloc
4047 for the dynamic linker. */
4048 s = htab->elf.srelgot;
4049 if (s == NULL)
4050 abort ();
4051
4052 outrel.r_offset = (base_got->output_section->vma
4053 + base_got->output_offset
4054 + off);
4055 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4056 outrel.r_addend = relocation;
4057 elf_append_rela (output_bfd, s, &outrel);
4058 }
4059
4060 local_got_offsets[r_symndx] |= 1;
4061 }
4062 }
4063
4064 if (off >= (bfd_vma) -2)
4065 abort ();
4066
4067 relocation = base_got->output_section->vma
4068 + base_got->output_offset + off;
4069 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
4070 relocation -= htab->elf.sgotplt->output_section->vma
4071 - htab->elf.sgotplt->output_offset;
4072
4073 break;
4074
4075 case R_X86_64_GOTOFF64:
4076 /* Relocation is relative to the start of the global offset
4077 table. */
4078
4079 	  /* Check to make sure it isn't a protected function or data
4080 	     symbol for a shared library, since it may not be local when
4081 	     used as a function address or with a copy relocation.  We
4082 	     also need to make sure that the symbol is referenced locally.  */
4083 if (info->shared && h)
4084 {
4085 if (!h->def_regular)
4086 {
4087 const char *v;
4088
4089 switch (ELF_ST_VISIBILITY (h->other))
4090 {
4091 case STV_HIDDEN:
4092 v = _("hidden symbol");
4093 break;
4094 case STV_INTERNAL:
4095 v = _("internal symbol");
4096 break;
4097 case STV_PROTECTED:
4098 v = _("protected symbol");
4099 break;
4100 default:
4101 v = _("symbol");
4102 break;
4103 }
4104
4105 (*_bfd_error_handler)
4106 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4107 input_bfd, v, h->root.root.string);
4108 bfd_set_error (bfd_error_bad_value);
4109 return FALSE;
4110 }
4111 else if (!info->executable
4112 && !SYMBOL_REFERENCES_LOCAL (info, h)
4113 && (h->type == STT_FUNC
4114 || h->type == STT_OBJECT)
4115 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4116 {
4117 (*_bfd_error_handler)
4118 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4119 input_bfd,
4120 h->type == STT_FUNC ? "function" : "data",
4121 h->root.root.string);
4122 bfd_set_error (bfd_error_bad_value);
4123 return FALSE;
4124 }
4125 }
4126
4127 /* Note that sgot is not involved in this
4128 calculation. We always want the start of .got.plt. If we
4129 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4130 permitted by the ABI, we might have to change this
4131 calculation. */
4132 relocation -= htab->elf.sgotplt->output_section->vma
4133 + htab->elf.sgotplt->output_offset;
4134 break;
4135
4136 case R_X86_64_GOTPC32:
4137 case R_X86_64_GOTPC64:
4138 /* Use global offset table as symbol value. */
4139 relocation = htab->elf.sgotplt->output_section->vma
4140 + htab->elf.sgotplt->output_offset;
4141 unresolved_reloc = FALSE;
4142 break;
4143
4144 case R_X86_64_PLTOFF64:
4145 /* Relocation is PLT entry relative to GOT. For local
4146 symbols it's the symbol itself relative to GOT. */
4147 if (h != NULL
4148 /* See PLT32 handling. */
4149 && h->plt.offset != (bfd_vma) -1
4150 && htab->elf.splt != NULL)
4151 {
4152 if (htab->plt_bnd != NULL)
4153 {
4154 resolved_plt = htab->plt_bnd;
4155 plt_offset = eh->plt_bnd.offset;
4156 }
4157 else
4158 {
4159 resolved_plt = htab->elf.splt;
4160 plt_offset = h->plt.offset;
4161 }
4162
4163 relocation = (resolved_plt->output_section->vma
4164 + resolved_plt->output_offset
4165 + plt_offset);
4166 unresolved_reloc = FALSE;
4167 }
4168
4169 relocation -= htab->elf.sgotplt->output_section->vma
4170 + htab->elf.sgotplt->output_offset;
4171 break;
4172
4173 case R_X86_64_PLT32:
4174 case R_X86_64_PLT32_BND:
4175 /* Relocation is to the entry for this symbol in the
4176 procedure linkage table. */
4177
4178 /* Resolve a PLT32 reloc against a local symbol directly,
4179 without using the procedure linkage table. */
4180 if (h == NULL)
4181 break;
4182
4183 if ((h->plt.offset == (bfd_vma) -1
4184 && eh->plt_got.offset == (bfd_vma) -1)
4185 || htab->elf.splt == NULL)
4186 {
4187 /* We didn't make a PLT entry for this symbol. This
4188 happens when statically linking PIC code, or when
4189 using -Bsymbolic. */
4190 break;
4191 }
4192
4193 if (h->plt.offset != (bfd_vma) -1)
4194 {
4195 if (htab->plt_bnd != NULL)
4196 {
4197 resolved_plt = htab->plt_bnd;
4198 plt_offset = eh->plt_bnd.offset;
4199 }
4200 else
4201 {
4202 resolved_plt = htab->elf.splt;
4203 plt_offset = h->plt.offset;
4204 }
4205 }
4206 else
4207 {
4208 /* Use the GOT PLT. */
4209 resolved_plt = htab->plt_got;
4210 plt_offset = eh->plt_got.offset;
4211 }
4212
4213 relocation = (resolved_plt->output_section->vma
4214 + resolved_plt->output_offset
4215 + plt_offset);
4216 unresolved_reloc = FALSE;
4217 break;
4218
4219 case R_X86_64_SIZE32:
4220 case R_X86_64_SIZE64:
4221 /* Set to symbol size. */
4222 relocation = st_size;
4223 goto direct;
4224
4225 case R_X86_64_PC8:
4226 case R_X86_64_PC16:
4227 case R_X86_64_PC32:
4228 case R_X86_64_PC32_BND:
4229 /* Don't complain about -fPIC if the symbol is undefined when
4230 	     building an executable.  */
4231 if (info->shared
4232 && (input_section->flags & SEC_ALLOC) != 0
4233 && (input_section->flags & SEC_READONLY) != 0
4234 && h != NULL
4235 && !(info->executable
4236 && h->root.type == bfd_link_hash_undefined))
4237 {
4238 bfd_boolean fail = FALSE;
4239 bfd_boolean branch
4240 = ((r_type == R_X86_64_PC32
4241 || r_type == R_X86_64_PC32_BND)
4242 && is_32bit_relative_branch (contents, rel->r_offset));
4243
4244 if (SYMBOL_REFERENCES_LOCAL (info, h))
4245 {
4246 		      /* Symbol is referenced locally.  Make sure it is
4247 			 defined locally, or that the reference is a branch.  */
4248 fail = !h->def_regular && !branch;
4249 }
4250 else if (!(info->executable
4251 && (h->needs_copy || eh->needs_copy)))
4252 {
4253 		      /* Symbol doesn't need a copy reloc and isn't referenced
4254 			 locally.  We only allow a branch to a symbol with
4255 			 non-default visibility.  */
4256 fail = (!branch
4257 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4258 }
4259
4260 if (fail)
4261 {
4262 const char *fmt;
4263 const char *v;
4264 const char *pic = "";
4265
4266 switch (ELF_ST_VISIBILITY (h->other))
4267 {
4268 case STV_HIDDEN:
4269 v = _("hidden symbol");
4270 break;
4271 case STV_INTERNAL:
4272 v = _("internal symbol");
4273 break;
4274 case STV_PROTECTED:
4275 v = _("protected symbol");
4276 break;
4277 default:
4278 v = _("symbol");
4279 pic = _("; recompile with -fPIC");
4280 break;
4281 }
4282
4283 if (h->def_regular)
4284 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4285 else
4286 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4287
4288 (*_bfd_error_handler) (fmt, input_bfd,
4289 x86_64_elf_howto_table[r_type].name,
4290 v, h->root.root.string, pic);
4291 bfd_set_error (bfd_error_bad_value);
4292 return FALSE;
4293 }
4294 }
4295 /* Fall through. */
4296
4297 case R_X86_64_8:
4298 case R_X86_64_16:
4299 case R_X86_64_32:
4300 case R_X86_64_PC64:
4301 case R_X86_64_64:
4302 /* FIXME: The ABI says the linker should make sure the value is
4303 	     the same when it's zero-extended to 64 bits.  */
4304
4305 direct:
4306 if ((input_section->flags & SEC_ALLOC) == 0)
4307 break;
4308
4309 	  /* Don't copy a pc-relative relocation into the output file
4310 	     if the symbol needs a copy reloc or the symbol is undefined
4311 	     when building an executable.  */
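	  /* Roughly: for a shared object the relocation must usually be
	     emitted into the output file; for an executable, the
	     ELIMINATE_COPY_RELOCS case below emits a dynamic relocation
	     instead of a copy reloc when the symbol is dynamic and has no
	     regular definition.  */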
4312 if ((info->shared
4313 && !(info->executable
4314 && h != NULL
4315 && (h->needs_copy
4316 || eh->needs_copy
4317 || h->root.type == bfd_link_hash_undefined)
4318 && IS_X86_64_PCREL_TYPE (r_type))
4319 && (h == NULL
4320 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4321 || h->root.type != bfd_link_hash_undefweak)
4322 && ((! IS_X86_64_PCREL_TYPE (r_type)
4323 && r_type != R_X86_64_SIZE32
4324 && r_type != R_X86_64_SIZE64)
4325 || ! SYMBOL_CALLS_LOCAL (info, h)))
4326 || (ELIMINATE_COPY_RELOCS
4327 && !info->shared
4328 && h != NULL
4329 && h->dynindx != -1
4330 && !h->non_got_ref
4331 && ((h->def_dynamic
4332 && !h->def_regular)
4333 || h->root.type == bfd_link_hash_undefweak
4334 || h->root.type == bfd_link_hash_undefined)))
4335 {
4336 Elf_Internal_Rela outrel;
4337 bfd_boolean skip, relocate;
4338 asection *sreloc;
4339
4340 /* When generating a shared object, these relocations
4341 are copied into the output file to be resolved at run
4342 time. */
4343 skip = FALSE;
4344 relocate = FALSE;
4345
4346 outrel.r_offset =
4347 _bfd_elf_section_offset (output_bfd, info, input_section,
4348 rel->r_offset);
4349 if (outrel.r_offset == (bfd_vma) -1)
4350 skip = TRUE;
4351 else if (outrel.r_offset == (bfd_vma) -2)
4352 skip = TRUE, relocate = TRUE;
4353
4354 outrel.r_offset += (input_section->output_section->vma
4355 + input_section->output_offset);
4356
4357 if (skip)
4358 memset (&outrel, 0, sizeof outrel);
4359
4360 /* h->dynindx may be -1 if this symbol was marked to
4361 become local. */
4362 else if (h != NULL
4363 && h->dynindx != -1
4364 && (IS_X86_64_PCREL_TYPE (r_type)
4365 || ! info->shared
4366 || ! SYMBOLIC_BIND (info, h)
4367 || ! h->def_regular))
4368 {
4369 outrel.r_info = htab->r_info (h->dynindx, r_type);
4370 outrel.r_addend = rel->r_addend;
4371 }
4372 else
4373 {
4374 /* This symbol is local, or marked to become local. */
4375 if (r_type == htab->pointer_r_type)
4376 {
4377 relocate = TRUE;
4378 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4379 outrel.r_addend = relocation + rel->r_addend;
4380 }
4381 else if (r_type == R_X86_64_64
4382 && !ABI_64_P (output_bfd))
4383 {
4384 relocate = TRUE;
4385 outrel.r_info = htab->r_info (0,
4386 R_X86_64_RELATIVE64);
4387 outrel.r_addend = relocation + rel->r_addend;
4388 /* Check addend overflow. */
4389 if ((outrel.r_addend & 0x80000000)
4390 != (rel->r_addend & 0x80000000))
4391 {
4392 const char *name;
4393 int addend = rel->r_addend;
4394 if (h && h->root.root.string)
4395 name = h->root.root.string;
4396 else
4397 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4398 sym, NULL);
4399 if (addend < 0)
4400 (*_bfd_error_handler)
4401 (_("%B: addend -0x%x in relocation %s against "
4402 "symbol `%s' at 0x%lx in section `%A' is "
4403 "out of range"),
4404 input_bfd, input_section, addend,
4405 x86_64_elf_howto_table[r_type].name,
4406 name, (unsigned long) rel->r_offset);
4407 else
4408 (*_bfd_error_handler)
4409 (_("%B: addend 0x%x in relocation %s against "
4410 "symbol `%s' at 0x%lx in section `%A' is "
4411 "out of range"),
4412 input_bfd, input_section, addend,
4413 x86_64_elf_howto_table[r_type].name,
4414 name, (unsigned long) rel->r_offset);
4415 bfd_set_error (bfd_error_bad_value);
4416 return FALSE;
4417 }
4418 }
4419 else
4420 {
4421 long sindx;
4422
4423 if (bfd_is_abs_section (sec))
4424 sindx = 0;
4425 else if (sec == NULL || sec->owner == NULL)
4426 {
4427 bfd_set_error (bfd_error_bad_value);
4428 return FALSE;
4429 }
4430 else
4431 {
4432 asection *osec;
4433
4434 /* We are turning this relocation into one
4435 against a section symbol. It would be
4436 proper to subtract the symbol's value,
4437 osec->vma, from the emitted reloc addend,
4438 but ld.so expects buggy relocs. */
4439 osec = sec->output_section;
4440 sindx = elf_section_data (osec)->dynindx;
4441 if (sindx == 0)
4442 {
4443 asection *oi = htab->elf.text_index_section;
4444 sindx = elf_section_data (oi)->dynindx;
4445 }
4446 BFD_ASSERT (sindx != 0);
4447 }
4448
4449 outrel.r_info = htab->r_info (sindx, r_type);
4450 outrel.r_addend = relocation + rel->r_addend;
4451 }
4452 }
4453
4454 sreloc = elf_section_data (input_section)->sreloc;
4455
4456 if (sreloc == NULL || sreloc->contents == NULL)
4457 {
4458 r = bfd_reloc_notsupported;
4459 goto check_relocation_error;
4460 }
4461
4462 elf_append_rela (output_bfd, sreloc, &outrel);
4463
4464 /* If this reloc is against an external symbol, we do
4465 not want to fiddle with the addend. Otherwise, we
4466 need to include the symbol value so that it becomes
4467 an addend for the dynamic reloc. */
4468 if (! relocate)
4469 continue;
4470 }
4471
4472 break;
4473
4474 case R_X86_64_TLSGD:
4475 case R_X86_64_GOTPC32_TLSDESC:
4476 case R_X86_64_TLSDESC_CALL:
4477 case R_X86_64_GOTTPOFF:
4478 tls_type = GOT_UNKNOWN;
4479 if (h == NULL && local_got_offsets)
4480 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4481 else if (h != NULL)
4482 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4483
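	  /* elf_x86_64_tls_transition may rewrite R_TYPE to the relocation
	     used by an optimized TLS model, e.g. R_X86_64_TPOFF32 for a
	     GD->LE transition when linking an executable.  */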
4484 if (! elf_x86_64_tls_transition (info, input_bfd,
4485 input_section, contents,
4486 symtab_hdr, sym_hashes,
4487 &r_type, tls_type, rel,
4488 relend, h, r_symndx))
4489 return FALSE;
4490
4491 if (r_type == R_X86_64_TPOFF32)
4492 {
4493 bfd_vma roff = rel->r_offset;
4494
4495 BFD_ASSERT (! unresolved_reloc);
4496
4497 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4498 {
4499 /* GD->LE transition. For 64bit, change
4500 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4501 .word 0x6666; rex64; call __tls_get_addr
4502 into:
4503 movq %fs:0, %rax
4504 leaq foo@tpoff(%rax), %rax
4505 For 32bit, change
4506 leaq foo@tlsgd(%rip), %rdi
4507 .word 0x6666; rex64; call __tls_get_addr
4508 into:
4509 movl %fs:0, %eax
4510 leaq foo@tpoff(%rax), %rax
4511 For largepic, change:
4512 leaq foo@tlsgd(%rip), %rdi
4513 movabsq $__tls_get_addr@pltoff, %rax
4514 addq %rbx, %rax
4515 call *%rax
4516 into:
4517 movq %fs:0, %rax
4518 leaq foo@tpoff(%rax), %rax
4519 nopw 0x0(%rax,%rax,1) */
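		  /* ROFF is the offset of the 32-bit @tlsgd operand; the
		     replacement sequences leave a 32-bit hole at ROFF + 8
		     (+1 for largepic) which is filled with the @tpoff value
		     below.  */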
4520 int largepic = 0;
4521 if (ABI_64_P (output_bfd)
4522 && contents[roff + 5] == (bfd_byte) '\xb8')
4523 {
4524 memcpy (contents + roff - 3,
4525 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4526 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4527 largepic = 1;
4528 }
4529 else if (ABI_64_P (output_bfd))
4530 memcpy (contents + roff - 4,
4531 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4532 16);
4533 else
4534 memcpy (contents + roff - 3,
4535 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4536 15);
4537 bfd_put_32 (output_bfd,
4538 elf_x86_64_tpoff (info, relocation),
4539 contents + roff + 8 + largepic);
4540 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4541 rel++;
4542 continue;
4543 }
4544 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4545 {
4546 /* GDesc -> LE transition.
4547 It's originally something like:
4548 leaq x@tlsdesc(%rip), %rax
4549
4550 Change it to:
4551 movl $x@tpoff, %rax. */
4552
4553 unsigned int val, type;
4554
4555 type = bfd_get_8 (input_bfd, contents + roff - 3);
4556 val = bfd_get_8 (input_bfd, contents + roff - 1);
4557 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4558 contents + roff - 3);
4559 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4560 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4561 contents + roff - 1);
4562 bfd_put_32 (output_bfd,
4563 elf_x86_64_tpoff (info, relocation),
4564 contents + roff);
4565 continue;
4566 }
4567 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4568 {
4569 /* GDesc -> LE transition.
4570 It's originally:
4571 call *(%rax)
4572 Turn it into:
4573 xchg %ax,%ax. */
4574 bfd_put_8 (output_bfd, 0x66, contents + roff);
4575 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4576 continue;
4577 }
4578 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4579 {
4580 /* IE->LE transition:
4581 For 64bit, originally it can be one of:
4582 movq foo@gottpoff(%rip), %reg
4583 addq foo@gottpoff(%rip), %reg
4584 We change it into:
4585 movq $foo, %reg
4586 leaq foo(%reg), %reg
4587 addq $foo, %reg.
4588 For 32bit, originally it can be one of:
4589 movq foo@gottpoff(%rip), %reg
4590 addl foo@gottpoff(%rip), %reg
4591 We change it into:
4592 movq $foo, %reg
4593 leal foo(%reg), %reg
4594 addl $foo, %reg. */
4595
4596 unsigned int val, type, reg;
4597
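		  /* VAL is the REX prefix of the original instruction (0 if
		     there is none), TYPE its opcode byte (0x8b for movq,
		     otherwise add), and REG the ModRM reg field.  */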
4598 if (roff >= 3)
4599 val = bfd_get_8 (input_bfd, contents + roff - 3);
4600 else
4601 val = 0;
4602 type = bfd_get_8 (input_bfd, contents + roff - 2);
4603 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4604 reg >>= 3;
4605 if (type == 0x8b)
4606 {
4607 /* movq */
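			  /* The immediate form puts the register in the ModRM
			     rm field, so REX.R (0x4c) becomes REX.B (0x49);
			     likewise 0x44 becomes 0x41 for x32.  */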
4608 if (val == 0x4c)
4609 bfd_put_8 (output_bfd, 0x49,
4610 contents + roff - 3);
4611 else if (!ABI_64_P (output_bfd) && val == 0x44)
4612 bfd_put_8 (output_bfd, 0x41,
4613 contents + roff - 3);
4614 bfd_put_8 (output_bfd, 0xc7,
4615 contents + roff - 2);
4616 bfd_put_8 (output_bfd, 0xc0 | reg,
4617 contents + roff - 1);
4618 }
4619 else if (reg == 4)
4620 {
4621 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4622 is special */
4623 if (val == 0x4c)
4624 bfd_put_8 (output_bfd, 0x49,
4625 contents + roff - 3);
4626 else if (!ABI_64_P (output_bfd) && val == 0x44)
4627 bfd_put_8 (output_bfd, 0x41,
4628 contents + roff - 3);
4629 bfd_put_8 (output_bfd, 0x81,
4630 contents + roff - 2);
4631 bfd_put_8 (output_bfd, 0xc0 | reg,
4632 contents + roff - 1);
4633 }
4634 else
4635 {
4636 /* addq/addl -> leaq/leal */
4637 if (val == 0x4c)
4638 bfd_put_8 (output_bfd, 0x4d,
4639 contents + roff - 3);
4640 else if (!ABI_64_P (output_bfd) && val == 0x44)
4641 bfd_put_8 (output_bfd, 0x45,
4642 contents + roff - 3);
4643 bfd_put_8 (output_bfd, 0x8d,
4644 contents + roff - 2);
4645 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4646 contents + roff - 1);
4647 }
4648 bfd_put_32 (output_bfd,
4649 elf_x86_64_tpoff (info, relocation),
4650 contents + roff);
4651 continue;
4652 }
4653 else
4654 BFD_ASSERT (FALSE);
4655 }
4656
4657 if (htab->elf.sgot == NULL)
4658 abort ();
4659
4660 if (h != NULL)
4661 {
4662 off = h->got.offset;
4663 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4664 }
4665 else
4666 {
4667 if (local_got_offsets == NULL)
4668 abort ();
4669
4670 off = local_got_offsets[r_symndx];
4671 offplt = local_tlsdesc_gotents[r_symndx];
4672 }
4673
4674 if ((off & 1) != 0)
4675 off &= ~1;
4676 else
4677 {
4678 Elf_Internal_Rela outrel;
4679 int dr_type, indx;
4680 asection *sreloc;
4681
4682 if (htab->elf.srelgot == NULL)
4683 abort ();
4684
4685 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4686
4687 if (GOT_TLS_GDESC_P (tls_type))
4688 {
4689 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4690 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4691 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4692 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4693 + htab->elf.sgotplt->output_offset
4694 + offplt
4695 + htab->sgotplt_jump_table_size);
4696 sreloc = htab->elf.srelplt;
4697 if (indx == 0)
4698 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4699 else
4700 outrel.r_addend = 0;
4701 elf_append_rela (output_bfd, sreloc, &outrel);
4702 }
4703
4704 sreloc = htab->elf.srelgot;
4705
4706 outrel.r_offset = (htab->elf.sgot->output_section->vma
4707 + htab->elf.sgot->output_offset + off);
4708
4709 if (GOT_TLS_GD_P (tls_type))
4710 dr_type = R_X86_64_DTPMOD64;
4711 else if (GOT_TLS_GDESC_P (tls_type))
4712 goto dr_done;
4713 else
4714 dr_type = R_X86_64_TPOFF64;
4715
4716 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4717 outrel.r_addend = 0;
4718 if ((dr_type == R_X86_64_TPOFF64
4719 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4720 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4721 outrel.r_info = htab->r_info (indx, dr_type);
4722
4723 elf_append_rela (output_bfd, sreloc, &outrel);
4724
4725 if (GOT_TLS_GD_P (tls_type))
4726 {
4727 if (indx == 0)
4728 {
4729 BFD_ASSERT (! unresolved_reloc);
4730 bfd_put_64 (output_bfd,
4731 relocation - elf_x86_64_dtpoff_base (info),
4732 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4733 }
4734 else
4735 {
4736 bfd_put_64 (output_bfd, 0,
4737 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4738 outrel.r_info = htab->r_info (indx,
4739 R_X86_64_DTPOFF64);
4740 outrel.r_offset += GOT_ENTRY_SIZE;
4741 elf_append_rela (output_bfd, sreloc,
4742 &outrel);
4743 }
4744 }
4745
4746 dr_done:
4747 if (h != NULL)
4748 h->got.offset |= 1;
4749 else
4750 local_got_offsets[r_symndx] |= 1;
4751 }
4752
4753 if (off >= (bfd_vma) -2
4754 && ! GOT_TLS_GDESC_P (tls_type))
4755 abort ();
4756 if (r_type == ELF32_R_TYPE (rel->r_info))
4757 {
4758 if (r_type == R_X86_64_GOTPC32_TLSDESC
4759 || r_type == R_X86_64_TLSDESC_CALL)
4760 relocation = htab->elf.sgotplt->output_section->vma
4761 + htab->elf.sgotplt->output_offset
4762 + offplt + htab->sgotplt_jump_table_size;
4763 else
4764 relocation = htab->elf.sgot->output_section->vma
4765 + htab->elf.sgot->output_offset + off;
4766 unresolved_reloc = FALSE;
4767 }
4768 else
4769 {
4770 bfd_vma roff = rel->r_offset;
4771
4772 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4773 {
4774 /* GD->IE transition. For 64bit, change
4775 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4776 .word 0x6666; rex64; call __tls_get_addr@plt
4777 into:
4778 movq %fs:0, %rax
4779 addq foo@gottpoff(%rip), %rax
4780 For 32bit, change
4781 leaq foo@tlsgd(%rip), %rdi
4782 .word 0x6666; rex64; call __tls_get_addr@plt
4783 into:
4784 movl %fs:0, %eax
4785 addq foo@gottpoff(%rip), %rax
4786 For largepic, change:
4787 leaq foo@tlsgd(%rip), %rdi
4788 movabsq $__tls_get_addr@pltoff, %rax
4789 addq %rbx, %rax
4790 call *%rax
4791 into:
4792 movq %fs:0, %rax
4793 			 addq foo@gottpoff(%rip), %rax
4794 nopw 0x0(%rax,%rax,1) */
4795 int largepic = 0;
4796 if (ABI_64_P (output_bfd)
4797 && contents[roff + 5] == (bfd_byte) '\xb8')
4798 {
4799 memcpy (contents + roff - 3,
4800 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4801 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4802 largepic = 1;
4803 }
4804 else if (ABI_64_P (output_bfd))
4805 memcpy (contents + roff - 4,
4806 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4807 16);
4808 else
4809 memcpy (contents + roff - 3,
4810 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4811 15);
4812
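		      /* The new addq is RIP-relative; its displacement is
			 measured from the end of the instruction, which lies
			 12 bytes (13 for largepic) past ROFF.  */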
4813 relocation = (htab->elf.sgot->output_section->vma
4814 + htab->elf.sgot->output_offset + off
4815 - roff
4816 - largepic
4817 - input_section->output_section->vma
4818 - input_section->output_offset
4819 - 12);
4820 bfd_put_32 (output_bfd, relocation,
4821 contents + roff + 8 + largepic);
4822 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4823 rel++;
4824 continue;
4825 }
4826 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4827 {
4828 /* GDesc -> IE transition.
4829 It's originally something like:
4830 leaq x@tlsdesc(%rip), %rax
4831
4832 Change it to:
4833 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4834
4835 /* Now modify the instruction as appropriate. To
4836 turn a leaq into a movq in the form we use it, it
4837 suffices to change the second byte from 0x8d to
4838 0x8b. */
4839 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4840
4841 bfd_put_32 (output_bfd,
4842 htab->elf.sgot->output_section->vma
4843 + htab->elf.sgot->output_offset + off
4844 - rel->r_offset
4845 - input_section->output_section->vma
4846 - input_section->output_offset
4847 - 4,
4848 contents + roff);
4849 continue;
4850 }
4851 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4852 {
4853 /* GDesc -> IE transition.
4854 It's originally:
4855 call *(%rax)
4856
4857 Change it to:
4858 xchg %ax, %ax. */
4859
4860 bfd_put_8 (output_bfd, 0x66, contents + roff);
4861 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4862 continue;
4863 }
4864 else
4865 BFD_ASSERT (FALSE);
4866 }
4867 break;
4868
4869 case R_X86_64_TLSLD:
4870 if (! elf_x86_64_tls_transition (info, input_bfd,
4871 input_section, contents,
4872 symtab_hdr, sym_hashes,
4873 &r_type, GOT_UNKNOWN,
4874 rel, relend, h, r_symndx))
4875 return FALSE;
4876
4877 if (r_type != R_X86_64_TLSLD)
4878 {
4879 /* LD->LE transition:
4880 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4881 For 64bit, we change it into:
4882 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4883 For 32bit, we change it into:
4884 nopl 0x0(%rax); movl %fs:0, %eax.
4885 For largepic, change:
4886 				 leaq foo@tlsld(%rip), %rdi
4887 movabsq $__tls_get_addr@pltoff, %rax
4888 addq %rbx, %rax
4889 call *%rax
4890 into:
4891 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4892 				 movq %fs:0, %rax */
4893
4894 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4895 if (ABI_64_P (output_bfd)
4896 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4897 memcpy (contents + rel->r_offset - 3,
4898 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4899 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4900 else if (ABI_64_P (output_bfd))
4901 memcpy (contents + rel->r_offset - 3,
4902 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4903 else
4904 memcpy (contents + rel->r_offset - 3,
4905 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4906 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4907 rel++;
4908 continue;
4909 }
4910
4911 if (htab->elf.sgot == NULL)
4912 abort ();
4913
4914 off = htab->tls_ld_got.offset;
4915 if (off & 1)
4916 off &= ~1;
4917 else
4918 {
4919 Elf_Internal_Rela outrel;
4920
4921 if (htab->elf.srelgot == NULL)
4922 abort ();
4923
4924 outrel.r_offset = (htab->elf.sgot->output_section->vma
4925 + htab->elf.sgot->output_offset + off);
4926
4927 bfd_put_64 (output_bfd, 0,
4928 htab->elf.sgot->contents + off);
4929 bfd_put_64 (output_bfd, 0,
4930 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4931 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4932 outrel.r_addend = 0;
4933 elf_append_rela (output_bfd, htab->elf.srelgot,
4934 &outrel);
4935 htab->tls_ld_got.offset |= 1;
4936 }
4937 relocation = htab->elf.sgot->output_section->vma
4938 + htab->elf.sgot->output_offset + off;
4939 unresolved_reloc = FALSE;
4940 break;
4941
4942 case R_X86_64_DTPOFF32:
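	  /* In an executable, a @dtpoff left in a code section is the
	     result of an LD->LE transition and must be resolved as a
	     @tpoff value; otherwise it is an offset from the start of the
	     module's TLS block.  */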
4943 	  if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4944 relocation -= elf_x86_64_dtpoff_base (info);
4945 else
4946 relocation = elf_x86_64_tpoff (info, relocation);
4947 break;
4948
4949 case R_X86_64_TPOFF32:
4950 case R_X86_64_TPOFF64:
4951 BFD_ASSERT (info->executable);
4952 relocation = elf_x86_64_tpoff (info, relocation);
4953 break;
4954
4955 case R_X86_64_DTPOFF64:
4956 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4957 relocation -= elf_x86_64_dtpoff_base (info);
4958 break;
4959
4960 default:
4961 break;
4962 }
4963
4964 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4965 because such sections are not SEC_ALLOC and thus ld.so will
4966 not process them. */
4967 if (unresolved_reloc
4968 && !((input_section->flags & SEC_DEBUGGING) != 0
4969 && h->def_dynamic)
4970 && _bfd_elf_section_offset (output_bfd, info, input_section,
4971 rel->r_offset) != (bfd_vma) -1)
4972 {
4973 (*_bfd_error_handler)
4974 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4975 input_bfd,
4976 input_section,
4977 (long) rel->r_offset,
4978 howto->name,
4979 h->root.root.string);
4980 return FALSE;
4981 }
4982
4983 do_relocation:
4984 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4985 contents, rel->r_offset,
4986 relocation, rel->r_addend);
4987
4988 check_relocation_error:
4989 if (r != bfd_reloc_ok)
4990 {
4991 const char *name;
4992
4993 if (h != NULL)
4994 name = h->root.root.string;
4995 else
4996 {
4997 name = bfd_elf_string_from_elf_section (input_bfd,
4998 symtab_hdr->sh_link,
4999 sym->st_name);
5000 if (name == NULL)
5001 return FALSE;
5002 if (*name == '\0')
5003 name = bfd_section_name (input_bfd, sec);
5004 }
5005
5006 if (r == bfd_reloc_overflow)
5007 {
5008 if (! ((*info->callbacks->reloc_overflow)
5009 (info, (h ? &h->root : NULL), name, howto->name,
5010 (bfd_vma) 0, input_bfd, input_section,
5011 rel->r_offset)))
5012 return FALSE;
5013 }
5014 else
5015 {
5016 (*_bfd_error_handler)
5017 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5018 input_bfd, input_section,
5019 (long) rel->r_offset, name, (int) r);
5020 return FALSE;
5021 }
5022 }
5023 }
5024
5025 return TRUE;
5026 }
5027
5028 /* Finish up dynamic symbol handling. We set the contents of various
5029 dynamic sections here. */
5030
5031 static bfd_boolean
5032 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5033 struct bfd_link_info *info,
5034 struct elf_link_hash_entry *h,
5035 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
5036 {
5037 struct elf_x86_64_link_hash_table *htab;
5038 const struct elf_x86_64_backend_data *abed;
5039 bfd_boolean use_plt_bnd;
5040 struct elf_x86_64_link_hash_entry *eh;
5041
5042 htab = elf_x86_64_hash_table (info);
5043 if (htab == NULL)
5044 return FALSE;
5045
5046   /* Use the MPX backend data in case of a BND relocation.  Use the
5047      .plt_bnd section only if there is a .plt section.  */
5048 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5049 abed = (use_plt_bnd
5050 ? &elf_x86_64_bnd_arch_bed
5051 : get_elf_x86_64_backend_data (output_bfd));
5052
5053 eh = (struct elf_x86_64_link_hash_entry *) h;
5054
5055 if (h->plt.offset != (bfd_vma) -1)
5056 {
5057 bfd_vma plt_index;
5058 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5059 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5060 Elf_Internal_Rela rela;
5061 bfd_byte *loc;
5062 asection *plt, *gotplt, *relplt, *resolved_plt;
5063 const struct elf_backend_data *bed;
5064 bfd_vma plt_got_pcrel_offset;
5065
5066 /* When building a static executable, use .iplt, .igot.plt and
5067 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5068 if (htab->elf.splt != NULL)
5069 {
5070 plt = htab->elf.splt;
5071 gotplt = htab->elf.sgotplt;
5072 relplt = htab->elf.srelplt;
5073 }
5074 else
5075 {
5076 plt = htab->elf.iplt;
5077 gotplt = htab->elf.igotplt;
5078 relplt = htab->elf.irelplt;
5079 }
5080
5081 /* This symbol has an entry in the procedure linkage table. Set
5082 it up. */
5083 if ((h->dynindx == -1
5084 && !((h->forced_local || info->executable)
5085 && h->def_regular
5086 && h->type == STT_GNU_IFUNC))
5087 || plt == NULL
5088 || gotplt == NULL
5089 || relplt == NULL)
5090 abort ();
5091
5092 /* Get the index in the procedure linkage table which
5093 corresponds to this symbol. This is the index of this symbol
5094 in all the symbols for which we are making plt entries. The
5095 first entry in the procedure linkage table is reserved.
5096
5097 Get the offset into the .got table of the entry that
5098 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5099 bytes. The first three are reserved for the dynamic linker.
5100
5101 For static executables, we don't reserve anything. */
5102
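      /* For example (assuming the standard 16-byte lazy PLT entries and
	 8-byte GOT entries): the first real PLT entry, at offset 16,
	 yields index 0 and uses .got.plt slot 3, i.e. GOT offset 24.  */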
5103 if (plt == htab->elf.splt)
5104 {
5105 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5106 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5107 }
5108 else
5109 {
5110 got_offset = h->plt.offset / abed->plt_entry_size;
5111 got_offset = got_offset * GOT_ENTRY_SIZE;
5112 }
5113
5114 plt_plt_insn_end = abed->plt_plt_insn_end;
5115 plt_plt_offset = abed->plt_plt_offset;
5116 plt_got_insn_size = abed->plt_got_insn_size;
5117 plt_got_offset = abed->plt_got_offset;
5118 if (use_plt_bnd)
5119 {
5120 /* Use the second PLT with BND relocations. */
5121 const bfd_byte *plt_entry, *plt2_entry;
5122
5123 if (eh->has_bnd_reloc)
5124 {
5125 plt_entry = elf_x86_64_bnd_plt_entry;
5126 plt2_entry = elf_x86_64_bnd_plt2_entry;
5127 }
5128 else
5129 {
5130 plt_entry = elf_x86_64_legacy_plt_entry;
5131 plt2_entry = elf_x86_64_legacy_plt2_entry;
5132
5133 /* Subtract 1 since there is no BND prefix. */
5134 plt_plt_insn_end -= 1;
5135 plt_plt_offset -= 1;
5136 plt_got_insn_size -= 1;
5137 plt_got_offset -= 1;
5138 }
5139
5140 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5141 == sizeof (elf_x86_64_legacy_plt_entry));
5142
5143 /* Fill in the entry in the procedure linkage table. */
5144 memcpy (plt->contents + h->plt.offset,
5145 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5146 /* Fill in the entry in the second PLT. */
5147 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5148 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5149
5150 resolved_plt = htab->plt_bnd;
5151 plt_offset = eh->plt_bnd.offset;
5152 }
5153 else
5154 {
5155 /* Fill in the entry in the procedure linkage table. */
5156 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5157 abed->plt_entry_size);
5158
5159 resolved_plt = plt;
5160 plt_offset = h->plt.offset;
5161 }
5162
5163       /* Fill in the offsets within the PLT entry.  */
5164 
5165       /* Put the offset for the PC-relative instruction referring to the
5166 	 GOT entry, subtracting the size of that instruction.  */
5167 plt_got_pcrel_offset = (gotplt->output_section->vma
5168 + gotplt->output_offset
5169 + got_offset
5170 - resolved_plt->output_section->vma
5171 - resolved_plt->output_offset
5172 - plt_offset
5173 - plt_got_insn_size);
5174
5175       /* Check that the PC-relative offset fits in the signed 32-bit
5176 	 displacement of the PLT entry.  */
5176 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5177 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5178 output_bfd, h->root.root.string);
5179
5180 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5181 resolved_plt->contents + plt_offset + plt_got_offset);
5182
5183       /* Fill in the entry in the global offset table; initially this
5184 	 points to the second part of the PLT entry.  */
5185 bfd_put_64 (output_bfd, (plt->output_section->vma
5186 + plt->output_offset
5187 + h->plt.offset + abed->plt_lazy_offset),
5188 gotplt->contents + got_offset);
5189
5190 /* Fill in the entry in the .rela.plt section. */
5191 rela.r_offset = (gotplt->output_section->vma
5192 + gotplt->output_offset
5193 + got_offset);
5194 if (h->dynindx == -1
5195 || ((info->executable
5196 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5197 && h->def_regular
5198 && h->type == STT_GNU_IFUNC))
5199 {
5200 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5201 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5202 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5203 rela.r_addend = (h->root.u.def.value
5204 + h->root.u.def.section->output_section->vma
5205 + h->root.u.def.section->output_offset);
5206 /* R_X86_64_IRELATIVE comes last. */
5207 plt_index = htab->next_irelative_index--;
5208 }
5209 else
5210 {
5211 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5212 rela.r_addend = 0;
5213 plt_index = htab->next_jump_slot_index++;
5214 }
5215
5216 /* Don't fill PLT entry for static executables. */
5217 if (plt == htab->elf.splt)
5218 {
5219 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5220
5221 /* Put relocation index. */
5222 bfd_put_32 (output_bfd, plt_index,
5223 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5224
5225 	  /* Put the offset for jmp .PLT0 and check for overflow.  We don't
5226 	     check the relocation index for overflow since the branch
5227 	     displacement will overflow first.  */
5228 if (plt0_offset > 0x80000000)
5229 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5230 output_bfd, h->root.root.string);
5231 bfd_put_32 (output_bfd, - plt0_offset,
5232 plt->contents + h->plt.offset + plt_plt_offset);
5233 }
5234
5235 bed = get_elf_backend_data (output_bfd);
5236 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5237 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5238 }
5239 else if (eh->plt_got.offset != (bfd_vma) -1)
5240 {
5241 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5242 asection *plt, *got;
5243 bfd_boolean got_after_plt;
5244 int32_t got_pcrel_offset;
5245 const bfd_byte *got_plt_entry;
5246
5247 /* Set the entry in the GOT procedure linkage table. */
5248 plt = htab->plt_got;
5249 got = htab->elf.sgot;
5250 got_offset = h->got.offset;
5251
5252 if (got_offset == (bfd_vma) -1
5253 || h->type == STT_GNU_IFUNC
5254 || plt == NULL
5255 || got == NULL)
5256 abort ();
5257
5258       /* Use the second PLT entry template for the GOT PLT since they
5259 	 are identical.  */
5260 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5261 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5262 if (eh->has_bnd_reloc)
5263 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5264 else
5265 {
5266 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5267
5268 /* Subtract 1 since there is no BND prefix. */
5269 plt_got_insn_size -= 1;
5270 plt_got_offset -= 1;
5271 }
5272
5273 /* Fill in the entry in the GOT procedure linkage table. */
5274 plt_offset = eh->plt_got.offset;
5275 memcpy (plt->contents + plt_offset,
5276 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5277
5278       /* Put the offset for the PC-relative instruction referring to the
5279 	 GOT entry, subtracting the size of that instruction.  */
5280 got_pcrel_offset = (got->output_section->vma
5281 + got->output_offset
5282 + got_offset
5283 - plt->output_section->vma
5284 - plt->output_offset
5285 - plt_offset
5286 - plt_got_insn_size);
5287
5288 /* Check PC-relative offset overflow in GOT PLT entry. */
5289 got_after_plt = got->output_section->vma > plt->output_section->vma;
5290 if ((got_after_plt && got_pcrel_offset < 0)
5291 || (!got_after_plt && got_pcrel_offset > 0))
5292 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5293 output_bfd, h->root.root.string);
5294
5295 bfd_put_32 (output_bfd, got_pcrel_offset,
5296 plt->contents + plt_offset + plt_got_offset);
5297 }
5298
5299 if (!h->def_regular
5300 && (h->plt.offset != (bfd_vma) -1
5301 || eh->plt_got.offset != (bfd_vma) -1))
5302 {
5303 /* Mark the symbol as undefined, rather than as defined in
5304 the .plt section. Leave the value if there were any
5305 relocations where pointer equality matters (this is a clue
5306 for the dynamic linker, to make function pointer
5307 comparisons work between an application and shared
5308 library), otherwise set it to zero. If a function is only
5309 called from a binary, there is no need to slow down
5310 shared libraries because of that. */
5311 sym->st_shndx = SHN_UNDEF;
5312 if (!h->pointer_equality_needed)
5313 sym->st_value = 0;
5314 }
5315
5316 if (h->got.offset != (bfd_vma) -1
5317 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5318 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5319 {
5320 Elf_Internal_Rela rela;
5321
5322 /* This symbol has an entry in the global offset table. Set it
5323 up. */
5324 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5325 abort ();
5326
5327 rela.r_offset = (htab->elf.sgot->output_section->vma
5328 + htab->elf.sgot->output_offset
5329 + (h->got.offset &~ (bfd_vma) 1));
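      /* The least significant bit of got.offset records whether the GOT
	 entry was already initialized in relocate_section; it is masked
	 off above to obtain the real offset.  */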
5330
5331 /* If this is a static link, or it is a -Bsymbolic link and the
5332 symbol is defined locally or was forced to be local because
5333 of a version file, we just want to emit a RELATIVE reloc.
5334 The entry in the global offset table will already have been
5335 initialized in the relocate_section function. */
5336 if (h->def_regular
5337 && h->type == STT_GNU_IFUNC)
5338 {
5339 if (info->shared)
5340 {
5341 /* Generate R_X86_64_GLOB_DAT. */
5342 goto do_glob_dat;
5343 }
5344 else
5345 {
5346 asection *plt;
5347
5348 if (!h->pointer_equality_needed)
5349 abort ();
5350
5351 	      /* For a non-shared object, we can't use .got.plt, which
5352 		 contains the real function address, if we need pointer
5353 		 equality.  We load the GOT entry with the PLT entry address.  */
5354 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5355 bfd_put_64 (output_bfd, (plt->output_section->vma
5356 + plt->output_offset
5357 + h->plt.offset),
5358 htab->elf.sgot->contents + h->got.offset);
5359 return TRUE;
5360 }
5361 }
5362 else if (info->shared
5363 && SYMBOL_REFERENCES_LOCAL (info, h))
5364 {
5365 if (!h->def_regular)
5366 return FALSE;
5367 BFD_ASSERT((h->got.offset & 1) != 0);
5368 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5369 rela.r_addend = (h->root.u.def.value
5370 + h->root.u.def.section->output_section->vma
5371 + h->root.u.def.section->output_offset);
5372 }
5373 else
5374 {
5375 BFD_ASSERT((h->got.offset & 1) == 0);
5376 do_glob_dat:
5377 bfd_put_64 (output_bfd, (bfd_vma) 0,
5378 htab->elf.sgot->contents + h->got.offset);
5379 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5380 rela.r_addend = 0;
5381 }
5382
5383 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5384 }
5385
5386 if (h->needs_copy)
5387 {
5388 Elf_Internal_Rela rela;
5389
5390 /* This symbol needs a copy reloc. Set it up. */
5391
5392 if (h->dynindx == -1
5393 || (h->root.type != bfd_link_hash_defined
5394 && h->root.type != bfd_link_hash_defweak)
5395 || htab->srelbss == NULL)
5396 abort ();
5397
5398 rela.r_offset = (h->root.u.def.value
5399 + h->root.u.def.section->output_section->vma
5400 + h->root.u.def.section->output_offset);
5401 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5402 rela.r_addend = 0;
5403 elf_append_rela (output_bfd, htab->srelbss, &rela);
5404 }
5405
5406 return TRUE;
5407 }
5408
5409 /* Finish up local dynamic symbol handling. We set the contents of
5410 various dynamic sections here. */
5411
5412 static bfd_boolean
5413 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5414 {
5415 struct elf_link_hash_entry *h
5416 = (struct elf_link_hash_entry *) *slot;
5417 struct bfd_link_info *info
5418 = (struct bfd_link_info *) inf;
5419
5420 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5421 info, h, NULL);
5422 }
5423
5424 /* Used to decide how to sort relocs in an optimal manner for the
5425 dynamic linker, before writing them out. */
5426
5427 static enum elf_reloc_type_class
5428 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5429 const asection *rel_sec ATTRIBUTE_UNUSED,
5430 const Elf_Internal_Rela *rela)
5431 {
5432 switch ((int) ELF32_R_TYPE (rela->r_info))
5433 {
5434 case R_X86_64_RELATIVE:
5435 case R_X86_64_RELATIVE64:
5436 return reloc_class_relative;
5437 case R_X86_64_JUMP_SLOT:
5438 return reloc_class_plt;
5439 case R_X86_64_COPY:
5440 return reloc_class_copy;
5441 default:
5442 return reloc_class_normal;
5443 }
5444 }
5445
5446 /* Finish up the dynamic sections. */
5447
5448 static bfd_boolean
5449 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5450 struct bfd_link_info *info)
5451 {
5452 struct elf_x86_64_link_hash_table *htab;
5453 bfd *dynobj;
5454 asection *sdyn;
5455 const struct elf_x86_64_backend_data *abed;
5456
5457 htab = elf_x86_64_hash_table (info);
5458 if (htab == NULL)
5459 return FALSE;
5460
5461   /* Use the MPX backend data in case of a BND relocation.  Use the
5462      .plt_bnd section only if there is a .plt section.  */
5463 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5464 ? &elf_x86_64_bnd_arch_bed
5465 : get_elf_x86_64_backend_data (output_bfd));
5466
5467 dynobj = htab->elf.dynobj;
5468 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5469
5470 if (htab->elf.dynamic_sections_created)
5471 {
5472 bfd_byte *dyncon, *dynconend;
5473 const struct elf_backend_data *bed;
5474 bfd_size_type sizeof_dyn;
5475
5476 if (sdyn == NULL || htab->elf.sgot == NULL)
5477 abort ();
5478
5479 bed = get_elf_backend_data (dynobj);
5480 sizeof_dyn = bed->s->sizeof_dyn;
5481 dyncon = sdyn->contents;
5482 dynconend = sdyn->contents + sdyn->size;
5483 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5484 {
5485 Elf_Internal_Dyn dyn;
5486 asection *s;
5487
5488 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5489
5490 switch (dyn.d_tag)
5491 {
5492 default:
5493 continue;
5494
5495 case DT_PLTGOT:
5496 s = htab->elf.sgotplt;
5497 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5498 break;
5499
5500 case DT_JMPREL:
5501 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5502 break;
5503
5504 case DT_PLTRELSZ:
5505 s = htab->elf.srelplt->output_section;
5506 dyn.d_un.d_val = s->size;
5507 break;
5508
5509 case DT_RELASZ:
5510 /* The procedure linkage table relocs (DT_JMPREL) should
5511 not be included in the overall relocs (DT_RELA).
5512 Therefore, we override the DT_RELASZ entry here to
5513 make it not include the JMPREL relocs. Since the
5514 linker script arranges for .rela.plt to follow all
5515 other relocation sections, we don't have to worry
5516 about changing the DT_RELA entry. */
5517 if (htab->elf.srelplt != NULL)
5518 {
5519 s = htab->elf.srelplt->output_section;
5520 dyn.d_un.d_val -= s->size;
5521 }
5522 break;
5523
5524 case DT_TLSDESC_PLT:
5525 s = htab->elf.splt;
5526 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5527 + htab->tlsdesc_plt;
5528 break;
5529
5530 case DT_TLSDESC_GOT:
5531 s = htab->elf.sgot;
5532 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5533 + htab->tlsdesc_got;
5534 break;
5535 }
5536
5537 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5538 }
5539
5540 /* Fill in the special first entry in the procedure linkage table. */
5541 if (htab->elf.splt && htab->elf.splt->size > 0)
5542 {
5543 /* Fill in the first entry in the procedure linkage table. */
5544 memcpy (htab->elf.splt->contents,
5545 abed->plt0_entry, abed->plt_entry_size);
5546 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
5547 	     uses 6 bytes, subtract this value.  */
5548 bfd_put_32 (output_bfd,
5549 (htab->elf.sgotplt->output_section->vma
5550 + htab->elf.sgotplt->output_offset
5551 + 8
5552 - htab->elf.splt->output_section->vma
5553 - htab->elf.splt->output_offset
5554 - 6),
5555 htab->elf.splt->contents + abed->plt0_got1_offset);
5556 /* Add offset for the PC-relative instruction accessing GOT+16,
5557 subtracting the offset to the end of that instruction. */
5558 bfd_put_32 (output_bfd,
5559 (htab->elf.sgotplt->output_section->vma
5560 + htab->elf.sgotplt->output_offset
5561 + 16
5562 - htab->elf.splt->output_section->vma
5563 - htab->elf.splt->output_offset
5564 - abed->plt0_got2_insn_end),
5565 htab->elf.splt->contents + abed->plt0_got2_offset);
5566
5567 elf_section_data (htab->elf.splt->output_section)
5568 ->this_hdr.sh_entsize = abed->plt_entry_size;
5569
5570 if (htab->tlsdesc_plt)
5571 {
5572 bfd_put_64 (output_bfd, (bfd_vma) 0,
5573 htab->elf.sgot->contents + htab->tlsdesc_got);
5574
5575 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5576 abed->plt0_entry, abed->plt_entry_size);
5577
5578 	      /* Add the offset for pushq GOT+8(%rip); since the
5579 		 instruction uses 6 bytes, subtract this value.  */
5580 bfd_put_32 (output_bfd,
5581 (htab->elf.sgotplt->output_section->vma
5582 + htab->elf.sgotplt->output_offset
5583 + 8
5584 - htab->elf.splt->output_section->vma
5585 - htab->elf.splt->output_offset
5586 - htab->tlsdesc_plt
5587 - 6),
5588 htab->elf.splt->contents
5589 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5590 	      /* Add the offset for the PC-relative instruction accessing
5591 		 GOT+TDG, where TDG stands for htab->tlsdesc_got, subtracting
5592 		 the offset to the end of that instruction.  */
5593 bfd_put_32 (output_bfd,
5594 (htab->elf.sgot->output_section->vma
5595 + htab->elf.sgot->output_offset
5596 + htab->tlsdesc_got
5597 - htab->elf.splt->output_section->vma
5598 - htab->elf.splt->output_offset
5599 - htab->tlsdesc_plt
5600 - abed->plt0_got2_insn_end),
5601 htab->elf.splt->contents
5602 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5603 }
5604 }
5605 }
5606
5607 if (htab->plt_bnd != NULL)
5608 elf_section_data (htab->plt_bnd->output_section)
5609 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5610
5611 if (htab->elf.sgotplt)
5612 {
5613 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5614 {
5615 (*_bfd_error_handler)
5616 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5617 return FALSE;
5618 }
5619
5620 /* Fill in the first three entries in the global offset table. */
5621 if (htab->elf.sgotplt->size > 0)
5622 {
5623 /* Set the first entry in the global offset table to the address of
5624 the dynamic section. */
5625 if (sdyn == NULL)
5626 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5627 else
5628 bfd_put_64 (output_bfd,
5629 sdyn->output_section->vma + sdyn->output_offset,
5630 htab->elf.sgotplt->contents);
5631 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5632 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5633 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
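/* Note: GOT[1] and GOT[2] are merely reserved (zeroed) here; at run time
   the dynamic linker normally overwrites them, e.g. in glibc GOT[1]
   receives the link_map pointer and GOT[2] the address of
   _dl_runtime_resolve, which the PLT0 code above relies on for lazy
   resolution. */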
5634 }
5635
5636 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5637 GOT_ENTRY_SIZE;
5638 }
5639
5640 /* Adjust .eh_frame for .plt section. */
5641 if (htab->plt_eh_frame != NULL
5642 && htab->plt_eh_frame->contents != NULL)
5643 {
5644 if (htab->elf.splt != NULL
5645 && htab->elf.splt->size != 0
5646 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5647 && htab->elf.splt->output_section != NULL
5648 && htab->plt_eh_frame->output_section != NULL)
5649 {
5650 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5651 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5652 + htab->plt_eh_frame->output_offset
5653 + PLT_FDE_START_OFFSET;
5654 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5655 htab->plt_eh_frame->contents
5656 + PLT_FDE_START_OFFSET);
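/* The value stored above is PC-relative because the FDE's
   initial-location field in the .eh_frame template uses the
   DW_EH_PE_pcrel | DW_EH_PE_sdata4 encoding, so it must hold the PLT
   start minus the address of the field itself (the output address of
   plt_eh_frame plus PLT_FDE_START_OFFSET). */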
5657 }
5658 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5659 {
5660 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5661 htab->plt_eh_frame,
5662 htab->plt_eh_frame->contents))
5663 return FALSE;
5664 }
5665 }
5666
5667 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5668 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5669 = GOT_ENTRY_SIZE;
5670
5671 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5672 htab_traverse (htab->loc_hash_table,
5673 elf_x86_64_finish_local_dynamic_symbol,
5674 info);
5675
5676 return TRUE;
5677 }
5678
5679 /* Return an array of PLT entry symbol values. */
5680
5681 static bfd_vma *
5682 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5683 asection *relplt)
5684 {
5685 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5686 arelent *p;
5687 long count, i;
5688 bfd_vma *plt_sym_val;
5689 bfd_vma plt_offset;
5690 bfd_byte *plt_contents;
5691 const struct elf_x86_64_backend_data *bed;
5692 Elf_Internal_Shdr *hdr;
5693 asection *plt_bnd;
5694
5695 /* Get the .plt section contents. The PLT passed down may point to
5696 the .plt.bnd section; make sure that PLT always points to the .plt
5697 section. */
5698 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5699 if (plt_bnd)
5700 {
5701 if (plt != plt_bnd)
5702 abort ();
5703 plt = bfd_get_section_by_name (abfd, ".plt");
5704 if (plt == NULL)
5705 abort ();
5706 bed = &elf_x86_64_bnd_arch_bed;
5707 }
5708 else
5709 bed = get_elf_x86_64_backend_data (abfd);
5710
5711 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5712 if (plt_contents == NULL)
5713 return NULL;
5714 if (!bfd_get_section_contents (abfd, (asection *) plt,
5715 plt_contents, 0, plt->size))
5716 {
5717 bad_return:
5718 free (plt_contents);
5719 return NULL;
5720 }
5721
5722 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5723 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5724 goto bad_return;
5725
5726 hdr = &elf_section_data (relplt)->this_hdr;
5727 count = relplt->size / hdr->sh_entsize;
5728
5729 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5730 if (plt_sym_val == NULL)
5731 goto bad_return;
5732
5733 for (i = 0; i < count; i++)
5734 plt_sym_val[i] = -1;
5735
5736 plt_offset = bed->plt_entry_size;
5737 p = relplt->relocation;
5738 for (i = 0; i < count; i++, p++)
5739 {
5740 long reloc_index;
5741
5742 /* Skip unknown relocations. */
5743 if (p->howto == NULL)
5744 continue;
5745
5746 if (p->howto->type != R_X86_64_JUMP_SLOT
5747 && p->howto->type != R_X86_64_IRELATIVE)
5748 continue;
5749
5750 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5751 + bed->plt_reloc_offset));
5752 if (reloc_index >= count)
5753 abort ();
5754 if (plt_bnd)
5755 {
5756 /* This is the index into the .plt section. */
5757 long plt_index = plt_offset / bed->plt_entry_size;
5758 /* Store VMA + the offset in .plt.bnd section. */
5759 plt_sym_val[reloc_index] =
5760 (plt_bnd->vma
5761 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5762 }
5763 else
5764 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5765 plt_offset += bed->plt_entry_size;
5766
5767 /* PR binutils/18437: Skip extra relocations in the .rela.plt
5768 section. */
5769 if (plt_offset >= plt->size)
5770 break;
5771 }
5772
5773 free (plt_contents);
5774
5775 return plt_sym_val;
5776 }
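/* A minimal usage sketch (hypothetical caller, not part of this file):
   the array returned above is indexed by .rela.plt relocation number and
   gives the address to report for each PLT-called symbol, which is how
   tools such as "objdump -d" end up printing synthetic "name@plt" labels.

     bfd_vma *vals = elf_x86_64_get_plt_sym_val (abfd, dynsyms, plt, relplt);
     if (vals != NULL && vals[i] != (bfd_vma) -1)
       printf ("%s@plt at %#lx\n", names[i], (unsigned long) vals[i]);

   Here "names" and "i" are hypothetical; the real consumer is the generic
   synthetic-symbol code reached via elf_x86_64_get_synthetic_symtab. */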
5777
5778 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5779 support. */
5780
5781 static long
5782 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5783 long symcount,
5784 asymbol **syms,
5785 long dynsymcount,
5786 asymbol **dynsyms,
5787 asymbol **ret)
5788 {
5789 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5790 as PLT if it exists. */
5791 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5792 if (plt == NULL)
5793 plt = bfd_get_section_by_name (abfd, ".plt");
5794 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5795 dynsymcount, dynsyms, ret,
5796 plt,
5797 elf_x86_64_get_plt_sym_val);
5798 }
5799
5800 /* Handle an x86-64 specific section when reading an object file. This
5801 is called when elfcode.h finds a section with an unknown type. */
5802
5803 static bfd_boolean
5804 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5805 const char *name, int shindex)
5806 {
5807 if (hdr->sh_type != SHT_X86_64_UNWIND)
5808 return FALSE;
5809
5810 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5811 return FALSE;
5812
5813 return TRUE;
5814 }
5815
5816 /* Hook called by the linker routine which adds symbols from an object
5817 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5818 of .bss. */
5819
5820 static bfd_boolean
5821 elf_x86_64_add_symbol_hook (bfd *abfd,
5822 struct bfd_link_info *info,
5823 Elf_Internal_Sym *sym,
5824 const char **namep ATTRIBUTE_UNUSED,
5825 flagword *flagsp ATTRIBUTE_UNUSED,
5826 asection **secp,
5827 bfd_vma *valp)
5828 {
5829 asection *lcomm;
5830
5831 switch (sym->st_shndx)
5832 {
5833 case SHN_X86_64_LCOMMON:
5834 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5835 if (lcomm == NULL)
5836 {
5837 lcomm = bfd_make_section_with_flags (abfd,
5838 "LARGE_COMMON",
5839 (SEC_ALLOC
5840 | SEC_IS_COMMON
5841 | SEC_LINKER_CREATED));
5842 if (lcomm == NULL)
5843 return FALSE;
5844 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5845 }
5846 *secp = lcomm;
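/* As with ordinary ELF common symbols, the symbol's BFD value is set to
   its size here; elf_x86_64_symbol_processing below follows the same
   convention when mapping SHN_X86_64_LCOMMON symbols back to the large
   common section. */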
5847 *valp = sym->st_size;
5848 return TRUE;
5849 }
5850
5851 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5852 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5853 && (abfd->flags & DYNAMIC) == 0
5854 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5855 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5856
5857 return TRUE;
5858 }
5859
5860
5861 /* Given a BFD section, try to locate the corresponding ELF section
5862 index. */
5863
5864 static bfd_boolean
5865 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5866 asection *sec, int *index_return)
5867 {
5868 if (sec == &_bfd_elf_large_com_section)
5869 {
5870 *index_return = SHN_X86_64_LCOMMON;
5871 return TRUE;
5872 }
5873 return FALSE;
5874 }
5875
5876 /* Process a symbol. */
5877
5878 static void
5879 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5880 asymbol *asym)
5881 {
5882 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5883
5884 switch (elfsym->internal_elf_sym.st_shndx)
5885 {
5886 case SHN_X86_64_LCOMMON:
5887 asym->section = &_bfd_elf_large_com_section;
5888 asym->value = elfsym->internal_elf_sym.st_size;
5889 /* A common symbol doesn't set BSF_GLOBAL. */
5890 asym->flags &= ~BSF_GLOBAL;
5891 break;
5892 }
5893 }
5894
5895 static bfd_boolean
5896 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5897 {
5898 return (sym->st_shndx == SHN_COMMON
5899 || sym->st_shndx == SHN_X86_64_LCOMMON);
5900 }
5901
5902 static unsigned int
5903 elf_x86_64_common_section_index (asection *sec)
5904 {
5905 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5906 return SHN_COMMON;
5907 else
5908 return SHN_X86_64_LCOMMON;
5909 }
5910
5911 static asection *
5912 elf_x86_64_common_section (asection *sec)
5913 {
5914 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5915 return bfd_com_section_ptr;
5916 else
5917 return &_bfd_elf_large_com_section;
5918 }
5919
5920 static bfd_boolean
5921 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5922 const Elf_Internal_Sym *sym,
5923 asection **psec,
5924 bfd_boolean newdef,
5925 bfd_boolean olddef,
5926 bfd *oldbfd,
5927 const asection *oldsec)
5928 {
5929 /* A normal common symbol and a large common symbol result in a
5930 normal common symbol. We turn the large common symbol into a
5931 normal one. */
5932 if (!olddef
5933 && h->root.type == bfd_link_hash_common
5934 && !newdef
5935 && bfd_is_com_section (*psec)
5936 && oldsec != *psec)
5937 {
5938 if (sym->st_shndx == SHN_COMMON
5939 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5940 {
5941 h->root.u.c.p->section
5942 = bfd_make_section_old_way (oldbfd, "COMMON");
5943 h->root.u.c.p->section->flags = SEC_ALLOC;
5944 }
5945 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5946 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5947 *psec = bfd_com_section_ptr;
5948 }
5949
5950 return TRUE;
5951 }
5952
5953 static int
5954 elf_x86_64_additional_program_headers (bfd *abfd,
5955 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5956 {
5957 asection *s;
5958 int count = 0;
5959
5960 /* Check to see if we need a large readonly segment. */
5961 s = bfd_get_section_by_name (abfd, ".lrodata");
5962 if (s && (s->flags & SEC_LOAD))
5963 count++;
5964
5965 /* Check to see if we need a large data segment. Since the .lbss
5966 section is placed right after the .bss section, there should be no
5967 need for a large data segment just because of .lbss. */
5968 s = bfd_get_section_by_name (abfd, ".ldata");
5969 if (s && (s->flags & SEC_LOAD))
5970 count++;
5971
5972 return count;
5973 }
5974
5975 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5976
5977 static bfd_boolean
5978 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5979 {
5980 if (h->plt.offset != (bfd_vma) -1
5981 && !h->def_regular
5982 && !h->pointer_equality_needed)
5983 return FALSE;
5984
5985 return _bfd_elf_hash_symbol (h);
5986 }
5987
5988 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5989
5990 static bfd_boolean
5991 elf_x86_64_relocs_compatible (const bfd_target *input,
5992 const bfd_target *output)
5993 {
5994 return ((xvec_get_elf_backend_data (input)->s->elfclass
5995 == xvec_get_elf_backend_data (output)->s->elfclass)
5996 && _bfd_elf_relocs_compatible (input, output));
5997 }
5998
5999 static const struct bfd_elf_special_section
6000 elf_x86_64_special_sections[]=
6001 {
6002 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6003 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6004 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6005 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6006 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6007 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6008 { NULL, 0, 0, 0, 0 }
6009 };
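/* Note on the SHF_X86_64_LARGE flag used above: the x86-64 psABI uses it
   to mark sections that need not fit within the 2 GiB reachable by the
   small code model's 32-bit/RIP-relative addressing, which is why .lbss,
   .ldata and .lrodata (and their linkonce variants) are kept separate
   from .bss, .data and .rodata. */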
6010
6011 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6012 #define TARGET_LITTLE_NAME "elf64-x86-64"
6013 #define ELF_ARCH bfd_arch_i386
6014 #define ELF_TARGET_ID X86_64_ELF_DATA
6015 #define ELF_MACHINE_CODE EM_X86_64
6016 #define ELF_MAXPAGESIZE 0x200000
6017 #define ELF_MINPAGESIZE 0x1000
6018 #define ELF_COMMONPAGESIZE 0x1000
6019
6020 #define elf_backend_can_gc_sections 1
6021 #define elf_backend_can_refcount 1
6022 #define elf_backend_want_got_plt 1
6023 #define elf_backend_plt_readonly 1
6024 #define elf_backend_want_plt_sym 0
6025 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6026 #define elf_backend_rela_normal 1
6027 #define elf_backend_plt_alignment 4
6028 #define elf_backend_extern_protected_data 1
6029
6030 #define elf_info_to_howto elf_x86_64_info_to_howto
6031
6032 #define bfd_elf64_bfd_link_hash_table_create \
6033 elf_x86_64_link_hash_table_create
6034 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6035 #define bfd_elf64_bfd_reloc_name_lookup \
6036 elf_x86_64_reloc_name_lookup
6037
6038 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6039 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6040 #define elf_backend_check_relocs elf_x86_64_check_relocs
6041 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6042 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6043 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6044 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6045 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6046 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
6047 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6048 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6049 #ifdef CORE_HEADER
6050 #define elf_backend_write_core_note elf_x86_64_write_core_note
6051 #endif
6052 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6053 #define elf_backend_relocate_section elf_x86_64_relocate_section
6054 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6055 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6056 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6057 #define elf_backend_object_p elf64_x86_64_elf_object_p
6058 #define bfd_elf64_mkobject elf_x86_64_mkobject
6059 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6060
6061 #define elf_backend_section_from_shdr \
6062 elf_x86_64_section_from_shdr
6063
6064 #define elf_backend_section_from_bfd_section \
6065 elf_x86_64_elf_section_from_bfd_section
6066 #define elf_backend_add_symbol_hook \
6067 elf_x86_64_add_symbol_hook
6068 #define elf_backend_symbol_processing \
6069 elf_x86_64_symbol_processing
6070 #define elf_backend_common_section_index \
6071 elf_x86_64_common_section_index
6072 #define elf_backend_common_section \
6073 elf_x86_64_common_section
6074 #define elf_backend_common_definition \
6075 elf_x86_64_common_definition
6076 #define elf_backend_merge_symbol \
6077 elf_x86_64_merge_symbol
6078 #define elf_backend_special_sections \
6079 elf_x86_64_special_sections
6080 #define elf_backend_additional_program_headers \
6081 elf_x86_64_additional_program_headers
6082 #define elf_backend_hash_symbol \
6083 elf_x86_64_hash_symbol
6084
6085 #include "elf64-target.h"
6086
6087 /* CloudABI support. */
6088
6089 #undef TARGET_LITTLE_SYM
6090 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6091 #undef TARGET_LITTLE_NAME
6092 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6093
6094 #undef ELF_OSABI
6095 #define ELF_OSABI ELFOSABI_CLOUDABI
6096
6097 #undef elf64_bed
6098 #define elf64_bed elf64_x86_64_cloudabi_bed
6099
6100 #include "elf64-target.h"
6101
6102 /* FreeBSD support. */
6103
6104 #undef TARGET_LITTLE_SYM
6105 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6106 #undef TARGET_LITTLE_NAME
6107 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6108
6109 #undef ELF_OSABI
6110 #define ELF_OSABI ELFOSABI_FREEBSD
6111
6112 #undef elf64_bed
6113 #define elf64_bed elf64_x86_64_fbsd_bed
6114
6115 #include "elf64-target.h"
6116
6117 /* Solaris 2 support. */
6118
6119 #undef TARGET_LITTLE_SYM
6120 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6121 #undef TARGET_LITTLE_NAME
6122 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6123
6124 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6125 objects won't be recognized. */
6126 #undef ELF_OSABI
6127
6128 #undef elf64_bed
6129 #define elf64_bed elf64_x86_64_sol2_bed
6130
6131 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6132 boundary. */
6133 #undef elf_backend_static_tls_alignment
6134 #define elf_backend_static_tls_alignment 16
6135
6136 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6137
6138 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6139 File, p.63. */
6140 #undef elf_backend_want_plt_sym
6141 #define elf_backend_want_plt_sym 1
6142
6143 #include "elf64-target.h"
6144
6145 /* Native Client support. */
6146
6147 static bfd_boolean
6148 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6149 {
6150 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6151 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6152 return TRUE;
6153 }
6154
6155 #undef TARGET_LITTLE_SYM
6156 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6157 #undef TARGET_LITTLE_NAME
6158 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6159 #undef elf64_bed
6160 #define elf64_bed elf64_x86_64_nacl_bed
6161
6162 #undef ELF_MAXPAGESIZE
6163 #undef ELF_MINPAGESIZE
6164 #undef ELF_COMMONPAGESIZE
6165 #define ELF_MAXPAGESIZE 0x10000
6166 #define ELF_MINPAGESIZE 0x10000
6167 #define ELF_COMMONPAGESIZE 0x10000
6168
6169 /* Restore defaults. */
6170 #undef ELF_OSABI
6171 #undef elf_backend_static_tls_alignment
6172 #undef elf_backend_want_plt_sym
6173 #define elf_backend_want_plt_sym 0
6174
6175 /* NaCl uses substantially different PLT entries for the same effects. */
6176
6177 #undef elf_backend_plt_alignment
6178 #define elf_backend_plt_alignment 5
6179 #define NACL_PLT_ENTRY_SIZE 64
6180 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6181
6182 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6183 {
6184 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6185 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6186 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6187 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6188 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6189
6190 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6191 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6192
6193 /* 32 bytes of nop to pad out to the standard size. */
6194 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6195 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6196 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6197 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6198 0x66, /* excess data32 prefix */
6199 0x90 /* nop */
6200 };
6201
6202 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6203 {
6204 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6205 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6206 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6207 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6208
6209 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6210 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6211 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6212
6213 /* Lazy GOT entries point here (32-byte aligned). */
6214 0x68, /* pushq immediate */
6215 0, 0, 0, 0, /* replaced with index into relocation table. */
6216 0xe9, /* jmp relative */
6217 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6218
6219 /* 22 bytes of nop to pad out to the standard size. */
6220 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6221 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6222 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6223 };
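/* In both NaCl PLT templates above, the "and $-32, %r11d; add %r15, %r11;
   jmpq *%r11" sequence is the NaCl indirect-branch sandboxing idiom: the
   target is masked down to a 32-byte bundle boundary (the 32-bit and also
   clears the upper 32 bits), then rebased on %r15, the sandbox base
   register, before the jump; the entries themselves are padded with nops
   to the bundle-aligned NACL_PLT_ENTRY_SIZE. */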
6224
6225 /* .eh_frame covering the .plt section. */
6226
6227 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6228 {
6229 #if (PLT_CIE_LENGTH != 20 \
6230 || PLT_FDE_LENGTH != 36 \
6231 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6232 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6233 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6234 #endif
6235 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6236 0, 0, 0, 0, /* CIE ID */
6237 1, /* CIE version */
6238 'z', 'R', 0, /* Augmentation string */
6239 1, /* Code alignment factor */
6240 0x78, /* Data alignment factor */
6241 16, /* Return address column */
6242 1, /* Augmentation size */
6243 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6244 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6245 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6246 DW_CFA_nop, DW_CFA_nop,
6247
6248 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6249 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6250 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6251 0, 0, 0, 0, /* .plt size goes here */
6252 0, /* Augmentation size */
6253 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6254 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6255 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6256 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6257 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6258 13, /* Block length */
6259 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6260 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6261 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6262 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6263 DW_CFA_nop, DW_CFA_nop
6264 };
6265
6266 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6267 {
6268 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6269 elf_x86_64_nacl_plt_entry, /* plt_entry */
6270 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6271 2, /* plt0_got1_offset */
6272 9, /* plt0_got2_offset */
6273 13, /* plt0_got2_insn_end */
6274 3, /* plt_got_offset */
6275 33, /* plt_reloc_offset */
6276 38, /* plt_plt_offset */
6277 7, /* plt_got_insn_size */
6278 42, /* plt_plt_insn_end */
6279 32, /* plt_lazy_offset */
6280 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6281 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6282 };
6283
6284 #undef elf_backend_arch_data
6285 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6286
6287 #undef elf_backend_object_p
6288 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6289 #undef elf_backend_modify_segment_map
6290 #define elf_backend_modify_segment_map nacl_modify_segment_map
6291 #undef elf_backend_modify_program_headers
6292 #define elf_backend_modify_program_headers nacl_modify_program_headers
6293 #undef elf_backend_final_write_processing
6294 #define elf_backend_final_write_processing nacl_final_write_processing
6295
6296 #include "elf64-target.h"
6297
6298 /* Native Client x32 support. */
6299
6300 static bfd_boolean
6301 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6302 {
6303 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6304 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6305 return TRUE;
6306 }
6307
6308 #undef TARGET_LITTLE_SYM
6309 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6310 #undef TARGET_LITTLE_NAME
6311 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6312 #undef elf32_bed
6313 #define elf32_bed elf32_x86_64_nacl_bed
6314
6315 #define bfd_elf32_bfd_link_hash_table_create \
6316 elf_x86_64_link_hash_table_create
6317 #define bfd_elf32_bfd_reloc_type_lookup \
6318 elf_x86_64_reloc_type_lookup
6319 #define bfd_elf32_bfd_reloc_name_lookup \
6320 elf_x86_64_reloc_name_lookup
6321 #define bfd_elf32_mkobject \
6322 elf_x86_64_mkobject
6323 #define bfd_elf32_get_synthetic_symtab \
6324 elf_x86_64_get_synthetic_symtab
6325
6326 #undef elf_backend_object_p
6327 #define elf_backend_object_p \
6328 elf32_x86_64_nacl_elf_object_p
6329
6330 #undef elf_backend_bfd_from_remote_memory
6331 #define elf_backend_bfd_from_remote_memory \
6332 _bfd_elf32_bfd_from_remote_memory
6333
6334 #undef elf_backend_size_info
6335 #define elf_backend_size_info \
6336 _bfd_elf32_size_info
6337
6338 #include "elf32-target.h"
6339
6340 /* Restore defaults. */
6341 #undef elf_backend_object_p
6342 #define elf_backend_object_p elf64_x86_64_elf_object_p
6343 #undef elf_backend_bfd_from_remote_memory
6344 #undef elf_backend_size_info
6345 #undef elf_backend_modify_segment_map
6346 #undef elf_backend_modify_program_headers
6347 #undef elf_backend_final_write_processing
6348
6349 /* Intel L1OM support. */
6350
6351 static bfd_boolean
6352 elf64_l1om_elf_object_p (bfd *abfd)
6353 {
6354 /* Set the right machine number for an L1OM elf64 file. */
6355 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6356 return TRUE;
6357 }
6358
6359 #undef TARGET_LITTLE_SYM
6360 #define TARGET_LITTLE_SYM l1om_elf64_vec
6361 #undef TARGET_LITTLE_NAME
6362 #define TARGET_LITTLE_NAME "elf64-l1om"
6363 #undef ELF_ARCH
6364 #define ELF_ARCH bfd_arch_l1om
6365
6366 #undef ELF_MACHINE_CODE
6367 #define ELF_MACHINE_CODE EM_L1OM
6368
6369 #undef ELF_OSABI
6370
6371 #undef elf64_bed
6372 #define elf64_bed elf64_l1om_bed
6373
6374 #undef elf_backend_object_p
6375 #define elf_backend_object_p elf64_l1om_elf_object_p
6376
6377 /* Restore defaults. */
6378 #undef ELF_MAXPAGESIZE
6379 #undef ELF_MINPAGESIZE
6380 #undef ELF_COMMONPAGESIZE
6381 #define ELF_MAXPAGESIZE 0x200000
6382 #define ELF_MINPAGESIZE 0x1000
6383 #define ELF_COMMONPAGESIZE 0x1000
6384 #undef elf_backend_plt_alignment
6385 #define elf_backend_plt_alignment 4
6386 #undef elf_backend_arch_data
6387 #define elf_backend_arch_data &elf_x86_64_arch_bed
6388
6389 #include "elf64-target.h"
6390
6391 /* FreeBSD L1OM support. */
6392
6393 #undef TARGET_LITTLE_SYM
6394 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6395 #undef TARGET_LITTLE_NAME
6396 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6397
6398 #undef ELF_OSABI
6399 #define ELF_OSABI ELFOSABI_FREEBSD
6400
6401 #undef elf64_bed
6402 #define elf64_bed elf64_l1om_fbsd_bed
6403
6404 #include "elf64-target.h"
6405
6406 /* Intel K1OM support. */
6407
6408 static bfd_boolean
6409 elf64_k1om_elf_object_p (bfd *abfd)
6410 {
6411 /* Set the right machine number for a K1OM elf64 file. */
6412 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6413 return TRUE;
6414 }
6415
6416 #undef TARGET_LITTLE_SYM
6417 #define TARGET_LITTLE_SYM k1om_elf64_vec
6418 #undef TARGET_LITTLE_NAME
6419 #define TARGET_LITTLE_NAME "elf64-k1om"
6420 #undef ELF_ARCH
6421 #define ELF_ARCH bfd_arch_k1om
6422
6423 #undef ELF_MACHINE_CODE
6424 #define ELF_MACHINE_CODE EM_K1OM
6425
6426 #undef ELF_OSABI
6427
6428 #undef elf64_bed
6429 #define elf64_bed elf64_k1om_bed
6430
6431 #undef elf_backend_object_p
6432 #define elf_backend_object_p elf64_k1om_elf_object_p
6433
6434 #undef elf_backend_static_tls_alignment
6435
6436 #undef elf_backend_want_plt_sym
6437 #define elf_backend_want_plt_sym 0
6438
6439 #include "elf64-target.h"
6440
6441 /* FreeBSD K1OM support. */
6442
6443 #undef TARGET_LITTLE_SYM
6444 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6445 #undef TARGET_LITTLE_NAME
6446 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6447
6448 #undef ELF_OSABI
6449 #define ELF_OSABI ELFOSABI_FREEBSD
6450
6451 #undef elf64_bed
6452 #define elf64_bed elf64_k1om_fbsd_bed
6453
6454 #include "elf64-target.h"
6455
6456 /* 32bit x86-64 support. */
6457
6458 #undef TARGET_LITTLE_SYM
6459 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6460 #undef TARGET_LITTLE_NAME
6461 #define TARGET_LITTLE_NAME "elf32-x86-64"
6462 #undef elf32_bed
6463
6464 #undef ELF_ARCH
6465 #define ELF_ARCH bfd_arch_i386
6466
6467 #undef ELF_MACHINE_CODE
6468 #define ELF_MACHINE_CODE EM_X86_64
6469
6470 #undef ELF_OSABI
6471
6472 #undef elf_backend_object_p
6473 #define elf_backend_object_p \
6474 elf32_x86_64_elf_object_p
6475
6476 #undef elf_backend_bfd_from_remote_memory
6477 #define elf_backend_bfd_from_remote_memory \
6478 _bfd_elf32_bfd_from_remote_memory
6479
6480 #undef elf_backend_size_info
6481 #define elf_backend_size_info \
6482 _bfd_elf32_size_info
6483
6484 #include "elf32-target.h"