1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
45 the same way, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
46 relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
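/* (For reference: with the usual ELF definitions, ELF64_R_TYPE (i) is
   ((i) & 0xffffffff) while ELF32_R_TYPE (i) is ((i) & 0xff); every
   R_X86_64_* value fits in 8 bits, so the narrower mask gives the same
   answer for both ELF classes.)  */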
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked onto
417 the end of the args in some implementations (at least one,
418 anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
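
/* For reference, lazy binding with these templates works roughly as
   follows: each symbol's GOT slot initially points back at the pushq
   in its own PLT entry (plt_lazy_offset in the backend data below), so
   the first call falls through to push the relocation index and jump
   to the first PLT entry above, which in turn pushes GOT+8 (normally
   the link map) and jumps through GOT+16 (normally the dynamic
   linker's resolver).  The resolver then rewrites the GOT slot so that
   later calls go straight to the target.  */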
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure
584 linkage table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
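
/* In rough outline, the BND (MPX) scheme splits the PLT in two:
   branches to a symbol go through the second PLT (.plt.bnd, built from
   the 8-byte entries above), whose bnd jmp indirects through the GOT
   without losing the bound registers.  Each GOT slot initially points
   at the matching entry in the first PLT, which pushes the relocation
   index and does a bnd jmp to PLT0 for lazy resolution.  */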
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
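
/* A note on the DW_CFA_def_cfa_expression above: within a 16-byte PLT
   entry the CFA is %rsp + 8 until the 5-byte pushq at offset 6 has
   executed, and %rsp + 16 afterwards.  The expression encodes exactly
   that: CFA = %rsp + 8 + (((%rip & 15) >= 11 ? 1 : 0) << 3).  */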
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
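/* Parameters for the BND (MPX) PLT templates above.  The "1 +" in
   several of the offsets below accounts for the 0xf2 BND prefix byte
   in front of the instruction concerned; plt_got_offset and
   plt_got_insn_size refer to the bnd jmp in the second-PLT entry.  */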
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
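
/* Note: the two arrays above are not allocated separately; check_relocs
   below carves them out of the single block allocated for the local GOT
   reference counts, laid out as [refcounts | tlsdesc GOT offsets |
   tls_type bytes] with one element of each per local symbol.  */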
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols.  We reuse indx and dynstr_index for the local
920 symbol hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports a transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from GD access model. For 64bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transit to different access model. For 32bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transit to different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq %rbx, %rax
1254 call *%rax. */
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transit to different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq %rbx, %rax
1297 call *%rax. */
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
1340
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
1365 val = bfd_get_8 (abfd, contents + offset - 1);
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
1409
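/* In short (a rough summary of the transitions performed here): when
   linking an executable, general-dynamic and TLS-descriptor accesses
   are turned into initial-exec (R_X86_64_GOTTPOFF) for preemptible
   symbols and into local-exec (R_X86_64_TPOFF32) for symbols that
   resolve locally, and local-dynamic (R_X86_64_TLSLD) becomes
   local-exec.  Nothing is changed for shared objects or for
   STT_FUNC/STT_GNU_IFUNC symbols.  */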
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
1531
1532 /* Look through the relocs for a section during the first phase, and
1533 calculate needed space in the global offset table, procedure
1534 linkage table, and dynamic reloc sections. */
1535
1536 static bfd_boolean
1537 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1538 asection *sec,
1539 const Elf_Internal_Rela *relocs)
1540 {
1541 struct elf_x86_64_link_hash_table *htab;
1542 Elf_Internal_Shdr *symtab_hdr;
1543 struct elf_link_hash_entry **sym_hashes;
1544 const Elf_Internal_Rela *rel;
1545 const Elf_Internal_Rela *rel_end;
1546 asection *sreloc;
1547 bfd_boolean use_plt_got;
1548
1549 if (info->relocatable)
1550 return TRUE;
1551
1552 BFD_ASSERT (is_x86_64_elf (abfd));
1553
1554 htab = elf_x86_64_hash_table (info);
1555 if (htab == NULL)
1556 return FALSE;
1557
1558 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1559
1560 symtab_hdr = &elf_symtab_hdr (abfd);
1561 sym_hashes = elf_sym_hashes (abfd);
1562
1563 sreloc = NULL;
1564
1565 rel_end = relocs + sec->reloc_count;
1566 for (rel = relocs; rel < rel_end; rel++)
1567 {
1568 unsigned int r_type;
1569 unsigned long r_symndx;
1570 struct elf_link_hash_entry *h;
1571 Elf_Internal_Sym *isym;
1572 const char *name;
1573 bfd_boolean size_reloc;
1574
1575 r_symndx = htab->r_sym (rel->r_info);
1576 r_type = ELF32_R_TYPE (rel->r_info);
1577
1578 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1579 {
1580 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1581 abfd, r_symndx);
1582 return FALSE;
1583 }
1584
1585 if (r_symndx < symtab_hdr->sh_info)
1586 {
1587 /* A local symbol. */
1588 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1589 abfd, r_symndx);
1590 if (isym == NULL)
1591 return FALSE;
1592
1593 /* Check relocation against local STT_GNU_IFUNC symbol. */
1594 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1595 {
1596 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1597 TRUE);
1598 if (h == NULL)
1599 return FALSE;
1600
1601 /* Fake a STT_GNU_IFUNC symbol. */
1602 h->type = STT_GNU_IFUNC;
1603 h->def_regular = 1;
1604 h->ref_regular = 1;
1605 h->forced_local = 1;
1606 h->root.type = bfd_link_hash_defined;
1607 }
1608 else
1609 h = NULL;
1610 }
1611 else
1612 {
1613 isym = NULL;
1614 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1615 while (h->root.type == bfd_link_hash_indirect
1616 || h->root.type == bfd_link_hash_warning)
1617 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1618 }
1619
1620 /* Check invalid x32 relocations. */
1621 if (!ABI_64_P (abfd))
1622 switch (r_type)
1623 {
1624 default:
1625 break;
1626
1627 case R_X86_64_DTPOFF64:
1628 case R_X86_64_TPOFF64:
1629 case R_X86_64_PC64:
1630 case R_X86_64_GOTOFF64:
1631 case R_X86_64_GOT64:
1632 case R_X86_64_GOTPCREL64:
1633 case R_X86_64_GOTPC64:
1634 case R_X86_64_GOTPLT64:
1635 case R_X86_64_PLTOFF64:
1636 {
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1641 NULL);
1642 (*_bfd_error_handler)
1643 (_("%B: relocation %s against symbol `%s' isn't "
1644 "supported in x32 mode"), abfd,
1645 x86_64_elf_howto_table[r_type].name, name);
1646 bfd_set_error (bfd_error_bad_value);
1647 return FALSE;
1648 }
1649 break;
1650 }
1651
1652 if (h != NULL)
1653 {
1654 /* Create the ifunc sections for static executables.  If we never
1655 see an indirect function symbol and we are not building a static
1656 executable, those sections will be empty and won't appear in the
1657 output. */
1658 switch (r_type)
1659 {
1660 default:
1661 break;
1662
1663 case R_X86_64_PC32_BND:
1664 case R_X86_64_PLT32_BND:
1665 case R_X86_64_PC32:
1666 case R_X86_64_PLT32:
1667 case R_X86_64_32:
1668 case R_X86_64_64:
1669 /* MPX PLT is supported only if elf_x86_64_arch_bed
1670 is used in 64-bit mode. */
1671 if (ABI_64_P (abfd)
1672 && info->bndplt
1673 && (get_elf_x86_64_backend_data (abfd)
1674 == &elf_x86_64_arch_bed))
1675 {
1676 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1677
1678 /* Create the second PLT for Intel MPX support. */
1679 if (htab->plt_bnd == NULL)
1680 {
1681 unsigned int plt_bnd_align;
1682 const struct elf_backend_data *bed;
1683
1684 bed = get_elf_backend_data (info->output_bfd);
1685 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1686 && (sizeof (elf_x86_64_bnd_plt2_entry)
1687 == sizeof (elf_x86_64_legacy_plt2_entry)));
1688 plt_bnd_align = 3;
1689
1690 if (htab->elf.dynobj == NULL)
1691 htab->elf.dynobj = abfd;
1692 htab->plt_bnd
1693 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1694 ".plt.bnd",
1695 (bed->dynamic_sec_flags
1696 | SEC_ALLOC
1697 | SEC_CODE
1698 | SEC_LOAD
1699 | SEC_READONLY));
1700 if (htab->plt_bnd == NULL
1701 || !bfd_set_section_alignment (htab->elf.dynobj,
1702 htab->plt_bnd,
1703 plt_bnd_align))
1704 return FALSE;
1705 }
1706 }
1707
1708 case R_X86_64_32S:
1709 case R_X86_64_PC64:
1710 case R_X86_64_GOTPCREL:
1711 case R_X86_64_GOTPCREL64:
1712 if (htab->elf.dynobj == NULL)
1713 htab->elf.dynobj = abfd;
1714 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1715 return FALSE;
1716 break;
1717 }
1718
1719 /* It is referenced by a non-shared object. */
1720 h->ref_regular = 1;
1721 h->root.non_ir_ref = 1;
1722 }
1723
1724 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1725 symtab_hdr, sym_hashes,
1726 &r_type, GOT_UNKNOWN,
1727 rel, rel_end, h, r_symndx))
1728 return FALSE;
1729
1730 switch (r_type)
1731 {
1732 case R_X86_64_TLSLD:
1733 htab->tls_ld_got.refcount += 1;
1734 goto create_got;
1735
1736 case R_X86_64_TPOFF32:
1737 if (!info->executable && ABI_64_P (abfd))
1738 {
1739 if (h)
1740 name = h->root.root.string;
1741 else
1742 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1743 NULL);
1744 (*_bfd_error_handler)
1745 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1746 abfd,
1747 x86_64_elf_howto_table[r_type].name, name);
1748 bfd_set_error (bfd_error_bad_value);
1749 return FALSE;
1750 }
1751 break;
1752
1753 case R_X86_64_GOTTPOFF:
1754 if (!info->executable)
1755 info->flags |= DF_STATIC_TLS;
1756 /* Fall through */
1757
1758 case R_X86_64_GOT32:
1759 case R_X86_64_GOTPCREL:
1760 case R_X86_64_TLSGD:
1761 case R_X86_64_GOT64:
1762 case R_X86_64_GOTPCREL64:
1763 case R_X86_64_GOTPLT64:
1764 case R_X86_64_GOTPC32_TLSDESC:
1765 case R_X86_64_TLSDESC_CALL:
1766 /* This symbol requires a global offset table entry. */
1767 {
1768 int tls_type, old_tls_type;
1769
1770 switch (r_type)
1771 {
1772 default: tls_type = GOT_NORMAL; break;
1773 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1774 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1775 case R_X86_64_GOTPC32_TLSDESC:
1776 case R_X86_64_TLSDESC_CALL:
1777 tls_type = GOT_TLS_GDESC; break;
1778 }
1779
1780 if (h != NULL)
1781 {
1782 h->got.refcount += 1;
1783 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1784 }
1785 else
1786 {
1787 bfd_signed_vma *local_got_refcounts;
1788
1789 /* This is a global offset table entry for a local symbol. */
1790 local_got_refcounts = elf_local_got_refcounts (abfd);
1791 if (local_got_refcounts == NULL)
1792 {
1793 bfd_size_type size;
1794
1795 size = symtab_hdr->sh_info;
1796 size *= sizeof (bfd_signed_vma)
1797 + sizeof (bfd_vma) + sizeof (char);
1798 local_got_refcounts = ((bfd_signed_vma *)
1799 bfd_zalloc (abfd, size));
1800 if (local_got_refcounts == NULL)
1801 return FALSE;
1802 elf_local_got_refcounts (abfd) = local_got_refcounts;
1803 elf_x86_64_local_tlsdesc_gotent (abfd)
1804 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1805 elf_x86_64_local_got_tls_type (abfd)
1806 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1807 }
1808 local_got_refcounts[r_symndx] += 1;
1809 old_tls_type
1810 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1811 }
1812
1813 /* If a TLS symbol is accessed using IE at least once,
1814 there is no point in using a dynamic model for it. */
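	  /* The merge rule implemented below is, roughly: IE wins over
	     GD/GDESC, GD and GDESC may be combined with each other, and
	     mixing a TLS access model with a normal GOT access is
	     reported as an error.  */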
1815 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1816 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1817 || tls_type != GOT_TLS_IE))
1818 {
1819 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1820 tls_type = old_tls_type;
1821 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1822 && GOT_TLS_GD_ANY_P (tls_type))
1823 tls_type |= old_tls_type;
1824 else
1825 {
1826 if (h)
1827 name = h->root.root.string;
1828 else
1829 name = bfd_elf_sym_name (abfd, symtab_hdr,
1830 isym, NULL);
1831 (*_bfd_error_handler)
1832 (_("%B: '%s' accessed both as normal and thread local symbol"),
1833 abfd, name);
1834 bfd_set_error (bfd_error_bad_value);
1835 return FALSE;
1836 }
1837 }
1838
1839 if (old_tls_type != tls_type)
1840 {
1841 if (h != NULL)
1842 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1843 else
1844 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1845 }
1846 }
1847 /* Fall through */
1848
1849 case R_X86_64_GOTOFF64:
1850 case R_X86_64_GOTPC32:
1851 case R_X86_64_GOTPC64:
1852 create_got:
1853 if (htab->elf.sgot == NULL)
1854 {
1855 if (htab->elf.dynobj == NULL)
1856 htab->elf.dynobj = abfd;
1857 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1858 info))
1859 return FALSE;
1860 }
1861 break;
1862
1863 case R_X86_64_PLT32:
1864 case R_X86_64_PLT32_BND:
1865 /* This symbol requires a procedure linkage table entry. We
1866 actually build the entry in adjust_dynamic_symbol,
1867 because this might be a case of linking PIC code which is
1868 never referenced by a dynamic object, in which case we
1869 don't need to generate a procedure linkage table entry
1870 after all. */
1871
1872 /* If this is a local symbol, we resolve it directly without
1873 creating a procedure linkage table entry. */
1874 if (h == NULL)
1875 continue;
1876
1877 h->needs_plt = 1;
1878 h->plt.refcount += 1;
1879 break;
1880
1881 case R_X86_64_PLTOFF64:
1882 /* This tries to form the 'address' of a function relative
1883 to the GOT. For global symbols we need a PLT entry. */
1884 if (h != NULL)
1885 {
1886 h->needs_plt = 1;
1887 h->plt.refcount += 1;
1888 }
1889 goto create_got;
1890
1891 case R_X86_64_SIZE32:
1892 case R_X86_64_SIZE64:
1893 size_reloc = TRUE;
1894 goto do_size;
1895
1896 case R_X86_64_32:
1897 if (!ABI_64_P (abfd))
1898 goto pointer;
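	  /* Fall through.  */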
1899 case R_X86_64_8:
1900 case R_X86_64_16:
1901 case R_X86_64_32S:
1902 /* Let's help debug shared library creation. These relocs
1903 cannot be used in shared libs. Don't error out for
1904 sections we don't care about, such as debug sections or
1905 non-constant sections. */
1906 if (info->shared
1907 && (sec->flags & SEC_ALLOC) != 0
1908 && (sec->flags & SEC_READONLY) != 0)
1909 {
1910 if (h)
1911 name = h->root.root.string;
1912 else
1913 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1914 (*_bfd_error_handler)
1915 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1916 abfd, x86_64_elf_howto_table[r_type].name, name);
1917 bfd_set_error (bfd_error_bad_value);
1918 return FALSE;
1919 }
1920 /* Fall through. */
1921
1922 case R_X86_64_PC8:
1923 case R_X86_64_PC16:
1924 case R_X86_64_PC32:
1925 case R_X86_64_PC32_BND:
1926 case R_X86_64_PC64:
1927 case R_X86_64_64:
1928 pointer:
1929 if (h != NULL && info->executable)
1930 {
1931 /* If this reloc is in a read-only section, we might
1932 need a copy reloc. We can't check reliably at this
1933 stage whether the section is read-only, as input
1934 sections have not yet been mapped to output sections.
1935 Tentatively set the flag for now, and correct in
1936 adjust_dynamic_symbol. */
1937 h->non_got_ref = 1;
1938
1939 /* We may need a .plt entry if the function this reloc
1940 refers to is in a shared lib. */
1941 h->plt.refcount += 1;
1942 if (r_type != R_X86_64_PC32
1943 && r_type != R_X86_64_PC32_BND
1944 && r_type != R_X86_64_PC64)
1945 h->pointer_equality_needed = 1;
1946 }
1947
1948 size_reloc = FALSE;
1949 do_size:
1950 /* If we are creating a shared library, and this is a reloc
1951 against a global symbol, or a non PC relative reloc
1952 against a local symbol, then we need to copy the reloc
1953 into the shared library. However, if we are linking with
1954 -Bsymbolic, we do not need to copy a reloc against a
1955 global symbol which is defined in an object we are
1956 including in the link (i.e., DEF_REGULAR is set). At
1957 this point we have not seen all the input files, so it is
1958 possible that DEF_REGULAR is not set now but will be set
1959 later (it is never cleared). In case of a weak definition,
1960 DEF_REGULAR may be cleared later by a strong definition in
1961 a shared library. We account for that possibility below by
1962 storing information in the dyn_relocs field of the hash
1963 table entry. A similar situation occurs when creating
1964 shared libraries and symbol visibility changes render the
1965 symbol local.
1966
1967 If on the other hand, we are creating an executable, we
1968 may need to keep relocations for symbols satisfied by a
1969 dynamic library if we manage to avoid copy relocs for the
1970 symbol. */
1971 if ((info->shared
1972 && (sec->flags & SEC_ALLOC) != 0
1973 && (! IS_X86_64_PCREL_TYPE (r_type)
1974 || (h != NULL
1975 && (! SYMBOLIC_BIND (info, h)
1976 || h->root.type == bfd_link_hash_defweak
1977 || !h->def_regular))))
1978 || (ELIMINATE_COPY_RELOCS
1979 && !info->shared
1980 && (sec->flags & SEC_ALLOC) != 0
1981 && h != NULL
1982 && (h->root.type == bfd_link_hash_defweak
1983 || !h->def_regular)))
1984 {
1985 struct elf_dyn_relocs *p;
1986 struct elf_dyn_relocs **head;
1987
1988 /* We must copy these reloc types into the output file.
1989 Create a reloc section in dynobj and make room for
1990 this reloc. */
1991 if (sreloc == NULL)
1992 {
1993 if (htab->elf.dynobj == NULL)
1994 htab->elf.dynobj = abfd;
1995
1996 sreloc = _bfd_elf_make_dynamic_reloc_section
1997 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1998 abfd, /*rela?*/ TRUE);
1999
2000 if (sreloc == NULL)
2001 return FALSE;
2002 }
2003
2004 /* If this is a global symbol, we count the number of
2005 relocations we need for this symbol. */
2006 if (h != NULL)
2007 {
2008 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2009 }
2010 else
2011 {
2012 /* Track dynamic relocs needed for local syms too.
2013 We really need local syms available to do this
2014 easily. Oh well. */
2015 asection *s;
2016 void **vpp;
2017
2018 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2019 abfd, r_symndx);
2020 if (isym == NULL)
2021 return FALSE;
2022
2023 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2024 if (s == NULL)
2025 s = sec;
2026
2027 /* Beware of type punned pointers vs strict aliasing
2028 rules. */
2029 vpp = &(elf_section_data (s)->local_dynrel);
2030 head = (struct elf_dyn_relocs **)vpp;
2031 }
2032
2033 p = *head;
2034 if (p == NULL || p->sec != sec)
2035 {
2036 bfd_size_type amt = sizeof *p;
2037
2038 p = ((struct elf_dyn_relocs *)
2039 bfd_alloc (htab->elf.dynobj, amt));
2040 if (p == NULL)
2041 return FALSE;
2042 p->next = *head;
2043 *head = p;
2044 p->sec = sec;
2045 p->count = 0;
2046 p->pc_count = 0;
2047 }
2048
2049 p->count += 1;
2050 /* Count size relocation as PC-relative relocation. */
2051 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2052 p->pc_count += 1;
2053 }
2054 break;
2055
2056 /* This relocation describes the C++ object vtable hierarchy.
2057 Reconstruct it for later use during GC. */
2058 case R_X86_64_GNU_VTINHERIT:
2059 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2060 return FALSE;
2061 break;
2062
2063 /* This relocation describes which C++ vtable entries are actually
2064 used. Record for later use during GC. */
2065 case R_X86_64_GNU_VTENTRY:
2066 BFD_ASSERT (h != NULL);
2067 if (h != NULL
2068 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2069 return FALSE;
2070 break;
2071
2072 default:
2073 break;
2074 }
2075
2076 if (use_plt_got
2077 && h != NULL
2078 && h->plt.refcount > 0
2079 && h->got.refcount > 0
2080 && htab->plt_got == NULL)
2081 {
2082 /* Create the GOT procedure linkage table. */
2083 unsigned int plt_got_align;
2084 const struct elf_backend_data *bed;
2085
2086 bed = get_elf_backend_data (info->output_bfd);
2087 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2088 && (sizeof (elf_x86_64_bnd_plt2_entry)
2089 == sizeof (elf_x86_64_legacy_plt2_entry)));
2090 plt_got_align = 3;
2091
2092 if (htab->elf.dynobj == NULL)
2093 htab->elf.dynobj = abfd;
2094 htab->plt_got
2095 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2096 ".plt.got",
2097 (bed->dynamic_sec_flags
2098 | SEC_ALLOC
2099 | SEC_CODE
2100 | SEC_LOAD
2101 | SEC_READONLY));
2102 if (htab->plt_got == NULL
2103 || !bfd_set_section_alignment (htab->elf.dynobj,
2104 htab->plt_got,
2105 plt_got_align))
2106 return FALSE;
2107 }
2108 }
2109
2110 return TRUE;
2111 }
2112
2113 /* Return the section that should be marked against GC for a given
2114 relocation. */
2115
2116 static asection *
2117 elf_x86_64_gc_mark_hook (asection *sec,
2118 struct bfd_link_info *info,
2119 Elf_Internal_Rela *rel,
2120 struct elf_link_hash_entry *h,
2121 Elf_Internal_Sym *sym)
2122 {
2123 if (h != NULL)
2124 switch (ELF32_R_TYPE (rel->r_info))
2125 {
2126 case R_X86_64_GNU_VTINHERIT:
2127 case R_X86_64_GNU_VTENTRY:
2128 return NULL;
2129 }
2130
2131 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2132 }
2133
2134 /* Update the got entry reference counts for the section being removed. */
2135
2136 static bfd_boolean
2137 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2138 asection *sec,
2139 const Elf_Internal_Rela *relocs)
2140 {
2141 struct elf_x86_64_link_hash_table *htab;
2142 Elf_Internal_Shdr *symtab_hdr;
2143 struct elf_link_hash_entry **sym_hashes;
2144 bfd_signed_vma *local_got_refcounts;
2145 const Elf_Internal_Rela *rel, *relend;
2146
2147 if (info->relocatable)
2148 return TRUE;
2149
2150 htab = elf_x86_64_hash_table (info);
2151 if (htab == NULL)
2152 return FALSE;
2153
2154 elf_section_data (sec)->local_dynrel = NULL;
2155
2156 symtab_hdr = &elf_symtab_hdr (abfd);
2157 sym_hashes = elf_sym_hashes (abfd);
2158 local_got_refcounts = elf_local_got_refcounts (abfd);
2159
2160 htab = elf_x86_64_hash_table (info);
2161 relend = relocs + sec->reloc_count;
2162 for (rel = relocs; rel < relend; rel++)
2163 {
2164 unsigned long r_symndx;
2165 unsigned int r_type;
2166 struct elf_link_hash_entry *h = NULL;
2167
2168 r_symndx = htab->r_sym (rel->r_info);
2169 if (r_symndx >= symtab_hdr->sh_info)
2170 {
2171 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2172 while (h->root.type == bfd_link_hash_indirect
2173 || h->root.type == bfd_link_hash_warning)
2174 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2175 }
2176 else
2177 {
2178 /* A local symbol. */
2179 Elf_Internal_Sym *isym;
2180
2181 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2182 abfd, r_symndx);
2183
2184 /* Check relocation against local STT_GNU_IFUNC symbol. */
2185 if (isym != NULL
2186 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2187 {
2188 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2189 if (h == NULL)
2190 abort ();
2191 }
2192 }
2193
2194 if (h)
2195 {
2196 struct elf_x86_64_link_hash_entry *eh;
2197 struct elf_dyn_relocs **pp;
2198 struct elf_dyn_relocs *p;
2199
2200 eh = (struct elf_x86_64_link_hash_entry *) h;
2201
2202 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2203 if (p->sec == sec)
2204 {
2205 /* Everything must go for SEC. */
2206 *pp = p->next;
2207 break;
2208 }
2209 }
2210
2211 r_type = ELF32_R_TYPE (rel->r_info);
2212 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2213 symtab_hdr, sym_hashes,
2214 &r_type, GOT_UNKNOWN,
2215 rel, relend, h, r_symndx))
2216 return FALSE;
2217
2218 switch (r_type)
2219 {
2220 case R_X86_64_TLSLD:
2221 if (htab->tls_ld_got.refcount > 0)
2222 htab->tls_ld_got.refcount -= 1;
2223 break;
2224
2225 case R_X86_64_TLSGD:
2226 case R_X86_64_GOTPC32_TLSDESC:
2227 case R_X86_64_TLSDESC_CALL:
2228 case R_X86_64_GOTTPOFF:
2229 case R_X86_64_GOT32:
2230 case R_X86_64_GOTPCREL:
2231 case R_X86_64_GOT64:
2232 case R_X86_64_GOTPCREL64:
2233 case R_X86_64_GOTPLT64:
2234 if (h != NULL)
2235 {
2236 if (h->got.refcount > 0)
2237 h->got.refcount -= 1;
2238 if (h->type == STT_GNU_IFUNC)
2239 {
2240 if (h->plt.refcount > 0)
2241 h->plt.refcount -= 1;
2242 }
2243 }
2244 else if (local_got_refcounts != NULL)
2245 {
2246 if (local_got_refcounts[r_symndx] > 0)
2247 local_got_refcounts[r_symndx] -= 1;
2248 }
2249 break;
2250
2251 case R_X86_64_8:
2252 case R_X86_64_16:
2253 case R_X86_64_32:
2254 case R_X86_64_64:
2255 case R_X86_64_32S:
2256 case R_X86_64_PC8:
2257 case R_X86_64_PC16:
2258 case R_X86_64_PC32:
2259 case R_X86_64_PC32_BND:
2260 case R_X86_64_PC64:
2261 case R_X86_64_SIZE32:
2262 case R_X86_64_SIZE64:
2263 if (info->shared
2264 && (h == NULL || h->type != STT_GNU_IFUNC))
2265 break;
2266 /* Fall thru */
2267
2268 case R_X86_64_PLT32:
2269 case R_X86_64_PLT32_BND:
2270 case R_X86_64_PLTOFF64:
2271 if (h != NULL)
2272 {
2273 if (h->plt.refcount > 0)
2274 h->plt.refcount -= 1;
2275 }
2276 break;
2277
2278 default:
2279 break;
2280 }
2281 }
2282
2283 return TRUE;
2284 }
2285
2286 /* Adjust a symbol defined by a dynamic object and referenced by a
2287 regular object. The current definition is in some section of the
2288 dynamic object, but we're not including those sections. We have to
2289 change the definition to something the rest of the link can
2290 understand. */
2291
2292 static bfd_boolean
2293 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2294 struct elf_link_hash_entry *h)
2295 {
2296 struct elf_x86_64_link_hash_table *htab;
2297 asection *s;
2298 struct elf_x86_64_link_hash_entry *eh;
2299 struct elf_dyn_relocs *p;
2300
2301 /* STT_GNU_IFUNC symbol must go through PLT. */
2302 if (h->type == STT_GNU_IFUNC)
2303 {
2304 /* All local STT_GNU_IFUNC references must be treated as local
2305 calls via the local PLT. */
2306 if (h->ref_regular
2307 && SYMBOL_CALLS_LOCAL (info, h))
2308 {
2309 bfd_size_type pc_count = 0, count = 0;
2310 struct elf_dyn_relocs **pp;
2311
2312 eh = (struct elf_x86_64_link_hash_entry *) h;
2313 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2314 {
2315 pc_count += p->pc_count;
2316 p->count -= p->pc_count;
2317 p->pc_count = 0;
2318 count += p->count;
2319 if (p->count == 0)
2320 *pp = p->next;
2321 else
2322 pp = &p->next;
2323 }
2324
2325 if (pc_count || count)
2326 {
2327 h->needs_plt = 1;
2328 h->non_got_ref = 1;
2329 if (h->plt.refcount <= 0)
2330 h->plt.refcount = 1;
2331 else
2332 h->plt.refcount += 1;
2333 }
2334 }
2335
2336 if (h->plt.refcount <= 0)
2337 {
2338 h->plt.offset = (bfd_vma) -1;
2339 h->needs_plt = 0;
2340 }
2341 return TRUE;
2342 }
2343
2344 /* If this is a function, put it in the procedure linkage table. We
2345 will fill in the contents of the procedure linkage table later,
2346 when we know the address of the .got section. */
2347 if (h->type == STT_FUNC
2348 || h->needs_plt)
2349 {
2350 if (h->plt.refcount <= 0
2351 || SYMBOL_CALLS_LOCAL (info, h)
2352 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2353 && h->root.type == bfd_link_hash_undefweak))
2354 {
2355 /* This case can occur if we saw a PLT32 reloc in an input
2356 file, but the symbol was never referred to by a dynamic
2357 object, or if all references were garbage collected. In
2358 such a case, we don't actually need to build a procedure
2359 linkage table, and we can just do a PC32 reloc instead. */
2360 h->plt.offset = (bfd_vma) -1;
2361 h->needs_plt = 0;
2362 }
2363
2364 return TRUE;
2365 }
2366 else
2367 /* It's possible that we incorrectly decided a .plt reloc was
2368 needed for an R_X86_64_PC32 reloc to a non-function sym in
2369 check_relocs. We can't decide accurately between function and
2370 non-function syms in check-relocs; Objects loaded later in
2371 the link may change h->type. So fix it now. */
2372 h->plt.offset = (bfd_vma) -1;
2373
2374 /* If this is a weak symbol, and there is a real definition, the
2375 processor independent code will have arranged for us to see the
2376 real definition first, and we can just use the same value. */
2377 if (h->u.weakdef != NULL)
2378 {
2379 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2380 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2381 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2382 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2383 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2384 {
2385 eh = (struct elf_x86_64_link_hash_entry *) h;
2386 h->non_got_ref = h->u.weakdef->non_got_ref;
2387 eh->needs_copy = h->u.weakdef->needs_copy;
2388 }
2389 return TRUE;
2390 }
2391
2392 /* This is a reference to a symbol defined by a dynamic object which
2393 is not a function. */
2394
2395 /* If we are creating a shared library, we must presume that the
2396 only references to the symbol are via the global offset table.
2397 For such cases we need not do anything here; the relocations will
2398 be handled correctly by relocate_section. */
2399 if (!info->executable)
2400 return TRUE;
2401
2402 /* If there are no references to this symbol that do not use the
2403 GOT, we don't need to generate a copy reloc. */
2404 if (!h->non_got_ref)
2405 return TRUE;
2406
2407 /* If -z nocopyreloc was given, we won't generate them either. */
2408 if (info->nocopyreloc)
2409 {
2410 h->non_got_ref = 0;
2411 return TRUE;
2412 }
2413
2414 if (ELIMINATE_COPY_RELOCS)
2415 {
2416 eh = (struct elf_x86_64_link_hash_entry *) h;
2417 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2418 {
2419 s = p->sec->output_section;
2420 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2421 break;
2422 }
2423
2424 /* If we didn't find any dynamic relocs in read-only sections, then
2425 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2426 if (p == NULL)
2427 {
2428 h->non_got_ref = 0;
2429 return TRUE;
2430 }
2431 }
2432
2433 /* We must allocate the symbol in our .dynbss section, which will
2434 become part of the .bss section of the executable. There will be
2435 an entry for this symbol in the .dynsym section. The dynamic
2436 object will contain position independent code, so all references
2437 from the dynamic object to this symbol will go through the global
2438 offset table. The dynamic linker will use the .dynsym entry to
2439 determine the address it must put in the global offset table, so
2440 both the dynamic object and the regular object will refer to the
2441 same memory location for the variable. */
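   /* For example, an executable that directly reads or takes the address
      of a data symbol defined only in a shared library gets space reserved
      for it in .dynbss plus an R_X86_64_COPY reloc, so that the executable
      and the library end up referring to the executable's copy.  */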
2442
2443 htab = elf_x86_64_hash_table (info);
2444 if (htab == NULL)
2445 return FALSE;
2446
2447 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2448 to copy the initial value out of the dynamic object and into the
2449 runtime process image. */
2450 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2451 {
2452 const struct elf_backend_data *bed;
2453 bed = get_elf_backend_data (info->output_bfd);
2454 htab->srelbss->size += bed->s->sizeof_rela;
2455 h->needs_copy = 1;
2456 }
2457
2458 s = htab->sdynbss;
2459
2460 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2461 }
2462
2463 /* Allocate space in .plt, .got and associated reloc sections for
2464 dynamic relocs. */
2465
2466 static bfd_boolean
2467 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2468 {
2469 struct bfd_link_info *info;
2470 struct elf_x86_64_link_hash_table *htab;
2471 struct elf_x86_64_link_hash_entry *eh;
2472 struct elf_dyn_relocs *p;
2473 const struct elf_backend_data *bed;
2474 unsigned int plt_entry_size;
2475
2476 if (h->root.type == bfd_link_hash_indirect)
2477 return TRUE;
2478
2479 eh = (struct elf_x86_64_link_hash_entry *) h;
2480
2481 info = (struct bfd_link_info *) inf;
2482 htab = elf_x86_64_hash_table (info);
2483 if (htab == NULL)
2484 return FALSE;
2485 bed = get_elf_backend_data (info->output_bfd);
2486 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2487
2488 /* We can't use the GOT PLT if pointer equality is needed since
2489 finish_dynamic_symbol won't clear symbol value and the dynamic
2490 linker won't update the GOT slot. We will get into an infinite
2491 loop at run-time. */
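  /* (A .plt.got entry is just an indirect jump through the symbol's
     regular GOT slot, which the dynamic linker fills in via an
     R_X86_64_GLOB_DAT relocation; no lazy-binding JUMP_SLOT entry is
     created, which is why it is only usable when pointer equality is
     not required.)  */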
2492 if (htab->plt_got != NULL
2493 && h->type != STT_GNU_IFUNC
2494 && !h->pointer_equality_needed
2495 && h->plt.refcount > 0
2496 && h->got.refcount > 0)
2497 {
2498 /* Don't use the regular PLT if there are both GOT and GOTPLT
2499 relocations. */
2500 h->plt.offset = (bfd_vma) -1;
2501
2502 /* Use the GOT PLT. */
2503 eh->plt_got.refcount = 1;
2504 }
2505
2506 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2507 here if it is defined and referenced in a non-shared object. */
2508 if (h->type == STT_GNU_IFUNC
2509 && h->def_regular)
2510 {
2511 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2512 &eh->dyn_relocs,
2513 plt_entry_size,
2514 plt_entry_size,
2515 GOT_ENTRY_SIZE))
2516 {
2517 asection *s = htab->plt_bnd;
2518 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2519 {
2520 /* Use the .plt.bnd section if it is created. */
2521 eh->plt_bnd.offset = s->size;
2522
2523 /* Make room for this entry in the .plt.bnd section. */
2524 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2525 }
2526
2527 return TRUE;
2528 }
2529 else
2530 return FALSE;
2531 }
2532 else if (htab->elf.dynamic_sections_created
2533 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2534 {
2535 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2536
2537 /* Make sure this symbol is output as a dynamic symbol.
2538 Undefined weak syms won't yet be marked as dynamic. */
2539 if (h->dynindx == -1
2540 && !h->forced_local)
2541 {
2542 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2543 return FALSE;
2544 }
2545
2546 if (info->shared
2547 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2548 {
2549 asection *s = htab->elf.splt;
2550 asection *bnd_s = htab->plt_bnd;
2551 asection *got_s = htab->plt_got;
2552
2553 /* If this is the first .plt entry, make room for the special
2554 first entry. */
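	  /* (On x86-64 this reserved PLT0 slot holds the lazy-binding
	     stub, which pushes GOT+8 and jumps through GOT+16 to the
	     dynamic linker's resolver.)  */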
2555 if (s->size == 0)
2556 s->size = plt_entry_size;
2557
2558 if (use_plt_got)
2559 eh->plt_got.offset = got_s->size;
2560 else
2561 {
2562 h->plt.offset = s->size;
2563 if (bnd_s)
2564 eh->plt_bnd.offset = bnd_s->size;
2565 }
2566
2567 /* If this symbol is not defined in a regular file, and we are
2568 not generating a shared library, then set the symbol to this
2569 location in the .plt. This is required to make function
2570 pointers compare as equal between the normal executable and
2571 the shared library. */
2572 if (! info->shared
2573 && !h->def_regular)
2574 {
2575 if (use_plt_got)
2576 {
2577 /* We need to make a call to the entry of the GOT PLT
2578 instead of regular PLT entry. */
2579 h->root.u.def.section = got_s;
2580 h->root.u.def.value = eh->plt_got.offset;
2581 }
2582 else
2583 {
2584 if (bnd_s)
2585 {
2586 /* We need to make a call to the entry of the second
2587 PLT instead of regular PLT entry. */
2588 h->root.u.def.section = bnd_s;
2589 h->root.u.def.value = eh->plt_bnd.offset;
2590 }
2591 else
2592 {
2593 h->root.u.def.section = s;
2594 h->root.u.def.value = h->plt.offset;
2595 }
2596 }
2597 }
2598
2599 /* Make room for this entry. */
2600 if (use_plt_got)
2601 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2602 else
2603 {
2604 s->size += plt_entry_size;
2605 if (bnd_s)
2606 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2607
2608 /* We also need to make an entry in the .got.plt section,
2609 which will be placed in the .got section by the linker
2610 script. */
2611 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2612
2613 /* We also need to make an entry in the .rela.plt
2614 section. */
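	      /* (This is the R_X86_64_JUMP_SLOT relocation that the
		 dynamic linker uses to fill in the .got.plt slot, either
		 lazily or at load time.)  */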
2615 htab->elf.srelplt->size += bed->s->sizeof_rela;
2616 htab->elf.srelplt->reloc_count++;
2617 }
2618 }
2619 else
2620 {
2621 h->plt.offset = (bfd_vma) -1;
2622 h->needs_plt = 0;
2623 }
2624 }
2625 else
2626 {
2627 h->plt.offset = (bfd_vma) -1;
2628 h->needs_plt = 0;
2629 }
2630
2631 eh->tlsdesc_got = (bfd_vma) -1;
2632
2633 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2634 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2635 if (h->got.refcount > 0
2636 && info->executable
2637 && h->dynindx == -1
2638 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2639 {
2640 h->got.offset = (bfd_vma) -1;
2641 }
2642 else if (h->got.refcount > 0)
2643 {
2644 asection *s;
2645 bfd_boolean dyn;
2646 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2647
2648 /* Make sure this symbol is output as a dynamic symbol.
2649 Undefined weak syms won't yet be marked as dynamic. */
2650 if (h->dynindx == -1
2651 && !h->forced_local)
2652 {
2653 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2654 return FALSE;
2655 }
2656
2657 if (GOT_TLS_GDESC_P (tls_type))
2658 {
2659 eh->tlsdesc_got = htab->elf.sgotplt->size
2660 - elf_x86_64_compute_jump_table_size (htab);
2661 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2662 h->got.offset = (bfd_vma) -2;
2663 }
2664 if (! GOT_TLS_GDESC_P (tls_type)
2665 || GOT_TLS_GD_P (tls_type))
2666 {
2667 s = htab->elf.sgot;
2668 h->got.offset = s->size;
2669 s->size += GOT_ENTRY_SIZE;
2670 if (GOT_TLS_GD_P (tls_type))
2671 s->size += GOT_ENTRY_SIZE;
2672 }
2673 dyn = htab->elf.dynamic_sections_created;
2674 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2675 and two if global.
2676 R_X86_64_GOTTPOFF needs one dynamic relocation. */
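      /* (The TLSGD GOT slots are relocated with R_X86_64_DTPMOD64 and
	 R_X86_64_DTPOFF64, while a GOTTPOFF (IE) slot gets a single
	 R_X86_64_TPOFF64.)  */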
2677 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2678 || tls_type == GOT_TLS_IE)
2679 htab->elf.srelgot->size += bed->s->sizeof_rela;
2680 else if (GOT_TLS_GD_P (tls_type))
2681 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2682 else if (! GOT_TLS_GDESC_P (tls_type)
2683 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2684 || h->root.type != bfd_link_hash_undefweak)
2685 && (info->shared
2686 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2687 htab->elf.srelgot->size += bed->s->sizeof_rela;
2688 if (GOT_TLS_GDESC_P (tls_type))
2689 {
2690 htab->elf.srelplt->size += bed->s->sizeof_rela;
2691 htab->tlsdesc_plt = (bfd_vma) -1;
2692 }
2693 }
2694 else
2695 h->got.offset = (bfd_vma) -1;
2696
2697 if (eh->dyn_relocs == NULL)
2698 return TRUE;
2699
2700 /* In the shared -Bsymbolic case, discard space allocated for
2701 dynamic pc-relative relocs against symbols which turn out to be
2702 defined in regular objects. For the normal shared case, discard
2703 space for pc-relative relocs that have become local due to symbol
2704 visibility changes. */
2705
2706 if (info->shared)
2707 {
2708 /* Relocs that use pc_count are those that appear on a call
2709 insn, or certain REL relocs that can be generated via assembly.
2710 We want calls to protected symbols to resolve directly to the
2711 function rather than going via the plt. If people want
2712 function pointer comparisons to work as expected then they
2713 should avoid writing weird assembly. */
2714 if (SYMBOL_CALLS_LOCAL (info, h))
2715 {
2716 struct elf_dyn_relocs **pp;
2717
2718 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2719 {
2720 p->count -= p->pc_count;
2721 p->pc_count = 0;
2722 if (p->count == 0)
2723 *pp = p->next;
2724 else
2725 pp = &p->next;
2726 }
2727 }
2728
2729 /* Also discard relocs on undefined weak syms with non-default
2730 visibility. */
2731 if (eh->dyn_relocs != NULL)
2732 {
2733 if (h->root.type == bfd_link_hash_undefweak)
2734 {
2735 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2736 eh->dyn_relocs = NULL;
2737
2738 /* Make sure undefined weak symbols are output as a dynamic
2739 symbol in PIEs. */
2740 else if (h->dynindx == -1
2741 && ! h->forced_local
2742 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2743 return FALSE;
2744 }
2745 /* For PIE, discard space for pc-relative relocs against
2746 symbols which turn out to need copy relocs. */
2747 else if (info->executable
2748 && (h->needs_copy || eh->needs_copy)
2749 && h->def_dynamic
2750 && !h->def_regular)
2751 {
2752 struct elf_dyn_relocs **pp;
2753
2754 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2755 {
2756 if (p->pc_count != 0)
2757 *pp = p->next;
2758 else
2759 pp = &p->next;
2760 }
2761 }
2762 }
2763 }
2764 else if (ELIMINATE_COPY_RELOCS)
2765 {
2766 /* For the non-shared case, discard space for relocs against
2767 symbols which turn out to need copy relocs or are not
2768 dynamic. */
2769
2770 if (!h->non_got_ref
2771 && ((h->def_dynamic
2772 && !h->def_regular)
2773 || (htab->elf.dynamic_sections_created
2774 && (h->root.type == bfd_link_hash_undefweak
2775 || h->root.type == bfd_link_hash_undefined))))
2776 {
2777 /* Make sure this symbol is output as a dynamic symbol.
2778 Undefined weak syms won't yet be marked as dynamic. */
2779 if (h->dynindx == -1
2780 && ! h->forced_local
2781 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2782 return FALSE;
2783
2784 /* If that succeeded, we know we'll be keeping all the
2785 relocs. */
2786 if (h->dynindx != -1)
2787 goto keep;
2788 }
2789
2790 eh->dyn_relocs = NULL;
2791
2792 keep: ;
2793 }
2794
2795 /* Finally, allocate space. */
2796 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2797 {
2798 asection * sreloc;
2799
2800 sreloc = elf_section_data (p->sec)->sreloc;
2801
2802 BFD_ASSERT (sreloc != NULL);
2803
2804 sreloc->size += p->count * bed->s->sizeof_rela;
2805 }
2806
2807 return TRUE;
2808 }
2809
2810 /* Allocate space in .plt, .got and associated reloc sections for
2811 local dynamic relocs. */
2812
2813 static bfd_boolean
2814 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2815 {
2816 struct elf_link_hash_entry *h
2817 = (struct elf_link_hash_entry *) *slot;
2818
2819 if (h->type != STT_GNU_IFUNC
2820 || !h->def_regular
2821 || !h->ref_regular
2822 || !h->forced_local
2823 || h->root.type != bfd_link_hash_defined)
2824 abort ();
2825
2826 return elf_x86_64_allocate_dynrelocs (h, inf);
2827 }
2828
2829 /* Find any dynamic relocs that apply to read-only sections. */
2830
2831 static bfd_boolean
2832 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2833 void * inf)
2834 {
2835 struct elf_x86_64_link_hash_entry *eh;
2836 struct elf_dyn_relocs *p;
2837
2838 /* Skip local IFUNC symbols. */
2839 if (h->forced_local && h->type == STT_GNU_IFUNC)
2840 return TRUE;
2841
2842 eh = (struct elf_x86_64_link_hash_entry *) h;
2843 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2844 {
2845 asection *s = p->sec->output_section;
2846
2847 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2848 {
2849 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2850
2851 info->flags |= DF_TEXTREL;
2852
2853 if ((info->warn_shared_textrel && info->shared)
2854 || info->error_textrel)
2855 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2856 p->sec->owner, h->root.root.string,
2857 p->sec);
2858
2859 /* Not an error, just cut short the traversal. */
2860 return FALSE;
2861 }
2862 }
2863 return TRUE;
2864 }
2865
2866 /* Convert
2867 mov foo@GOTPCREL(%rip), %reg
2868 to
2869 lea foo(%rip), %reg
2870 with the local symbol, foo. */
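/* For example (illustrative REX.W encodings with %rax):

     48 8b 05 00 00 00 00	mov  foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 00 00 00 00	lea  foo(%rip), %rax

   Only the opcode byte changes (0x8b -> 0x8d); the ModRM byte and the
   32-bit displacement field are left alone, and the R_X86_64_GOTPCREL
   relocation is rewritten to R_X86_64_PC32 against the symbol itself.  */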
2871
2872 static bfd_boolean
2873 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2874 struct bfd_link_info *link_info)
2875 {
2876 Elf_Internal_Shdr *symtab_hdr;
2877 Elf_Internal_Rela *internal_relocs;
2878 Elf_Internal_Rela *irel, *irelend;
2879 bfd_byte *contents;
2880 struct elf_x86_64_link_hash_table *htab;
2881 bfd_boolean changed_contents;
2882 bfd_boolean changed_relocs;
2883 bfd_signed_vma *local_got_refcounts;
2884
2885 /* Don't even try to convert non-ELF outputs. */
2886 if (!is_elf_hash_table (link_info->hash))
2887 return FALSE;
2888
2889 /* Nothing to do if there is no code, no relocations or no output. */
2890 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2891 || sec->reloc_count == 0
2892 || bfd_is_abs_section (sec->output_section))
2893 return TRUE;
2894
2895 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2896
2897 /* Load the relocations for this section. */
2898 internal_relocs = (_bfd_elf_link_read_relocs
2899 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2900 link_info->keep_memory));
2901 if (internal_relocs == NULL)
2902 return FALSE;
2903
2904 htab = elf_x86_64_hash_table (link_info);
2905 changed_contents = FALSE;
2906 changed_relocs = FALSE;
2907 local_got_refcounts = elf_local_got_refcounts (abfd);
2908
2909 /* Get the section contents. */
2910 if (elf_section_data (sec)->this_hdr.contents != NULL)
2911 contents = elf_section_data (sec)->this_hdr.contents;
2912 else
2913 {
2914 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2915 goto error_return;
2916 }
2917
2918 irelend = internal_relocs + sec->reloc_count;
2919 for (irel = internal_relocs; irel < irelend; irel++)
2920 {
2921 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2922 unsigned int r_symndx = htab->r_sym (irel->r_info);
2923 unsigned int indx;
2924 struct elf_link_hash_entry *h;
2925
2926 if (r_type != R_X86_64_GOTPCREL)
2927 continue;
2928
2929 /* Get the symbol referred to by the reloc. */
2930 if (r_symndx < symtab_hdr->sh_info)
2931 {
2932 Elf_Internal_Sym *isym;
2933
2934 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2935 abfd, r_symndx);
2936
2937 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2938 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2939 && irel->r_offset >= 2
2940 && bfd_get_8 (abfd,
2941 contents + irel->r_offset - 2) == 0x8b)
2942 {
2943 bfd_put_8 (abfd, 0x8d,
2944 contents + irel->r_offset - 2);
2945 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2946 if (local_got_refcounts != NULL
2947 && local_got_refcounts[r_symndx] > 0)
2948 local_got_refcounts[r_symndx] -= 1;
2949 changed_contents = TRUE;
2950 changed_relocs = TRUE;
2951 }
2952 continue;
2953 }
2954
2955 indx = r_symndx - symtab_hdr->sh_info;
2956 h = elf_sym_hashes (abfd)[indx];
2957 BFD_ASSERT (h != NULL);
2958
2959 while (h->root.type == bfd_link_hash_indirect
2960 || h->root.type == bfd_link_hash_warning)
2961 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2962
2963 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2964 avoid optimizing _DYNAMIC since ld.so may use its link-time
2965 address. */
2966 if (h->def_regular
2967 && h->type != STT_GNU_IFUNC
2968 && h != htab->elf.hdynamic
2969 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2970 && irel->r_offset >= 2
2971 && bfd_get_8 (abfd,
2972 contents + irel->r_offset - 2) == 0x8b)
2973 {
2974 bfd_put_8 (abfd, 0x8d,
2975 contents + irel->r_offset - 2);
2976 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2977 if (h->got.refcount > 0)
2978 h->got.refcount -= 1;
2979 changed_contents = TRUE;
2980 changed_relocs = TRUE;
2981 }
2982 }
2983
2984 if (contents != NULL
2985 && elf_section_data (sec)->this_hdr.contents != contents)
2986 {
2987 if (!changed_contents && !link_info->keep_memory)
2988 free (contents);
2989 else
2990 {
2991 /* Cache the section contents for elf_link_input_bfd. */
2992 elf_section_data (sec)->this_hdr.contents = contents;
2993 }
2994 }
2995
2996 if (elf_section_data (sec)->relocs != internal_relocs)
2997 {
2998 if (!changed_relocs)
2999 free (internal_relocs);
3000 else
3001 elf_section_data (sec)->relocs = internal_relocs;
3002 }
3003
3004 return TRUE;
3005
3006 error_return:
3007 if (contents != NULL
3008 && elf_section_data (sec)->this_hdr.contents != contents)
3009 free (contents);
3010 if (internal_relocs != NULL
3011 && elf_section_data (sec)->relocs != internal_relocs)
3012 free (internal_relocs);
3013 return FALSE;
3014 }
3015
3016 /* Set the sizes of the dynamic sections. */
3017
3018 static bfd_boolean
3019 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3020 struct bfd_link_info *info)
3021 {
3022 struct elf_x86_64_link_hash_table *htab;
3023 bfd *dynobj;
3024 asection *s;
3025 bfd_boolean relocs;
3026 bfd *ibfd;
3027 const struct elf_backend_data *bed;
3028
3029 htab = elf_x86_64_hash_table (info);
3030 if (htab == NULL)
3031 return FALSE;
3032 bed = get_elf_backend_data (output_bfd);
3033
3034 dynobj = htab->elf.dynobj;
3035 if (dynobj == NULL)
3036 abort ();
3037
3038 if (htab->elf.dynamic_sections_created)
3039 {
3040 /* Set the contents of the .interp section to the interpreter. */
3041 if (info->executable)
3042 {
3043 s = bfd_get_linker_section (dynobj, ".interp");
3044 if (s == NULL)
3045 abort ();
3046 s->size = htab->dynamic_interpreter_size;
3047 s->contents = (unsigned char *) htab->dynamic_interpreter;
3048 }
3049 }
3050
3051 /* Set up .got offsets for local syms, and space for local dynamic
3052 relocs. */
3053 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3054 {
3055 bfd_signed_vma *local_got;
3056 bfd_signed_vma *end_local_got;
3057 char *local_tls_type;
3058 bfd_vma *local_tlsdesc_gotent;
3059 bfd_size_type locsymcount;
3060 Elf_Internal_Shdr *symtab_hdr;
3061 asection *srel;
3062
3063 if (! is_x86_64_elf (ibfd))
3064 continue;
3065
3066 for (s = ibfd->sections; s != NULL; s = s->next)
3067 {
3068 struct elf_dyn_relocs *p;
3069
3070 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3071 return FALSE;
3072
3073 for (p = (struct elf_dyn_relocs *)
3074 (elf_section_data (s)->local_dynrel);
3075 p != NULL;
3076 p = p->next)
3077 {
3078 if (!bfd_is_abs_section (p->sec)
3079 && bfd_is_abs_section (p->sec->output_section))
3080 {
3081 /* Input section has been discarded, either because
3082 it is a copy of a linkonce section or due to
3083 linker script /DISCARD/, so we'll be discarding
3084 the relocs too. */
3085 }
3086 else if (p->count != 0)
3087 {
3088 srel = elf_section_data (p->sec)->sreloc;
3089 srel->size += p->count * bed->s->sizeof_rela;
3090 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3091 && (info->flags & DF_TEXTREL) == 0)
3092 {
3093 info->flags |= DF_TEXTREL;
3094 if ((info->warn_shared_textrel && info->shared)
3095 || info->error_textrel)
3096 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3097 p->sec->owner, p->sec);
3098 }
3099 }
3100 }
3101 }
3102
3103 local_got = elf_local_got_refcounts (ibfd);
3104 if (!local_got)
3105 continue;
3106
3107 symtab_hdr = &elf_symtab_hdr (ibfd);
3108 locsymcount = symtab_hdr->sh_info;
3109 end_local_got = local_got + locsymcount;
3110 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3111 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3112 s = htab->elf.sgot;
3113 srel = htab->elf.srelgot;
3114 for (; local_got < end_local_got;
3115 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3116 {
3117 *local_tlsdesc_gotent = (bfd_vma) -1;
3118 if (*local_got > 0)
3119 {
3120 if (GOT_TLS_GDESC_P (*local_tls_type))
3121 {
3122 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3123 - elf_x86_64_compute_jump_table_size (htab);
3124 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3125 *local_got = (bfd_vma) -2;
3126 }
3127 if (! GOT_TLS_GDESC_P (*local_tls_type)
3128 || GOT_TLS_GD_P (*local_tls_type))
3129 {
3130 *local_got = s->size;
3131 s->size += GOT_ENTRY_SIZE;
3132 if (GOT_TLS_GD_P (*local_tls_type))
3133 s->size += GOT_ENTRY_SIZE;
3134 }
3135 if (info->shared
3136 || GOT_TLS_GD_ANY_P (*local_tls_type)
3137 || *local_tls_type == GOT_TLS_IE)
3138 {
3139 if (GOT_TLS_GDESC_P (*local_tls_type))
3140 {
3141 htab->elf.srelplt->size
3142 += bed->s->sizeof_rela;
3143 htab->tlsdesc_plt = (bfd_vma) -1;
3144 }
3145 if (! GOT_TLS_GDESC_P (*local_tls_type)
3146 || GOT_TLS_GD_P (*local_tls_type))
3147 srel->size += bed->s->sizeof_rela;
3148 }
3149 }
3150 else
3151 *local_got = (bfd_vma) -1;
3152 }
3153 }
3154
3155 if (htab->tls_ld_got.refcount > 0)
3156 {
3157 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3158 relocs. */
3159 htab->tls_ld_got.offset = htab->elf.sgot->size;
3160 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3161 htab->elf.srelgot->size += bed->s->sizeof_rela;
3162 }
3163 else
3164 htab->tls_ld_got.offset = -1;
3165
3166 /* Allocate global sym .plt and .got entries, and space for global
3167 sym dynamic relocs. */
3168 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3169 info);
3170
3171 /* Allocate .plt and .got entries, and space for local symbols. */
3172 htab_traverse (htab->loc_hash_table,
3173 elf_x86_64_allocate_local_dynrelocs,
3174 info);
3175
3176 /* For every jump slot reserved in the sgotplt, reloc_count is
3177 incremented. However, when we reserve space for TLS descriptors,
3178 it's not incremented, so in order to compute the space reserved
3179 for them, it suffices to multiply the reloc count by the jump
3180 slot size.
3181
3182 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3183 so that R_X86_64_IRELATIVE entries come last. */
3184 if (htab->elf.srelplt)
3185 {
3186 htab->sgotplt_jump_table_size
3187 = elf_x86_64_compute_jump_table_size (htab);
3188 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3189 }
3190 else if (htab->elf.irelplt)
3191 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3192
3193 if (htab->tlsdesc_plt)
3194 {
3195 /* If we're not using lazy TLS relocations, don't generate the
3196 PLT and GOT entries they require. */
3197 if ((info->flags & DF_BIND_NOW))
3198 htab->tlsdesc_plt = 0;
3199 else
3200 {
3201 htab->tlsdesc_got = htab->elf.sgot->size;
3202 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3203 /* Reserve room for the initial entry.
3204 FIXME: we could probably do away with it in this case. */
3205 if (htab->elf.splt->size == 0)
3206 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3207 htab->tlsdesc_plt = htab->elf.splt->size;
3208 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3209 }
3210 }
3211
3212 if (htab->elf.sgotplt)
3213 {
3214 /* Don't allocate the .got.plt section if there are neither GOT nor PLT
3215 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3216 if ((htab->elf.hgot == NULL
3217 || !htab->elf.hgot->ref_regular_nonweak)
3218 && (htab->elf.sgotplt->size
3219 == get_elf_backend_data (output_bfd)->got_header_size)
3220 && (htab->elf.splt == NULL
3221 || htab->elf.splt->size == 0)
3222 && (htab->elf.sgot == NULL
3223 || htab->elf.sgot->size == 0)
3224 && (htab->elf.iplt == NULL
3225 || htab->elf.iplt->size == 0)
3226 && (htab->elf.igotplt == NULL
3227 || htab->elf.igotplt->size == 0))
3228 htab->elf.sgotplt->size = 0;
3229 }
3230
3231 if (htab->plt_eh_frame != NULL
3232 && htab->elf.splt != NULL
3233 && htab->elf.splt->size != 0
3234 && !bfd_is_abs_section (htab->elf.splt->output_section)
3235 && _bfd_elf_eh_frame_present (info))
3236 {
3237 const struct elf_x86_64_backend_data *arch_data
3238 = get_elf_x86_64_arch_data (bed);
3239 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3240 }
3241
3242 /* We now have determined the sizes of the various dynamic sections.
3243 Allocate memory for them. */
3244 relocs = FALSE;
3245 for (s = dynobj->sections; s != NULL; s = s->next)
3246 {
3247 if ((s->flags & SEC_LINKER_CREATED) == 0)
3248 continue;
3249
3250 if (s == htab->elf.splt
3251 || s == htab->elf.sgot
3252 || s == htab->elf.sgotplt
3253 || s == htab->elf.iplt
3254 || s == htab->elf.igotplt
3255 || s == htab->plt_bnd
3256 || s == htab->plt_got
3257 || s == htab->plt_eh_frame
3258 || s == htab->sdynbss)
3259 {
3260 /* Strip this section if we don't need it; see the
3261 comment below. */
3262 }
3263 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3264 {
3265 if (s->size != 0 && s != htab->elf.srelplt)
3266 relocs = TRUE;
3267
3268 /* We use the reloc_count field as a counter if we need
3269 to copy relocs into the output file. */
3270 if (s != htab->elf.srelplt)
3271 s->reloc_count = 0;
3272 }
3273 else
3274 {
3275 /* It's not one of our sections, so don't allocate space. */
3276 continue;
3277 }
3278
3279 if (s->size == 0)
3280 {
3281 /* If we don't need this section, strip it from the
3282 output file. This is mostly to handle .rela.bss and
3283 .rela.plt. We must create both sections in
3284 create_dynamic_sections, because they must be created
3285 before the linker maps input sections to output
3286 sections. The linker does that before
3287 adjust_dynamic_symbol is called, and it is that
3288 function which decides whether anything needs to go
3289 into these sections. */
3290
3291 s->flags |= SEC_EXCLUDE;
3292 continue;
3293 }
3294
3295 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3296 continue;
3297
3298 /* Allocate memory for the section contents. We use bfd_zalloc
3299 here in case unused entries are not reclaimed before the
3300 section's contents are written out. This should not happen,
3301 but this way if it does, we get a R_X86_64_NONE reloc instead
3302 of garbage. */
3303 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3304 if (s->contents == NULL)
3305 return FALSE;
3306 }
3307
3308 if (htab->plt_eh_frame != NULL
3309 && htab->plt_eh_frame->contents != NULL)
3310 {
3311 const struct elf_x86_64_backend_data *arch_data
3312 = get_elf_x86_64_arch_data (bed);
3313
3314 memcpy (htab->plt_eh_frame->contents,
3315 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3316 bfd_put_32 (dynobj, htab->elf.splt->size,
3317 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3318 }
3319
3320 if (htab->elf.dynamic_sections_created)
3321 {
3322 /* Add some entries to the .dynamic section. We fill in the
3323 values later, in elf_x86_64_finish_dynamic_sections, but we
3324 must add the entries now so that we get the correct size for
3325 the .dynamic section. The DT_DEBUG entry is filled in by the
3326 dynamic linker and used by the debugger. */
3327 #define add_dynamic_entry(TAG, VAL) \
3328 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3329
3330 if (info->executable)
3331 {
3332 if (!add_dynamic_entry (DT_DEBUG, 0))
3333 return FALSE;
3334 }
3335
3336 if (htab->elf.splt->size != 0)
3337 {
3338 if (!add_dynamic_entry (DT_PLTGOT, 0)
3339 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3340 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3341 || !add_dynamic_entry (DT_JMPREL, 0))
3342 return FALSE;
3343
3344 if (htab->tlsdesc_plt
3345 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3346 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3347 return FALSE;
3348 }
3349
3350 if (relocs)
3351 {
3352 if (!add_dynamic_entry (DT_RELA, 0)
3353 || !add_dynamic_entry (DT_RELASZ, 0)
3354 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3355 return FALSE;
3356
3357 /* If any dynamic relocs apply to a read-only section,
3358 then we need a DT_TEXTREL entry. */
3359 if ((info->flags & DF_TEXTREL) == 0)
3360 elf_link_hash_traverse (&htab->elf,
3361 elf_x86_64_readonly_dynrelocs,
3362 info);
3363
3364 if ((info->flags & DF_TEXTREL) != 0)
3365 {
3366 if (!add_dynamic_entry (DT_TEXTREL, 0))
3367 return FALSE;
3368 }
3369 }
3370 }
3371 #undef add_dynamic_entry
3372
3373 return TRUE;
3374 }
3375
3376 static bfd_boolean
3377 elf_x86_64_always_size_sections (bfd *output_bfd,
3378 struct bfd_link_info *info)
3379 {
3380 asection *tls_sec = elf_hash_table (info)->tls_sec;
3381
3382 if (tls_sec)
3383 {
3384 struct elf_link_hash_entry *tlsbase;
3385
3386 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3387 "_TLS_MODULE_BASE_",
3388 FALSE, FALSE, FALSE);
3389
3390 if (tlsbase && tlsbase->type == STT_TLS)
3391 {
3392 struct elf_x86_64_link_hash_table *htab;
3393 struct bfd_link_hash_entry *bh = NULL;
3394 const struct elf_backend_data *bed
3395 = get_elf_backend_data (output_bfd);
3396
3397 htab = elf_x86_64_hash_table (info);
3398 if (htab == NULL)
3399 return FALSE;
3400
3401 if (!(_bfd_generic_link_add_one_symbol
3402 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3403 tls_sec, 0, NULL, FALSE,
3404 bed->collect, &bh)))
3405 return FALSE;
3406
3407 htab->tls_module_base = bh;
3408
3409 tlsbase = (struct elf_link_hash_entry *)bh;
3410 tlsbase->def_regular = 1;
3411 tlsbase->other = STV_HIDDEN;
3412 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3413 }
3414 }
3415
3416 return TRUE;
3417 }
3418
3419 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3420 executables. Rather than setting it to the beginning of the TLS
3421 section, we have to set it to the end. This function may be called
3422 multiple times; it is idempotent. */
3423
3424 static void
3425 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3426 {
3427 struct elf_x86_64_link_hash_table *htab;
3428 struct bfd_link_hash_entry *base;
3429
3430 if (!info->executable)
3431 return;
3432
3433 htab = elf_x86_64_hash_table (info);
3434 if (htab == NULL)
3435 return;
3436
3437 base = htab->tls_module_base;
3438 if (base == NULL)
3439 return;
3440
3441 base->u.def.value = htab->elf.tls_size;
3442 }
3443
3444 /* Return the base VMA address which should be subtracted from real addresses
3445 when resolving @dtpoff relocation.
3446 This is PT_TLS segment p_vaddr. */
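/* (E.g. a TLS variable at VMA tls_sec->vma + 0x10 has a @dtpoff of 0x10,
   the module-relative offset passed to __tls_get_addr together with the
   module ID.)  */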
3447
3448 static bfd_vma
3449 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3450 {
3451 /* If tls_sec is NULL, we should have signalled an error already. */
3452 if (elf_hash_table (info)->tls_sec == NULL)
3453 return 0;
3454 return elf_hash_table (info)->tls_sec->vma;
3455 }
3456
3457 /* Return the relocation value for @tpoff relocation
3458 if STT_TLS virtual address is ADDRESS. */
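/* (x86-64 keeps the static TLS block just below the thread pointer, so
   the value computed here, ADDRESS - tls_sec->vma - aligned TLS size,
   comes out negative for addresses inside the TLS segment.)  */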
3459
3460 static bfd_vma
3461 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3462 {
3463 struct elf_link_hash_table *htab = elf_hash_table (info);
3464 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3465 bfd_vma static_tls_size;
3466
3467 /* If tls_sec is NULL, we should have signalled an error already. */
3468 if (htab->tls_sec == NULL)
3469 return 0;
3470
3471 /* Consider special static TLS alignment requirements. */
3472 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3473 return address - static_tls_size - htab->tls_sec->vma;
3474 }
3475
3476 /* Is the instruction before OFFSET in CONTENTS a 32-bit relative
3477 branch? */
3478
3479 static bfd_boolean
3480 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3481 {
3482 /* Opcode Instruction
3483 0xe8 call
3484 0xe9 jump
3485 0x0f 0x8x conditional jump */
3486 return ((offset > 0
3487 && (contents [offset - 1] == 0xe8
3488 || contents [offset - 1] == 0xe9))
3489 || (offset > 1
3490 && contents [offset - 2] == 0x0f
3491 && (contents [offset - 1] & 0xf0) == 0x80));
3492 }
3493
3494 /* Relocate an x86_64 ELF section. */
3495
3496 static bfd_boolean
3497 elf_x86_64_relocate_section (bfd *output_bfd,
3498 struct bfd_link_info *info,
3499 bfd *input_bfd,
3500 asection *input_section,
3501 bfd_byte *contents,
3502 Elf_Internal_Rela *relocs,
3503 Elf_Internal_Sym *local_syms,
3504 asection **local_sections)
3505 {
3506 struct elf_x86_64_link_hash_table *htab;
3507 Elf_Internal_Shdr *symtab_hdr;
3508 struct elf_link_hash_entry **sym_hashes;
3509 bfd_vma *local_got_offsets;
3510 bfd_vma *local_tlsdesc_gotents;
3511 Elf_Internal_Rela *rel;
3512 Elf_Internal_Rela *relend;
3513 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3514
3515 BFD_ASSERT (is_x86_64_elf (input_bfd));
3516
3517 htab = elf_x86_64_hash_table (info);
3518 if (htab == NULL)
3519 return FALSE;
3520 symtab_hdr = &elf_symtab_hdr (input_bfd);
3521 sym_hashes = elf_sym_hashes (input_bfd);
3522 local_got_offsets = elf_local_got_offsets (input_bfd);
3523 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3524
3525 elf_x86_64_set_tls_module_base (info);
3526
3527 rel = relocs;
3528 relend = relocs + input_section->reloc_count;
3529 for (; rel < relend; rel++)
3530 {
3531 unsigned int r_type;
3532 reloc_howto_type *howto;
3533 unsigned long r_symndx;
3534 struct elf_link_hash_entry *h;
3535 struct elf_x86_64_link_hash_entry *eh;
3536 Elf_Internal_Sym *sym;
3537 asection *sec;
3538 bfd_vma off, offplt, plt_offset;
3539 bfd_vma relocation;
3540 bfd_boolean unresolved_reloc;
3541 bfd_reloc_status_type r;
3542 int tls_type;
3543 asection *base_got, *resolved_plt;
3544 bfd_vma st_size;
3545
3546 r_type = ELF32_R_TYPE (rel->r_info);
3547 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3548 || r_type == (int) R_X86_64_GNU_VTENTRY)
3549 continue;
3550
3551 if (r_type >= (int) R_X86_64_standard)
3552 {
3553 (*_bfd_error_handler)
3554 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3555 input_bfd, input_section, r_type);
3556 bfd_set_error (bfd_error_bad_value);
3557 return FALSE;
3558 }
3559
3560 if (r_type != (int) R_X86_64_32
3561 || ABI_64_P (output_bfd))
3562 howto = x86_64_elf_howto_table + r_type;
3563 else
3564 howto = (x86_64_elf_howto_table
3565 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3566 r_symndx = htab->r_sym (rel->r_info);
3567 h = NULL;
3568 sym = NULL;
3569 sec = NULL;
3570 unresolved_reloc = FALSE;
3571 if (r_symndx < symtab_hdr->sh_info)
3572 {
3573 sym = local_syms + r_symndx;
3574 sec = local_sections[r_symndx];
3575
3576 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3577 &sec, rel);
3578 st_size = sym->st_size;
3579
3580 /* Relocate against local STT_GNU_IFUNC symbol. */
3581 if (!info->relocatable
3582 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3583 {
3584 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3585 rel, FALSE);
3586 if (h == NULL)
3587 abort ();
3588
3589 /* Set STT_GNU_IFUNC symbol value. */
3590 h->root.u.def.value = sym->st_value;
3591 h->root.u.def.section = sec;
3592 }
3593 }
3594 else
3595 {
3596 bfd_boolean warned ATTRIBUTE_UNUSED;
3597 bfd_boolean ignored ATTRIBUTE_UNUSED;
3598
3599 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3600 r_symndx, symtab_hdr, sym_hashes,
3601 h, sec, relocation,
3602 unresolved_reloc, warned, ignored);
3603 st_size = h->size;
3604 }
3605
3606 if (sec != NULL && discarded_section (sec))
3607 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3608 rel, 1, relend, howto, 0, contents);
3609
3610 if (info->relocatable)
3611 continue;
3612
3613 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3614 {
3615 if (r_type == R_X86_64_64)
3616 {
3617 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3618 zero-extend it to 64 bits if the addend is zero. */
3619 r_type = R_X86_64_32;
3620 memset (contents + rel->r_offset + 4, 0, 4);
3621 }
3622 else if (r_type == R_X86_64_SIZE64)
3623 {
3624 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3625 zero-extend it to 64 bits if the addend is zero. */
3626 r_type = R_X86_64_SIZE32;
3627 memset (contents + rel->r_offset + 4, 0, 4);
3628 }
3629 }
3630
3631 eh = (struct elf_x86_64_link_hash_entry *) h;
3632
3633 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3634 it here if it is defined in a non-shared object. */
3635 if (h != NULL
3636 && h->type == STT_GNU_IFUNC
3637 && h->def_regular)
3638 {
3639 bfd_vma plt_index;
3640 const char *name;
3641
3642 if ((input_section->flags & SEC_ALLOC) == 0
3643 || h->plt.offset == (bfd_vma) -1)
3644 abort ();
3645
3646 /* STT_GNU_IFUNC symbol must go through PLT. */
3647 if (htab->elf.splt != NULL)
3648 {
3649 if (htab->plt_bnd != NULL)
3650 {
3651 resolved_plt = htab->plt_bnd;
3652 plt_offset = eh->plt_bnd.offset;
3653 }
3654 else
3655 {
3656 resolved_plt = htab->elf.splt;
3657 plt_offset = h->plt.offset;
3658 }
3659 }
3660 else
3661 {
3662 resolved_plt = htab->elf.iplt;
3663 plt_offset = h->plt.offset;
3664 }
3665
3666 relocation = (resolved_plt->output_section->vma
3667 + resolved_plt->output_offset + plt_offset);
3668
3669 switch (r_type)
3670 {
3671 default:
3672 if (h->root.root.string)
3673 name = h->root.root.string;
3674 else
3675 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3676 NULL);
3677 (*_bfd_error_handler)
3678 (_("%B: relocation %s against STT_GNU_IFUNC "
3679 "symbol `%s' isn't handled by %s"), input_bfd,
3680 x86_64_elf_howto_table[r_type].name,
3681 name, __FUNCTION__);
3682 bfd_set_error (bfd_error_bad_value);
3683 return FALSE;
3684
3685 case R_X86_64_32S:
3686 if (info->shared)
3687 abort ();
3688 goto do_relocation;
3689
3690 case R_X86_64_32:
3691 if (ABI_64_P (output_bfd))
3692 goto do_relocation;
3693 /* FALLTHROUGH */
3694 case R_X86_64_64:
3695 if (rel->r_addend != 0)
3696 {
3697 if (h->root.root.string)
3698 name = h->root.root.string;
3699 else
3700 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3701 sym, NULL);
3702 (*_bfd_error_handler)
3703 (_("%B: relocation %s against STT_GNU_IFUNC "
3704 "symbol `%s' has non-zero addend: %d"),
3705 input_bfd, x86_64_elf_howto_table[r_type].name,
3706 name, rel->r_addend);
3707 bfd_set_error (bfd_error_bad_value);
3708 return FALSE;
3709 }
3710
3711 /* Generate a dynamic relocation only when there is a
3712 non-GOT reference in a shared object. */
3713 if (info->shared && h->non_got_ref)
3714 {
3715 Elf_Internal_Rela outrel;
3716 asection *sreloc;
3717
3718 /* Need a dynamic relocation to get the real function
3719 address. */
3720 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3721 info,
3722 input_section,
3723 rel->r_offset);
3724 if (outrel.r_offset == (bfd_vma) -1
3725 || outrel.r_offset == (bfd_vma) -2)
3726 abort ();
3727
3728 outrel.r_offset += (input_section->output_section->vma
3729 + input_section->output_offset);
3730
3731 if (h->dynindx == -1
3732 || h->forced_local
3733 || info->executable)
3734 {
3735 /* This symbol is resolved locally. */
3736 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3737 outrel.r_addend = (h->root.u.def.value
3738 + h->root.u.def.section->output_section->vma
3739 + h->root.u.def.section->output_offset);
3740 }
3741 else
3742 {
3743 outrel.r_info = htab->r_info (h->dynindx, r_type);
3744 outrel.r_addend = 0;
3745 }
3746
3747 sreloc = htab->elf.irelifunc;
3748 elf_append_rela (output_bfd, sreloc, &outrel);
3749
3750 /* If this reloc is against an external symbol, we
3751 do not want to fiddle with the addend. Otherwise,
3752 we need to include the symbol value so that it
3753 becomes an addend for the dynamic reloc. For an
3754 internal symbol, we have already updated the addend. */
3755 continue;
3756 }
3757 /* FALLTHROUGH */
3758 case R_X86_64_PC32:
3759 case R_X86_64_PC32_BND:
3760 case R_X86_64_PC64:
3761 case R_X86_64_PLT32:
3762 case R_X86_64_PLT32_BND:
3763 goto do_relocation;
3764
3765 case R_X86_64_GOTPCREL:
3766 case R_X86_64_GOTPCREL64:
3767 base_got = htab->elf.sgot;
3768 off = h->got.offset;
3769
3770 if (base_got == NULL)
3771 abort ();
3772
3773 if (off == (bfd_vma) -1)
3774 {
3775 /* We can't use h->got.offset here to save state, or
3776 even just remember the offset, as finish_dynamic_symbol
3777 would use that as offset into .got. */
3778
3779 if (htab->elf.splt != NULL)
3780 {
3781 plt_index = h->plt.offset / plt_entry_size - 1;
3782 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3783 base_got = htab->elf.sgotplt;
3784 }
3785 else
3786 {
3787 plt_index = h->plt.offset / plt_entry_size;
3788 off = plt_index * GOT_ENTRY_SIZE;
3789 base_got = htab->elf.igotplt;
3790 }
3791
3792 if (h->dynindx == -1
3793 || h->forced_local
3794 || info->symbolic)
3795 {
3796 /* This references the local definition. We must
3797 initialize this entry in the global offset table.
3798 Since the offset must always be a multiple of 8,
3799 we use the least significant bit to record
3800 whether we have initialized it already.
3801
3802 When doing a dynamic link, we create a .rela.got
3803 relocation entry to initialize the value. This
3804 is done in the finish_dynamic_symbol routine. */
3805 if ((off & 1) != 0)
3806 off &= ~1;
3807 else
3808 {
3809 bfd_put_64 (output_bfd, relocation,
3810 base_got->contents + off);
3811 /* Note that this is harmless for the GOTPLT64
3812 case, as -1 | 1 still is -1. */
3813 h->got.offset |= 1;
3814 }
3815 }
3816 }
3817
3818 relocation = (base_got->output_section->vma
3819 + base_got->output_offset + off);
3820
3821 goto do_relocation;
3822 }
3823 }
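/* To illustrate the GOTPCREL arithmetic above with concrete (purely
   illustrative) numbers: with 16-byte PLT entries and the three
   reserved .got.plt slots, the second real PLT entry (h->plt.offset
   == 32) maps to

     plt_index = 32 / 16 - 1 = 1
     off       = (1 + 3) * GOT_ENTRY_SIZE = 32

   i.e. the fifth slot of .got.plt, while the .iplt variant has no
   header entry and no reserved slots, hence the plain
   plt_index * GOT_ENTRY_SIZE form.  */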
3824
3825 /* When generating a shared object, the relocations handled here are
3826 copied into the output file to be resolved at run time. */
3827 switch (r_type)
3828 {
3829 case R_X86_64_GOT32:
3830 case R_X86_64_GOT64:
3831 /* Relocation is to the entry for this symbol in the global
3832 offset table. */
3833 case R_X86_64_GOTPCREL:
3834 case R_X86_64_GOTPCREL64:
3835 /* Use global offset table entry as symbol value. */
3836 case R_X86_64_GOTPLT64:
3837 /* This is obsolete and treated the same as GOT64. */
3838 base_got = htab->elf.sgot;
3839
3840 if (htab->elf.sgot == NULL)
3841 abort ();
3842
3843 if (h != NULL)
3844 {
3845 bfd_boolean dyn;
3846
3847 off = h->got.offset;
3848 if (h->needs_plt
3849 && h->plt.offset != (bfd_vma)-1
3850 && off == (bfd_vma)-1)
3851 {
3852 /* We can't use h->got.offset here to save
3853 state, or even just remember the offset, as
3854 finish_dynamic_symbol would use that as offset into
3855 .got. */
3856 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3857 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3858 base_got = htab->elf.sgotplt;
3859 }
3860
3861 dyn = htab->elf.dynamic_sections_created;
3862
3863 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3864 || (info->shared
3865 && SYMBOL_REFERENCES_LOCAL (info, h))
3866 || (ELF_ST_VISIBILITY (h->other)
3867 && h->root.type == bfd_link_hash_undefweak))
3868 {
3869 /* This is actually a static link, or it is a -Bsymbolic
3870 link and the symbol is defined locally, or the symbol
3871 was forced to be local because of a version file. We
3872 must initialize this entry in the global offset table.
3873 Since the offset must always be a multiple of 8, we
3874 use the least significant bit to record whether we
3875 have initialized it already.
3876
3877 When doing a dynamic link, we create a .rela.got
3878 relocation entry to initialize the value. This is
3879 done in the finish_dynamic_symbol routine. */
3880 if ((off & 1) != 0)
3881 off &= ~1;
3882 else
3883 {
3884 bfd_put_64 (output_bfd, relocation,
3885 base_got->contents + off);
3886 /* Note that this is harmless for the GOTPLT64 case,
3887 as -1 | 1 still is -1. */
3888 h->got.offset |= 1;
3889 }
3890 }
3891 else
3892 unresolved_reloc = FALSE;
3893 }
3894 else
3895 {
3896 if (local_got_offsets == NULL)
3897 abort ();
3898
3899 off = local_got_offsets[r_symndx];
3900
3901 /* The offset must always be a multiple of 8. We use
3902 the least significant bit to record whether we have
3903 already generated the necessary reloc. */
3904 if ((off & 1) != 0)
3905 off &= ~1;
3906 else
3907 {
3908 bfd_put_64 (output_bfd, relocation,
3909 base_got->contents + off);
3910
3911 if (info->shared)
3912 {
3913 asection *s;
3914 Elf_Internal_Rela outrel;
3915
3916 /* We need to generate a R_X86_64_RELATIVE reloc
3917 for the dynamic linker. */
3918 s = htab->elf.srelgot;
3919 if (s == NULL)
3920 abort ();
3921
3922 outrel.r_offset = (base_got->output_section->vma
3923 + base_got->output_offset
3924 + off);
3925 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3926 outrel.r_addend = relocation;
3927 elf_append_rela (output_bfd, s, &outrel);
3928 }
3929
3930 local_got_offsets[r_symndx] |= 1;
3931 }
3932 }
3933
3934 if (off >= (bfd_vma) -2)
3935 abort ();
3936
3937 relocation = base_got->output_section->vma
3938 + base_got->output_offset + off;
3939 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3940 relocation -= htab->elf.sgotplt->output_section->vma
3941 - htab->elf.sgotplt->output_offset;
3942
3943 break;
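/* Roughly, the two GOT addressing forms resolved above behave as

     movq   foo@GOTPCREL(%rip), %rax    # slot addressed PC-relatively
     movabs $foo@GOT, %r11              # large model: slot offset from
                                        # _GLOBAL_OFFSET_TABLE_

   which is why the non-PC-relative forms are rebased against .got.plt
   (where _GLOBAL_OFFSET_TABLE_ points) while GOTPCREL keeps the
   absolute slot address for the generic PC-relative howto applied at
   the end.  */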
3944
3945 case R_X86_64_GOTOFF64:
3946 /* Relocation is relative to the start of the global offset
3947 table. */
3948
3949 /* Check to make sure it isn't a protected function symbol
3950 for a shared library, since it may not be local when used
3951 as a function address. */
3952 if (!info->executable
3953 && h
3954 && !SYMBOLIC_BIND (info, h)
3955 && h->def_regular
3956 && h->type == STT_FUNC
3957 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3958 {
3959 (*_bfd_error_handler)
3960 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3961 input_bfd, h->root.root.string);
3962 bfd_set_error (bfd_error_bad_value);
3963 return FALSE;
3964 }
3965
3966 /* Note that sgot is not involved in this
3967 calculation. We always want the start of .got.plt. If we
3968 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3969 permitted by the ABI, we might have to change this
3970 calculation. */
3971 relocation -= htab->elf.sgotplt->output_section->vma
3972 + htab->elf.sgotplt->output_offset;
3973 break;
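/* For reference, R_X86_64_GOTOFF64 is the large-model "symbol minus
   GOT base" form, roughly

     movabs $foo@GOTOFF, %r11           # value: &foo - _GLOBAL_OFFSET_TABLE_
     lea    (%r15,%r11), %r11           # assuming %r15 caches the GOT base

   so only the .got.plt address is subtracted here and no GOT slot is
   allocated for the symbol.  */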
3974
3975 case R_X86_64_GOTPC32:
3976 case R_X86_64_GOTPC64:
3977 /* Use global offset table as symbol value. */
3978 relocation = htab->elf.sgotplt->output_section->vma
3979 + htab->elf.sgotplt->output_offset;
3980 unresolved_reloc = FALSE;
3981 break;
3982
3983 case R_X86_64_PLTOFF64:
3984 /* Relocation is PLT entry relative to GOT. For local
3985 symbols it's the symbol itself relative to GOT. */
3986 if (h != NULL
3987 /* See PLT32 handling. */
3988 && h->plt.offset != (bfd_vma) -1
3989 && htab->elf.splt != NULL)
3990 {
3991 if (htab->plt_bnd != NULL)
3992 {
3993 resolved_plt = htab->plt_bnd;
3994 plt_offset = eh->plt_bnd.offset;
3995 }
3996 else
3997 {
3998 resolved_plt = htab->elf.splt;
3999 plt_offset = h->plt.offset;
4000 }
4001
4002 relocation = (resolved_plt->output_section->vma
4003 + resolved_plt->output_offset
4004 + plt_offset);
4005 unresolved_reloc = FALSE;
4006 }
4007
4008 relocation -= htab->elf.sgotplt->output_section->vma
4009 + htab->elf.sgotplt->output_offset;
4010 break;
4011
4012 case R_X86_64_PLT32:
4013 case R_X86_64_PLT32_BND:
4014 /* Relocation is to the entry for this symbol in the
4015 procedure linkage table. */
4016
4017 /* Resolve a PLT32 reloc against a local symbol directly,
4018 without using the procedure linkage table. */
4019 if (h == NULL)
4020 break;
4021
4022 if ((h->plt.offset == (bfd_vma) -1
4023 && eh->plt_got.offset == (bfd_vma) -1)
4024 || htab->elf.splt == NULL)
4025 {
4026 /* We didn't make a PLT entry for this symbol. This
4027 happens when statically linking PIC code, or when
4028 using -Bsymbolic. */
4029 break;
4030 }
4031
4032 if (h->plt.offset != (bfd_vma) -1)
4033 {
4034 if (htab->plt_bnd != NULL)
4035 {
4036 resolved_plt = htab->plt_bnd;
4037 plt_offset = eh->plt_bnd.offset;
4038 }
4039 else
4040 {
4041 resolved_plt = htab->elf.splt;
4042 plt_offset = h->plt.offset;
4043 }
4044 }
4045 else
4046 {
4047 /* Use the GOT PLT. */
4048 resolved_plt = htab->plt_got;
4049 plt_offset = eh->plt_got.offset;
4050 }
4051
4052 relocation = (resolved_plt->output_section->vma
4053 + resolved_plt->output_offset
4054 + plt_offset);
4055 unresolved_reloc = FALSE;
4056 break;
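/* Schematically, after the lookup above

     call foo@PLT                       # R_X86_64_PLT32, addend -4

   resolves as L + A - P, with L the address of the PLT (or .plt.bnd /
   .plt.got) entry selected here; the generic howto below performs that
   final PC-relative computation.  */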
4057
4058 case R_X86_64_SIZE32:
4059 case R_X86_64_SIZE64:
4060 /* Set to symbol size. */
4061 relocation = st_size;
4062 goto direct;
4063
4064 case R_X86_64_PC8:
4065 case R_X86_64_PC16:
4066 case R_X86_64_PC32:
4067 case R_X86_64_PC32_BND:
4068 /* Don't complain about -fPIC if the symbol is undefined when
4069 building an executable. */
4070 if (info->shared
4071 && (input_section->flags & SEC_ALLOC) != 0
4072 && (input_section->flags & SEC_READONLY) != 0
4073 && h != NULL
4074 && !(info->executable
4075 && h->root.type == bfd_link_hash_undefined))
4076 {
4077 bfd_boolean fail = FALSE;
4078 bfd_boolean branch
4079 = ((r_type == R_X86_64_PC32
4080 || r_type == R_X86_64_PC32_BND)
4081 && is_32bit_relative_branch (contents, rel->r_offset));
4082
4083 if (SYMBOL_REFERENCES_LOCAL (info, h))
4084 {
4085 /* Symbol is referenced locally. Make sure it is
4086 defined locally or is a branch. */
4087 fail = !h->def_regular && !branch;
4088 }
4089 else if (!(info->executable
4090 && (h->needs_copy || eh->needs_copy)))
4091 {
4092 /* Symbol doesn't need a copy reloc and isn't referenced
4093 locally. We only allow a branch to a symbol with
4094 non-default visibility. */
4095 fail = (!branch
4096 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4097 }
4098
4099 if (fail)
4100 {
4101 const char *fmt;
4102 const char *v;
4103 const char *pic = "";
4104
4105 switch (ELF_ST_VISIBILITY (h->other))
4106 {
4107 case STV_HIDDEN:
4108 v = _("hidden symbol");
4109 break;
4110 case STV_INTERNAL:
4111 v = _("internal symbol");
4112 break;
4113 case STV_PROTECTED:
4114 v = _("protected symbol");
4115 break;
4116 default:
4117 v = _("symbol");
4118 pic = _("; recompile with -fPIC");
4119 break;
4120 }
4121
4122 if (h->def_regular)
4123 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4124 else
4125 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4126
4127 (*_bfd_error_handler) (fmt, input_bfd,
4128 x86_64_elf_howto_table[r_type].name,
4129 v, h->root.root.string, pic);
4130 bfd_set_error (bfd_error_bad_value);
4131 return FALSE;
4132 }
4133 }
4134 /* Fall through. */
4135
4136 case R_X86_64_8:
4137 case R_X86_64_16:
4138 case R_X86_64_32:
4139 case R_X86_64_PC64:
4140 case R_X86_64_64:
4141 /* FIXME: The ABI says the linker should make sure the value is
4142 the same when it's zero-extended to 64 bits. */
4143
4144 direct:
4145 if ((input_section->flags & SEC_ALLOC) == 0)
4146 break;
4147
4148 /* Don't copy a pc-relative relocation into the output file
4149 if the symbol needs a copy reloc or the symbol is undefined
4150 when building an executable. */
4151 if ((info->shared
4152 && !(info->executable
4153 && h != NULL
4154 && (h->needs_copy
4155 || eh->needs_copy
4156 || h->root.type == bfd_link_hash_undefined)
4157 && IS_X86_64_PCREL_TYPE (r_type))
4158 && (h == NULL
4159 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4160 || h->root.type != bfd_link_hash_undefweak)
4161 && ((! IS_X86_64_PCREL_TYPE (r_type)
4162 && r_type != R_X86_64_SIZE32
4163 && r_type != R_X86_64_SIZE64)
4164 || ! SYMBOL_CALLS_LOCAL (info, h)))
4165 || (ELIMINATE_COPY_RELOCS
4166 && !info->shared
4167 && h != NULL
4168 && h->dynindx != -1
4169 && !h->non_got_ref
4170 && ((h->def_dynamic
4171 && !h->def_regular)
4172 || h->root.type == bfd_link_hash_undefweak
4173 || h->root.type == bfd_link_hash_undefined)))
4174 {
4175 Elf_Internal_Rela outrel;
4176 bfd_boolean skip, relocate;
4177 asection *sreloc;
4178
4179 /* When generating a shared object, these relocations
4180 are copied into the output file to be resolved at run
4181 time. */
4182 skip = FALSE;
4183 relocate = FALSE;
4184
4185 outrel.r_offset =
4186 _bfd_elf_section_offset (output_bfd, info, input_section,
4187 rel->r_offset);
4188 if (outrel.r_offset == (bfd_vma) -1)
4189 skip = TRUE;
4190 else if (outrel.r_offset == (bfd_vma) -2)
4191 skip = TRUE, relocate = TRUE;
4192
4193 outrel.r_offset += (input_section->output_section->vma
4194 + input_section->output_offset);
4195
4196 if (skip)
4197 memset (&outrel, 0, sizeof outrel);
4198
4199 /* h->dynindx may be -1 if this symbol was marked to
4200 become local. */
4201 else if (h != NULL
4202 && h->dynindx != -1
4203 && (IS_X86_64_PCREL_TYPE (r_type)
4204 || ! info->shared
4205 || ! SYMBOLIC_BIND (info, h)
4206 || ! h->def_regular))
4207 {
4208 outrel.r_info = htab->r_info (h->dynindx, r_type);
4209 outrel.r_addend = rel->r_addend;
4210 }
4211 else
4212 {
4213 /* This symbol is local, or marked to become local. */
4214 if (r_type == htab->pointer_r_type)
4215 {
4216 relocate = TRUE;
4217 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4218 outrel.r_addend = relocation + rel->r_addend;
4219 }
4220 else if (r_type == R_X86_64_64
4221 && !ABI_64_P (output_bfd))
4222 {
4223 relocate = TRUE;
4224 outrel.r_info = htab->r_info (0,
4225 R_X86_64_RELATIVE64);
4226 outrel.r_addend = relocation + rel->r_addend;
4227 /* Check addend overflow. */
4228 if ((outrel.r_addend & 0x80000000)
4229 != (rel->r_addend & 0x80000000))
4230 {
4231 const char *name;
4232 int addend = rel->r_addend;
4233 if (h && h->root.root.string)
4234 name = h->root.root.string;
4235 else
4236 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4237 sym, NULL);
4238 if (addend < 0)
4239 (*_bfd_error_handler)
4240 (_("%B: addend -0x%x in relocation %s against "
4241 "symbol `%s' at 0x%lx in section `%A' is "
4242 "out of range"),
4243 input_bfd, input_section, addend,
4244 x86_64_elf_howto_table[r_type].name,
4245 name, (unsigned long) rel->r_offset);
4246 else
4247 (*_bfd_error_handler)
4248 (_("%B: addend 0x%x in relocation %s against "
4249 "symbol `%s' at 0x%lx in section `%A' is "
4250 "out of range"),
4251 input_bfd, input_section, addend,
4252 x86_64_elf_howto_table[r_type].name,
4253 name, (unsigned long) rel->r_offset);
4254 bfd_set_error (bfd_error_bad_value);
4255 return FALSE;
4256 }
4257 }
4258 else
4259 {
4260 long sindx;
4261
4262 if (bfd_is_abs_section (sec))
4263 sindx = 0;
4264 else if (sec == NULL || sec->owner == NULL)
4265 {
4266 bfd_set_error (bfd_error_bad_value);
4267 return FALSE;
4268 }
4269 else
4270 {
4271 asection *osec;
4272
4273 /* We are turning this relocation into one
4274 against a section symbol. It would be
4275 proper to subtract the symbol's value,
4276 osec->vma, from the emitted reloc addend,
4277 but ld.so expects buggy relocs. */
4278 osec = sec->output_section;
4279 sindx = elf_section_data (osec)->dynindx;
4280 if (sindx == 0)
4281 {
4282 asection *oi = htab->elf.text_index_section;
4283 sindx = elf_section_data (oi)->dynindx;
4284 }
4285 BFD_ASSERT (sindx != 0);
4286 }
4287
4288 outrel.r_info = htab->r_info (sindx, r_type);
4289 outrel.r_addend = relocation + rel->r_addend;
4290 }
4291 }
4292
4293 sreloc = elf_section_data (input_section)->sreloc;
4294
4295 if (sreloc == NULL || sreloc->contents == NULL)
4296 {
4297 r = bfd_reloc_notsupported;
4298 goto check_relocation_error;
4299 }
4300
4301 elf_append_rela (output_bfd, sreloc, &outrel);
4302
4303 /* If this reloc is against an external symbol, we do
4304 not want to fiddle with the addend. Otherwise, we
4305 need to include the symbol value so that it becomes
4306 an addend for the dynamic reloc. */
4307 if (! relocate)
4308 continue;
4309 }
4310
4311 break;
4312
4313 case R_X86_64_TLSGD:
4314 case R_X86_64_GOTPC32_TLSDESC:
4315 case R_X86_64_TLSDESC_CALL:
4316 case R_X86_64_GOTTPOFF:
4317 tls_type = GOT_UNKNOWN;
4318 if (h == NULL && local_got_offsets)
4319 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4320 else if (h != NULL)
4321 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4322
4323 if (! elf_x86_64_tls_transition (info, input_bfd,
4324 input_section, contents,
4325 symtab_hdr, sym_hashes,
4326 &r_type, tls_type, rel,
4327 relend, h, r_symndx))
4328 return FALSE;
4329
4330 if (r_type == R_X86_64_TPOFF32)
4331 {
4332 bfd_vma roff = rel->r_offset;
4333
4334 BFD_ASSERT (! unresolved_reloc);
4335
4336 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4337 {
4338 /* GD->LE transition. For 64bit, change
4339 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4340 .word 0x6666; rex64; call __tls_get_addr
4341 into:
4342 movq %fs:0, %rax
4343 leaq foo@tpoff(%rax), %rax
4344 For 32bit, change
4345 leaq foo@tlsgd(%rip), %rdi
4346 .word 0x6666; rex64; call __tls_get_addr
4347 into:
4348 movl %fs:0, %eax
4349 leaq foo@tpoff(%rax), %rax
4350 For largepic, change:
4351 leaq foo@tlsgd(%rip), %rdi
4352 movabsq $__tls_get_addr@pltoff, %rax
4353 addq %rbx, %rax
4354 call *%rax
4355 into:
4356 movq %fs:0, %rax
4357 leaq foo@tpoff(%rax), %rax
4358 nopw 0x0(%rax,%rax,1) */
4359 int largepic = 0;
4360 if (ABI_64_P (output_bfd)
4361 && contents[roff + 5] == (bfd_byte) '\xb8')
4362 {
4363 memcpy (contents + roff - 3,
4364 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4365 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4366 largepic = 1;
4367 }
4368 else if (ABI_64_P (output_bfd))
4369 memcpy (contents + roff - 4,
4370 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4371 16);
4372 else
4373 memcpy (contents + roff - 3,
4374 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4375 15);
4376 bfd_put_32 (output_bfd,
4377 elf_x86_64_tpoff (info, relocation),
4378 contents + roff + 8 + largepic);
4379 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4380 rel++;
4381 continue;
4382 }
4383 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4384 {
4385 /* GDesc -> LE transition.
4386 It's originally something like:
4387 leaq x@tlsdesc(%rip), %rax
4388
4389 Change it to:
4390 movl $x@tpoff, %rax. */
4391
4392 unsigned int val, type;
4393
4394 type = bfd_get_8 (input_bfd, contents + roff - 3);
4395 val = bfd_get_8 (input_bfd, contents + roff - 1);
4396 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4397 contents + roff - 3);
4398 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4399 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4400 contents + roff - 1);
4401 bfd_put_32 (output_bfd,
4402 elf_x86_64_tpoff (info, relocation),
4403 contents + roff);
4404 continue;
4405 }
4406 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4407 {
4408 /* GDesc -> LE transition.
4409 It's originally:
4410 call *(%rax)
4411 Turn it into:
4412 xchg %ax,%ax. */
4413 bfd_put_8 (output_bfd, 0x66, contents + roff);
4414 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4415 continue;
4416 }
4417 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4418 {
4419 /* IE->LE transition:
4420 For 64bit, originally it can be one of:
4421 movq foo@gottpoff(%rip), %reg
4422 addq foo@gottpoff(%rip), %reg
4423 We change it into:
4424 movq $foo, %reg
4425 leaq foo(%reg), %reg
4426 addq $foo, %reg.
4427 For 32bit, originally it can be one of:
4428 movq foo@gottpoff(%rip), %reg
4429 addl foo@gottpoff(%rip), %reg
4430 We change it into:
4431 movq $foo, %reg
4432 leal foo(%reg), %reg
4433 addl $foo, %reg. */
4434
4435 unsigned int val, type, reg;
4436
4437 if (roff >= 3)
4438 val = bfd_get_8 (input_bfd, contents + roff - 3);
4439 else
4440 val = 0;
4441 type = bfd_get_8 (input_bfd, contents + roff - 2);
4442 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4443 reg >>= 3;
4444 if (type == 0x8b)
4445 {
4446 /* movq */
4447 if (val == 0x4c)
4448 bfd_put_8 (output_bfd, 0x49,
4449 contents + roff - 3);
4450 else if (!ABI_64_P (output_bfd) && val == 0x44)
4451 bfd_put_8 (output_bfd, 0x41,
4452 contents + roff - 3);
4453 bfd_put_8 (output_bfd, 0xc7,
4454 contents + roff - 2);
4455 bfd_put_8 (output_bfd, 0xc0 | reg,
4456 contents + roff - 1);
4457 }
4458 else if (reg == 4)
4459 {
4460 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4461 is special */
4462 if (val == 0x4c)
4463 bfd_put_8 (output_bfd, 0x49,
4464 contents + roff - 3);
4465 else if (!ABI_64_P (output_bfd) && val == 0x44)
4466 bfd_put_8 (output_bfd, 0x41,
4467 contents + roff - 3);
4468 bfd_put_8 (output_bfd, 0x81,
4469 contents + roff - 2);
4470 bfd_put_8 (output_bfd, 0xc0 | reg,
4471 contents + roff - 1);
4472 }
4473 else
4474 {
4475 /* addq/addl -> leaq/leal */
4476 if (val == 0x4c)
4477 bfd_put_8 (output_bfd, 0x4d,
4478 contents + roff - 3);
4479 else if (!ABI_64_P (output_bfd) && val == 0x44)
4480 bfd_put_8 (output_bfd, 0x45,
4481 contents + roff - 3);
4482 bfd_put_8 (output_bfd, 0x8d,
4483 contents + roff - 2);
4484 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4485 contents + roff - 1);
4486 }
4487 bfd_put_32 (output_bfd,
4488 elf_x86_64_tpoff (info, relocation),
4489 contents + roff);
4490 continue;
4491 }
4492 else
4493 BFD_ASSERT (FALSE);
4494 }
4495
4496 if (htab->elf.sgot == NULL)
4497 abort ();
4498
4499 if (h != NULL)
4500 {
4501 off = h->got.offset;
4502 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4503 }
4504 else
4505 {
4506 if (local_got_offsets == NULL)
4507 abort ();
4508
4509 off = local_got_offsets[r_symndx];
4510 offplt = local_tlsdesc_gotents[r_symndx];
4511 }
4512
4513 if ((off & 1) != 0)
4514 off &= ~1;
4515 else
4516 {
4517 Elf_Internal_Rela outrel;
4518 int dr_type, indx;
4519 asection *sreloc;
4520
4521 if (htab->elf.srelgot == NULL)
4522 abort ();
4523
4524 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4525
4526 if (GOT_TLS_GDESC_P (tls_type))
4527 {
4528 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4529 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4530 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4531 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4532 + htab->elf.sgotplt->output_offset
4533 + offplt
4534 + htab->sgotplt_jump_table_size);
4535 sreloc = htab->elf.srelplt;
4536 if (indx == 0)
4537 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4538 else
4539 outrel.r_addend = 0;
4540 elf_append_rela (output_bfd, sreloc, &outrel);
4541 }
4542
4543 sreloc = htab->elf.srelgot;
4544
4545 outrel.r_offset = (htab->elf.sgot->output_section->vma
4546 + htab->elf.sgot->output_offset + off);
4547
4548 if (GOT_TLS_GD_P (tls_type))
4549 dr_type = R_X86_64_DTPMOD64;
4550 else if (GOT_TLS_GDESC_P (tls_type))
4551 goto dr_done;
4552 else
4553 dr_type = R_X86_64_TPOFF64;
4554
4555 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4556 outrel.r_addend = 0;
4557 if ((dr_type == R_X86_64_TPOFF64
4558 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4559 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4560 outrel.r_info = htab->r_info (indx, dr_type);
4561
4562 elf_append_rela (output_bfd, sreloc, &outrel);
4563
4564 if (GOT_TLS_GD_P (tls_type))
4565 {
4566 if (indx == 0)
4567 {
4568 BFD_ASSERT (! unresolved_reloc);
4569 bfd_put_64 (output_bfd,
4570 relocation - elf_x86_64_dtpoff_base (info),
4571 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4572 }
4573 else
4574 {
4575 bfd_put_64 (output_bfd, 0,
4576 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4577 outrel.r_info = htab->r_info (indx,
4578 R_X86_64_DTPOFF64);
4579 outrel.r_offset += GOT_ENTRY_SIZE;
4580 elf_append_rela (output_bfd, sreloc,
4581 &outrel);
4582 }
4583 }
4584
4585 dr_done:
4586 if (h != NULL)
4587 h->got.offset |= 1;
4588 else
4589 local_got_offsets[r_symndx] |= 1;
4590 }
4591
4592 if (off >= (bfd_vma) -2
4593 && ! GOT_TLS_GDESC_P (tls_type))
4594 abort ();
4595 if (r_type == ELF32_R_TYPE (rel->r_info))
4596 {
4597 if (r_type == R_X86_64_GOTPC32_TLSDESC
4598 || r_type == R_X86_64_TLSDESC_CALL)
4599 relocation = htab->elf.sgotplt->output_section->vma
4600 + htab->elf.sgotplt->output_offset
4601 + offplt + htab->sgotplt_jump_table_size;
4602 else
4603 relocation = htab->elf.sgot->output_section->vma
4604 + htab->elf.sgot->output_offset + off;
4605 unresolved_reloc = FALSE;
4606 }
4607 else
4608 {
4609 bfd_vma roff = rel->r_offset;
4610
4611 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4612 {
4613 /* GD->IE transition. For 64bit, change
4614 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4615 .word 0x6666; rex64; call __tls_get_addr@plt
4616 into:
4617 movq %fs:0, %rax
4618 addq foo@gottpoff(%rip), %rax
4619 For 32bit, change
4620 leaq foo@tlsgd(%rip), %rdi
4621 .word 0x6666; rex64; call __tls_get_addr@plt
4622 into:
4623 movl %fs:0, %eax
4624 addq foo@gottpoff(%rip), %rax
4625 For largepic, change:
4626 leaq foo@tlsgd(%rip), %rdi
4627 movabsq $__tls_get_addr@pltoff, %rax
4628 addq %rbx, %rax
4629 call *%rax
4630 into:
4631 movq %fs:0, %rax
4632 addq foo@gottpoff(%rip), %rax
4633 nopw 0x0(%rax,%rax,1) */
4634 int largepic = 0;
4635 if (ABI_64_P (output_bfd)
4636 && contents[roff + 5] == (bfd_byte) '\xb8')
4637 {
4638 memcpy (contents + roff - 3,
4639 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4640 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4641 largepic = 1;
4642 }
4643 else if (ABI_64_P (output_bfd))
4644 memcpy (contents + roff - 4,
4645 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4646 16);
4647 else
4648 memcpy (contents + roff - 3,
4649 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4650 15);
4651
4652 relocation = (htab->elf.sgot->output_section->vma
4653 + htab->elf.sgot->output_offset + off
4654 - roff
4655 - largepic
4656 - input_section->output_section->vma
4657 - input_section->output_offset
4658 - 12);
4659 bfd_put_32 (output_bfd, relocation,
4660 contents + roff + 8 + largepic);
4661 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4662 rel++;
4663 continue;
4664 }
4665 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4666 {
4667 /* GDesc -> IE transition.
4668 It's originally something like:
4669 leaq x@tlsdesc(%rip), %rax
4670
4671 Change it to:
4672 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4673
4674 /* Now modify the instruction as appropriate. To
4675 turn a leaq into a movq in the form we use it, it
4676 suffices to change the second byte from 0x8d to
4677 0x8b. */
4678 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4679
4680 bfd_put_32 (output_bfd,
4681 htab->elf.sgot->output_section->vma
4682 + htab->elf.sgot->output_offset + off
4683 - rel->r_offset
4684 - input_section->output_section->vma
4685 - input_section->output_offset
4686 - 4,
4687 contents + roff);
4688 continue;
4689 }
4690 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4691 {
4692 /* GDesc -> IE transition.
4693 It's originally:
4694 call *(%rax)
4695
4696 Change it to:
4697 xchg %ax, %ax. */
4698
4699 bfd_put_8 (output_bfd, 0x66, contents + roff);
4700 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4701 continue;
4702 }
4703 else
4704 BFD_ASSERT (FALSE);
4705 }
4706 break;
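/* To recap the GOT layout driven by the code above: a GD access owns a
   pair of consecutive slots,

     GOT[n]     R_X86_64_DTPMOD64   module ID, filled in by ld.so
     GOT[n+1]   R_X86_64_DTPOFF64   offset within the module's TLS block
                                    (a link-time constant when the symbol
                                    binds locally)

   an IE access uses a single R_X86_64_TPOFF64 slot, and a GDesc access
   uses a descriptor in .got.plt relocated by R_X86_64_TLSDESC.  */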
4707
4708 case R_X86_64_TLSLD:
4709 if (! elf_x86_64_tls_transition (info, input_bfd,
4710 input_section, contents,
4711 symtab_hdr, sym_hashes,
4712 &r_type, GOT_UNKNOWN,
4713 rel, relend, h, r_symndx))
4714 return FALSE;
4715
4716 if (r_type != R_X86_64_TLSLD)
4717 {
4718 /* LD->LE transition:
4719 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4720 For 64bit, we change it into:
4721 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4722 For 32bit, we change it into:
4723 nopl 0x0(%rax); movl %fs:0, %eax.
4724 For largepic, change:
4725 leaq foo@tlsld(%rip), %rdi
4726 movabsq $__tls_get_addr@pltoff, %rax
4727 addq %rbx, %rax
4728 call *%rax
4729 into:
4730 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4731 movq %fs:0, %rax */
4732
4733 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4734 if (ABI_64_P (output_bfd)
4735 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4736 memcpy (contents + rel->r_offset - 3,
4737 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4738 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4739 else if (ABI_64_P (output_bfd))
4740 memcpy (contents + rel->r_offset - 3,
4741 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4742 else
4743 memcpy (contents + rel->r_offset - 3,
4744 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4745 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4746 rel++;
4747 continue;
4748 }
4749
4750 if (htab->elf.sgot == NULL)
4751 abort ();
4752
4753 off = htab->tls_ld_got.offset;
4754 if (off & 1)
4755 off &= ~1;
4756 else
4757 {
4758 Elf_Internal_Rela outrel;
4759
4760 if (htab->elf.srelgot == NULL)
4761 abort ();
4762
4763 outrel.r_offset = (htab->elf.sgot->output_section->vma
4764 + htab->elf.sgot->output_offset + off);
4765
4766 bfd_put_64 (output_bfd, 0,
4767 htab->elf.sgot->contents + off);
4768 bfd_put_64 (output_bfd, 0,
4769 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4770 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4771 outrel.r_addend = 0;
4772 elf_append_rela (output_bfd, htab->elf.srelgot,
4773 &outrel);
4774 htab->tls_ld_got.offset |= 1;
4775 }
4776 relocation = htab->elf.sgot->output_section->vma
4777 + htab->elf.sgot->output_offset + off;
4778 unresolved_reloc = FALSE;
4779 break;
4780
4781 case R_X86_64_DTPOFF32:
4782 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4783 relocation -= elf_x86_64_dtpoff_base (info);
4784 else
4785 relocation = elf_x86_64_tpoff (info, relocation);
4786 break;
4787
4788 case R_X86_64_TPOFF32:
4789 case R_X86_64_TPOFF64:
4790 BFD_ASSERT (info->executable);
4791 relocation = elf_x86_64_tpoff (info, relocation);
4792 break;
4793
4794 case R_X86_64_DTPOFF64:
4795 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4796 relocation -= elf_x86_64_dtpoff_base (info);
4797 break;
4798
4799 default:
4800 break;
4801 }
4802
4803 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4804 because such sections are not SEC_ALLOC and thus ld.so will
4805 not process them. */
4806 if (unresolved_reloc
4807 && !((input_section->flags & SEC_DEBUGGING) != 0
4808 && h->def_dynamic)
4809 && _bfd_elf_section_offset (output_bfd, info, input_section,
4810 rel->r_offset) != (bfd_vma) -1)
4811 {
4812 (*_bfd_error_handler)
4813 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4814 input_bfd,
4815 input_section,
4816 (long) rel->r_offset,
4817 howto->name,
4818 h->root.root.string);
4819 return FALSE;
4820 }
4821
4822 do_relocation:
4823 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4824 contents, rel->r_offset,
4825 relocation, rel->r_addend);
4826
4827 check_relocation_error:
4828 if (r != bfd_reloc_ok)
4829 {
4830 const char *name;
4831
4832 if (h != NULL)
4833 name = h->root.root.string;
4834 else
4835 {
4836 name = bfd_elf_string_from_elf_section (input_bfd,
4837 symtab_hdr->sh_link,
4838 sym->st_name);
4839 if (name == NULL)
4840 return FALSE;
4841 if (*name == '\0')
4842 name = bfd_section_name (input_bfd, sec);
4843 }
4844
4845 if (r == bfd_reloc_overflow)
4846 {
4847 if (! ((*info->callbacks->reloc_overflow)
4848 (info, (h ? &h->root : NULL), name, howto->name,
4849 (bfd_vma) 0, input_bfd, input_section,
4850 rel->r_offset)))
4851 return FALSE;
4852 }
4853 else
4854 {
4855 (*_bfd_error_handler)
4856 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4857 input_bfd, input_section,
4858 (long) rel->r_offset, name, (int) r);
4859 return FALSE;
4860 }
4861 }
4862 }
4863
4864 return TRUE;
4865 }
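/* A note on the @tpoff values used by the TLS transitions above: on
   x86-64 the thread pointer (the %fs base) points just past the static
   TLS block, so elf_x86_64_tpoff yields a negative offset and a
   local-exec access is simply

     movq %fs:0, %rax
     leaq foo@tpoff(%rax), %rax         # &foo = TP + (negative) tpoff

   matching the rewritten sequences in the GD->LE and IE->LE cases.  */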
4866
4867 /* Finish up dynamic symbol handling. We set the contents of various
4868 dynamic sections here. */
4869
4870 static bfd_boolean
4871 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4872 struct bfd_link_info *info,
4873 struct elf_link_hash_entry *h,
4874 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4875 {
4876 struct elf_x86_64_link_hash_table *htab;
4877 const struct elf_x86_64_backend_data *abed;
4878 bfd_boolean use_plt_bnd;
4879 struct elf_x86_64_link_hash_entry *eh;
4880
4881 htab = elf_x86_64_hash_table (info);
4882 if (htab == NULL)
4883 return FALSE;
4884
4885 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4886 section only if there is .plt section. */
4887 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4888 abed = (use_plt_bnd
4889 ? &elf_x86_64_bnd_arch_bed
4890 : get_elf_x86_64_backend_data (output_bfd));
4891
4892 eh = (struct elf_x86_64_link_hash_entry *) h;
4893
4894 if (h->plt.offset != (bfd_vma) -1)
4895 {
4896 bfd_vma plt_index;
4897 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4898 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4899 Elf_Internal_Rela rela;
4900 bfd_byte *loc;
4901 asection *plt, *gotplt, *relplt, *resolved_plt;
4902 const struct elf_backend_data *bed;
4903 bfd_vma plt_got_pcrel_offset;
4904
4905 /* When building a static executable, use .iplt, .igot.plt and
4906 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4907 if (htab->elf.splt != NULL)
4908 {
4909 plt = htab->elf.splt;
4910 gotplt = htab->elf.sgotplt;
4911 relplt = htab->elf.srelplt;
4912 }
4913 else
4914 {
4915 plt = htab->elf.iplt;
4916 gotplt = htab->elf.igotplt;
4917 relplt = htab->elf.irelplt;
4918 }
4919
4920 /* This symbol has an entry in the procedure linkage table. Set
4921 it up. */
4922 if ((h->dynindx == -1
4923 && !((h->forced_local || info->executable)
4924 && h->def_regular
4925 && h->type == STT_GNU_IFUNC))
4926 || plt == NULL
4927 || gotplt == NULL
4928 || relplt == NULL)
4929 abort ();
4930
4931 /* Get the index in the procedure linkage table which
4932 corresponds to this symbol. This is the index of this symbol
4933 in all the symbols for which we are making plt entries. The
4934 first entry in the procedure linkage table is reserved.
4935
4936 Get the offset into the .got table of the entry that
4937 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4938 bytes. The first three are reserved for the dynamic linker.
4939
4940 For static executables, we don't reserve anything. */
4941
4942 if (plt == htab->elf.splt)
4943 {
4944 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4945 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4946 }
4947 else
4948 {
4949 got_offset = h->plt.offset / abed->plt_entry_size;
4950 got_offset = got_offset * GOT_ENTRY_SIZE;
4951 }
4952
4953 plt_plt_insn_end = abed->plt_plt_insn_end;
4954 plt_plt_offset = abed->plt_plt_offset;
4955 plt_got_insn_size = abed->plt_got_insn_size;
4956 plt_got_offset = abed->plt_got_offset;
4957 if (use_plt_bnd)
4958 {
4959 /* Use the second PLT with BND relocations. */
4960 const bfd_byte *plt_entry, *plt2_entry;
4961
4962 if (eh->has_bnd_reloc)
4963 {
4964 plt_entry = elf_x86_64_bnd_plt_entry;
4965 plt2_entry = elf_x86_64_bnd_plt2_entry;
4966 }
4967 else
4968 {
4969 plt_entry = elf_x86_64_legacy_plt_entry;
4970 plt2_entry = elf_x86_64_legacy_plt2_entry;
4971
4972 /* Subtract 1 since there is no BND prefix. */
4973 plt_plt_insn_end -= 1;
4974 plt_plt_offset -= 1;
4975 plt_got_insn_size -= 1;
4976 plt_got_offset -= 1;
4977 }
4978
4979 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4980 == sizeof (elf_x86_64_legacy_plt_entry));
4981
4982 /* Fill in the entry in the procedure linkage table. */
4983 memcpy (plt->contents + h->plt.offset,
4984 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4985 /* Fill in the entry in the second PLT. */
4986 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4987 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4988
4989 resolved_plt = htab->plt_bnd;
4990 plt_offset = eh->plt_bnd.offset;
4991 }
4992 else
4993 {
4994 /* Fill in the entry in the procedure linkage table. */
4995 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4996 abed->plt_entry_size);
4997
4998 resolved_plt = plt;
4999 plt_offset = h->plt.offset;
5000 }
5001
5002 /* Fill in the relocated fields of the PLT entry. */
5003
5004 /* Put in the PC-relative offset to the GOT entry, measured from the
5005 end of the PLT instruction that references it. */
5006 plt_got_pcrel_offset = (gotplt->output_section->vma
5007 + gotplt->output_offset
5008 + got_offset
5009 - resolved_plt->output_section->vma
5010 - resolved_plt->output_offset
5011 - plt_offset
5012 - plt_got_insn_size);
5013
5014 /* Check PC-relative offset overflow in PLT entry. */
5015 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5016 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5017 output_bfd, h->root.root.string);
5018
5019 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5020 resolved_plt->contents + plt_offset + plt_got_offset);
5021
5022 /* Fill in the entry in the global offset table, initially this
5023 points to the second part of the PLT entry. */
5024 bfd_put_64 (output_bfd, (plt->output_section->vma
5025 + plt->output_offset
5026 + h->plt.offset + abed->plt_lazy_offset),
5027 gotplt->contents + got_offset);
5028
5029 /* Fill in the entry in the .rela.plt section. */
5030 rela.r_offset = (gotplt->output_section->vma
5031 + gotplt->output_offset
5032 + got_offset);
5033 if (h->dynindx == -1
5034 || ((info->executable
5035 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5036 && h->def_regular
5037 && h->type == STT_GNU_IFUNC))
5038 {
5039 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5040 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5041 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5042 rela.r_addend = (h->root.u.def.value
5043 + h->root.u.def.section->output_section->vma
5044 + h->root.u.def.section->output_offset);
5045 /* R_X86_64_IRELATIVE comes last. */
5046 plt_index = htab->next_irelative_index--;
5047 }
5048 else
5049 {
5050 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5051 rela.r_addend = 0;
5052 plt_index = htab->next_jump_slot_index++;
5053 }
5054
5055 /* Don't fill PLT entry for static executables. */
5056 if (plt == htab->elf.splt)
5057 {
5058 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5059
5060 /* Put relocation index. */
5061 bfd_put_32 (output_bfd, plt_index,
5062 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5063
5064 /* Put offset for jmp .PLT0 and check for overflow. We don't
5065 check relocation index for overflow since branch displacement
5066 will overflow first. */
5067 if (plt0_offset > 0x80000000)
5068 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5069 output_bfd, h->root.root.string);
5070 bfd_put_32 (output_bfd, - plt0_offset,
5071 plt->contents + h->plt.offset + plt_plt_offset);
5072 }
5073
5074 bed = get_elf_backend_data (output_bfd);
5075 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5076 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5077 }
5078 else if (eh->plt_got.offset != (bfd_vma) -1)
5079 {
5080 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5081 asection *plt, *got;
5082 bfd_boolean got_after_plt;
5083 int32_t got_pcrel_offset;
5084 const bfd_byte *got_plt_entry;
5085
5086 /* Set the entry in the GOT procedure linkage table. */
5087 plt = htab->plt_got;
5088 got = htab->elf.sgot;
5089 got_offset = h->got.offset;
5090
5091 if (got_offset == (bfd_vma) -1
5092 || h->type == STT_GNU_IFUNC
5093 || plt == NULL
5094 || got == NULL)
5095 abort ();
5096
5097 /* Use the second PLT entry template for the GOT PLT since they
5098 are identical. */
5099 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5100 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5101 if (eh->has_bnd_reloc)
5102 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5103 else
5104 {
5105 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5106
5107 /* Subtract 1 since there is no BND prefix. */
5108 plt_got_insn_size -= 1;
5109 plt_got_offset -= 1;
5110 }
5111
5112 /* Fill in the entry in the GOT procedure linkage table. */
5113 plt_offset = eh->plt_got.offset;
5114 memcpy (plt->contents + plt_offset,
5115 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5116
5117 /* Put in the PC-relative offset to the GOT entry, measured from the
5118 end of the instruction in the GOT PLT entry that references it. */
5119 got_pcrel_offset = (got->output_section->vma
5120 + got->output_offset
5121 + got_offset
5122 - plt->output_section->vma
5123 - plt->output_offset
5124 - plt_offset
5125 - plt_got_insn_size);
5126
5127 /* Check PC-relative offset overflow in GOT PLT entry. */
5128 got_after_plt = got->output_section->vma > plt->output_section->vma;
5129 if ((got_after_plt && got_pcrel_offset < 0)
5130 || (!got_after_plt && got_pcrel_offset > 0))
5131 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5132 output_bfd, h->root.root.string);
5133
5134 bfd_put_32 (output_bfd, got_pcrel_offset,
5135 plt->contents + plt_offset + plt_got_offset);
5136 }
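/* For orientation, the fields patched above belong to the standard lazy
   PLT entry, which (without the BND prefix) is laid out roughly as

     ff 25 <got>     jmpq  *name@GOTPCREL(%rip)   (32-bit field at plt_got_offset)
     68 <index>      pushq $reloc_index           (field at plt_reloc_offset)
     e9 <disp>       jmpq  PLT0                   (field at plt_plt_offset)

   and the matching .got.plt slot initially points back at the pushq,
   i.e. plt.offset + plt_lazy_offset, exactly as written above.  */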
5137
5138 if (!h->def_regular
5139 && (h->plt.offset != (bfd_vma) -1
5140 || eh->plt_got.offset != (bfd_vma) -1))
5141 {
5142 /* Mark the symbol as undefined, rather than as defined in
5143 the .plt section. Leave the value if there were any
5144 relocations where pointer equality matters (this is a clue
5145 for the dynamic linker, to make function pointer
5146 comparisons work between an application and shared
5147 library), otherwise set it to zero. If a function is only
5148 called from a binary, there is no need to slow down
5149 shared libraries because of that. */
5150 sym->st_shndx = SHN_UNDEF;
5151 if (!h->pointer_equality_needed)
5152 sym->st_value = 0;
5153 }
5154
5155 if (h->got.offset != (bfd_vma) -1
5156 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5157 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5158 {
5159 Elf_Internal_Rela rela;
5160
5161 /* This symbol has an entry in the global offset table. Set it
5162 up. */
5163 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5164 abort ();
5165
5166 rela.r_offset = (htab->elf.sgot->output_section->vma
5167 + htab->elf.sgot->output_offset
5168 + (h->got.offset &~ (bfd_vma) 1));
5169
5170 /* If this is a static link, or it is a -Bsymbolic link and the
5171 symbol is defined locally or was forced to be local because
5172 of a version file, we just want to emit a RELATIVE reloc.
5173 The entry in the global offset table will already have been
5174 initialized in the relocate_section function. */
5175 if (h->def_regular
5176 && h->type == STT_GNU_IFUNC)
5177 {
5178 if (info->shared)
5179 {
5180 /* Generate R_X86_64_GLOB_DAT. */
5181 goto do_glob_dat;
5182 }
5183 else
5184 {
5185 asection *plt;
5186
5187 if (!h->pointer_equality_needed)
5188 abort ();
5189
5190 /* For a non-shared object, we can't use .got.plt, which
5191 contains the real function address, if we need pointer
5192 equality. We load the GOT entry with the PLT entry. */
5193 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5194 bfd_put_64 (output_bfd, (plt->output_section->vma
5195 + plt->output_offset
5196 + h->plt.offset),
5197 htab->elf.sgot->contents + h->got.offset);
5198 return TRUE;
5199 }
5200 }
5201 else if (info->shared
5202 && SYMBOL_REFERENCES_LOCAL (info, h))
5203 {
5204 if (!h->def_regular)
5205 return FALSE;
5206 BFD_ASSERT ((h->got.offset & 1) != 0);
5207 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5208 rela.r_addend = (h->root.u.def.value
5209 + h->root.u.def.section->output_section->vma
5210 + h->root.u.def.section->output_offset);
5211 }
5212 else
5213 {
5214 BFD_ASSERT ((h->got.offset & 1) == 0);
5215 do_glob_dat:
5216 bfd_put_64 (output_bfd, (bfd_vma) 0,
5217 htab->elf.sgot->contents + h->got.offset);
5218 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5219 rela.r_addend = 0;
5220 }
5221
5222 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5223 }
5224
5225 if (h->needs_copy)
5226 {
5227 Elf_Internal_Rela rela;
5228
5229 /* This symbol needs a copy reloc. Set it up. */
5230
5231 if (h->dynindx == -1
5232 || (h->root.type != bfd_link_hash_defined
5233 && h->root.type != bfd_link_hash_defweak)
5234 || htab->srelbss == NULL)
5235 abort ();
5236
5237 rela.r_offset = (h->root.u.def.value
5238 + h->root.u.def.section->output_section->vma
5239 + h->root.u.def.section->output_offset);
5240 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5241 rela.r_addend = 0;
5242 elf_append_rela (output_bfd, htab->srelbss, &rela);
5243 }
5244
5245 return TRUE;
5246 }
5247
5248 /* Finish up local dynamic symbol handling. We set the contents of
5249 various dynamic sections here. */
5250
5251 static bfd_boolean
5252 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5253 {
5254 struct elf_link_hash_entry *h
5255 = (struct elf_link_hash_entry *) *slot;
5256 struct bfd_link_info *info
5257 = (struct bfd_link_info *) inf;
5258
5259 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5260 info, h, NULL);
5261 }
5262
5263 /* Used to decide how to sort relocs in an optimal manner for the
5264 dynamic linker, before writing them out. */
5265
5266 static enum elf_reloc_type_class
5267 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5268 const asection *rel_sec ATTRIBUTE_UNUSED,
5269 const Elf_Internal_Rela *rela)
5270 {
5271 switch ((int) ELF32_R_TYPE (rela->r_info))
5272 {
5273 case R_X86_64_RELATIVE:
5274 case R_X86_64_RELATIVE64:
5275 return reloc_class_relative;
5276 case R_X86_64_JUMP_SLOT:
5277 return reloc_class_plt;
5278 case R_X86_64_COPY:
5279 return reloc_class_copy;
5280 default:
5281 return reloc_class_normal;
5282 }
5283 }
5284
5285 /* Finish up the dynamic sections. */
5286
5287 static bfd_boolean
5288 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5289 struct bfd_link_info *info)
5290 {
5291 struct elf_x86_64_link_hash_table *htab;
5292 bfd *dynobj;
5293 asection *sdyn;
5294 const struct elf_x86_64_backend_data *abed;
5295
5296 htab = elf_x86_64_hash_table (info);
5297 if (htab == NULL)
5298 return FALSE;
5299
5300 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5301 section only if there is .plt section. */
5302 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5303 ? &elf_x86_64_bnd_arch_bed
5304 : get_elf_x86_64_backend_data (output_bfd));
5305
5306 dynobj = htab->elf.dynobj;
5307 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5308
5309 if (htab->elf.dynamic_sections_created)
5310 {
5311 bfd_byte *dyncon, *dynconend;
5312 const struct elf_backend_data *bed;
5313 bfd_size_type sizeof_dyn;
5314
5315 if (sdyn == NULL || htab->elf.sgot == NULL)
5316 abort ();
5317
5318 bed = get_elf_backend_data (dynobj);
5319 sizeof_dyn = bed->s->sizeof_dyn;
5320 dyncon = sdyn->contents;
5321 dynconend = sdyn->contents + sdyn->size;
5322 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5323 {
5324 Elf_Internal_Dyn dyn;
5325 asection *s;
5326
5327 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5328
5329 switch (dyn.d_tag)
5330 {
5331 default:
5332 continue;
5333
5334 case DT_PLTGOT:
5335 s = htab->elf.sgotplt;
5336 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5337 break;
5338
5339 case DT_JMPREL:
5340 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5341 break;
5342
5343 case DT_PLTRELSZ:
5344 s = htab->elf.srelplt->output_section;
5345 dyn.d_un.d_val = s->size;
5346 break;
5347
5348 case DT_RELASZ:
5349 /* The procedure linkage table relocs (DT_JMPREL) should
5350 not be included in the overall relocs (DT_RELA).
5351 Therefore, we override the DT_RELASZ entry here to
5352 make it not include the JMPREL relocs. Since the
5353 linker script arranges for .rela.plt to follow all
5354 other relocation sections, we don't have to worry
5355 about changing the DT_RELA entry. */
5356 if (htab->elf.srelplt != NULL)
5357 {
5358 s = htab->elf.srelplt->output_section;
5359 dyn.d_un.d_val -= s->size;
5360 }
5361 break;
5362
5363 case DT_TLSDESC_PLT:
5364 s = htab->elf.splt;
5365 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5366 + htab->tlsdesc_plt;
5367 break;
5368
5369 case DT_TLSDESC_GOT:
5370 s = htab->elf.sgot;
5371 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5372 + htab->tlsdesc_got;
5373 break;
5374 }
5375
5376 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5377 }
5378
5379 /* Fill in the special first entry in the procedure linkage table. */
5380 if (htab->elf.splt && htab->elf.splt->size > 0)
5381 {
5382 /* Fill in the first entry in the procedure linkage table. */
5383 memcpy (htab->elf.splt->contents,
5384 abed->plt0_entry, abed->plt_entry_size);
5385 /* Add offset for pushq GOT+8(%rip), since the instruction
5386 uses 6 bytes, subtract this value. */
5387 bfd_put_32 (output_bfd,
5388 (htab->elf.sgotplt->output_section->vma
5389 + htab->elf.sgotplt->output_offset
5390 + 8
5391 - htab->elf.splt->output_section->vma
5392 - htab->elf.splt->output_offset
5393 - 6),
5394 htab->elf.splt->contents + abed->plt0_got1_offset);
5395 /* Add offset for the PC-relative instruction accessing GOT+16,
5396 subtracting the offset to the end of that instruction. */
5397 bfd_put_32 (output_bfd,
5398 (htab->elf.sgotplt->output_section->vma
5399 + htab->elf.sgotplt->output_offset
5400 + 16
5401 - htab->elf.splt->output_section->vma
5402 - htab->elf.splt->output_offset
5403 - abed->plt0_got2_insn_end),
5404 htab->elf.splt->contents + abed->plt0_got2_offset);
5405
5406 elf_section_data (htab->elf.splt->output_section)
5407 ->this_hdr.sh_entsize = abed->plt_entry_size;
5408
5409 if (htab->tlsdesc_plt)
5410 {
5411 bfd_put_64 (output_bfd, (bfd_vma) 0,
5412 htab->elf.sgot->contents + htab->tlsdesc_got);
5413
5414 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5415 abed->plt0_entry, abed->plt_entry_size);
5416
5417 /* Add offset for pushq GOT+8(%rip), since the
5418 instruction uses 6 bytes, subtract this value. */
5419 bfd_put_32 (output_bfd,
5420 (htab->elf.sgotplt->output_section->vma
5421 + htab->elf.sgotplt->output_offset
5422 + 8
5423 - htab->elf.splt->output_section->vma
5424 - htab->elf.splt->output_offset
5425 - htab->tlsdesc_plt
5426 - 6),
5427 htab->elf.splt->contents
5428 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5429 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5430 where TDG stands for htab->tlsdesc_got, subtracting the offset
5431 to the end of that instruction. */
5432 bfd_put_32 (output_bfd,
5433 (htab->elf.sgot->output_section->vma
5434 + htab->elf.sgot->output_offset
5435 + htab->tlsdesc_got
5436 - htab->elf.splt->output_section->vma
5437 - htab->elf.splt->output_offset
5438 - htab->tlsdesc_plt
5439 - abed->plt0_got2_insn_end),
5440 htab->elf.splt->contents
5441 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5442 }
5443 }
5444 }
5445
5446 if (htab->plt_bnd != NULL)
5447 elf_section_data (htab->plt_bnd->output_section)
5448 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5449
5450 if (htab->elf.sgotplt)
5451 {
5452 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5453 {
5454 (*_bfd_error_handler)
5455 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5456 return FALSE;
5457 }
5458
5459 /* Fill in the first three entries in the global offset table. */
5460 if (htab->elf.sgotplt->size > 0)
5461 {
5462 /* Set the first entry in the global offset table to the address of
5463 the dynamic section. */
5464 if (sdyn == NULL)
5465 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5466 else
5467 bfd_put_64 (output_bfd,
5468 sdyn->output_section->vma + sdyn->output_offset,
5469 htab->elf.sgotplt->contents);
5470 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5471 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5472 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5473 }
5474
5475 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5476 GOT_ENTRY_SIZE;
5477 }
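/* The reserved entries written above follow the usual x86-64 convention,
   roughly

     GOT[0]   address of _DYNAMIC (0 when there is no dynamic section)
     GOT[1]   link map pointer, filled in at run time by ld.so
     GOT[2]   address of the lazy resolver, filled in by ld.so

   so only GOT[0] is meaningful at link time, which is why GOT[1] and
   GOT[2] are simply zeroed here.  */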
5478
5479 /* Adjust .eh_frame for .plt section. */
5480 if (htab->plt_eh_frame != NULL
5481 && htab->plt_eh_frame->contents != NULL)
5482 {
5483 if (htab->elf.splt != NULL
5484 && htab->elf.splt->size != 0
5485 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5486 && htab->elf.splt->output_section != NULL
5487 && htab->plt_eh_frame->output_section != NULL)
5488 {
5489 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5490 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5491 + htab->plt_eh_frame->output_offset
5492 + PLT_FDE_START_OFFSET;
5493 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5494 htab->plt_eh_frame->contents
5495 + PLT_FDE_START_OFFSET);
5496 }
5497 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5498 {
5499 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5500 htab->plt_eh_frame,
5501 htab->plt_eh_frame->contents))
5502 return FALSE;
5503 }
5504 }
5505
5506 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5507 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5508 = GOT_ENTRY_SIZE;
5509
5510 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5511 htab_traverse (htab->loc_hash_table,
5512 elf_x86_64_finish_local_dynamic_symbol,
5513 info);
5514
5515 return TRUE;
5516 }
5517
5518 /* Return an array of PLT entry symbol values. */
5519
5520 static bfd_vma *
5521 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5522 asection *relplt)
5523 {
5524 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5525 arelent *p;
5526 long count, i;
5527 bfd_vma *plt_sym_val;
5528 bfd_vma plt_offset;
5529 bfd_byte *plt_contents;
5530 const struct elf_x86_64_backend_data *bed;
5531 Elf_Internal_Shdr *hdr;
5532 asection *plt_bnd;
5533
5534 /* Get the .plt section contents. PLT passed down may point to the
5535 .plt.bnd section. Make sure that PLT always points to the .plt
5536 section. */
5537 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5538 if (plt_bnd)
5539 {
5540 if (plt != plt_bnd)
5541 abort ();
5542 plt = bfd_get_section_by_name (abfd, ".plt");
5543 if (plt == NULL)
5544 abort ();
5545 bed = &elf_x86_64_bnd_arch_bed;
5546 }
5547 else
5548 bed = get_elf_x86_64_backend_data (abfd);
5549
5550 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5551 if (plt_contents == NULL)
5552 return NULL;
5553 if (!bfd_get_section_contents (abfd, (asection *) plt,
5554 plt_contents, 0, plt->size))
5555 {
5556 bad_return:
5557 free (plt_contents);
5558 return NULL;
5559 }
5560
5561 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5562 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5563 goto bad_return;
5564
5565 hdr = &elf_section_data (relplt)->this_hdr;
5566 count = relplt->size / hdr->sh_entsize;
5567
5568 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5569 if (plt_sym_val == NULL)
5570 goto bad_return;
5571
5572 for (i = 0; i < count; i++)
5573 plt_sym_val[i] = -1;
5574
5575 plt_offset = bed->plt_entry_size;
5576 p = relplt->relocation;
5577 for (i = 0; i < count; i++, p++)
5578 {
5579 long reloc_index;
5580
5581 /* Skip unknown relocation. */
5582 if (p->howto == NULL)
5583 continue;
5584
5585 if (p->howto->type != R_X86_64_JUMP_SLOT
5586 && p->howto->type != R_X86_64_IRELATIVE)
5587 continue;
5588
5589 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5590 + bed->plt_reloc_offset));
5591 if (reloc_index >= count)
5592 abort ();
5593 if (plt_bnd)
5594 {
5595 	  /* This is the index into the .plt section.  */
5596 	  long plt_index = plt_offset / bed->plt_entry_size;
5597 	  /* Store the VMA plus the offset within the .plt.bnd section.  */
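	  /* .plt.bnd has no reserved first slot, so .plt entry N (with PLT0
	     counted as entry 0) corresponds to the 8-byte slot N - 1 in
	     .plt.bnd.  */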
5598 plt_sym_val[reloc_index] =
5599 (plt_bnd->vma
5600 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5601 }
5602 else
5603 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5604 plt_offset += bed->plt_entry_size;
5605 }
5606
5607 free (plt_contents);
5608
5609 return plt_sym_val;
5610 }
5611
5612 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5613 support. */
5614
5615 static long
5616 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5617 long symcount,
5618 asymbol **syms,
5619 long dynsymcount,
5620 asymbol **dynsyms,
5621 asymbol **ret)
5622 {
5623 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5624 as PLT if it exists. */
5625 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5626 if (plt == NULL)
5627 plt = bfd_get_section_by_name (abfd, ".plt");
5628 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5629 dynsymcount, dynsyms, ret,
5630 plt,
5631 elf_x86_64_get_plt_sym_val);
5632 }
5633
5634 /* Handle an x86-64 specific section when reading an object file. This
5635 is called when elfcode.h finds a section with an unknown type. */
5636
5637 static bfd_boolean
5638 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5639 const char *name, int shindex)
5640 {
5641 if (hdr->sh_type != SHT_X86_64_UNWIND)
5642 return FALSE;
5643
5644 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5645 return FALSE;
5646
5647 return TRUE;
5648 }
5649
5650 /* Hook called by the linker routine which adds symbols from an object
5651 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5652 of .bss. */
5653
5654 static bfd_boolean
5655 elf_x86_64_add_symbol_hook (bfd *abfd,
5656 struct bfd_link_info *info,
5657 Elf_Internal_Sym *sym,
5658 const char **namep ATTRIBUTE_UNUSED,
5659 flagword *flagsp ATTRIBUTE_UNUSED,
5660 asection **secp,
5661 bfd_vma *valp)
5662 {
5663 asection *lcomm;
5664
5665 switch (sym->st_shndx)
5666 {
5667 case SHN_X86_64_LCOMMON:
5668 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5669 if (lcomm == NULL)
5670 {
5671 lcomm = bfd_make_section_with_flags (abfd,
5672 "LARGE_COMMON",
5673 (SEC_ALLOC
5674 | SEC_IS_COMMON
5675 | SEC_LINKER_CREATED));
5676 if (lcomm == NULL)
5677 return FALSE;
5678 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5679 }
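      /* As for ordinary SHN_COMMON symbols, the value BFD records for a
	 common symbol is its size.  */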
5680 *secp = lcomm;
5681 *valp = sym->st_size;
5682 return TRUE;
5683 }
5684
5685 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5686 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5687 && (abfd->flags & DYNAMIC) == 0
5688 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5689 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5690
5691 return TRUE;
5692 }
5693
5694
5695 /* Given a BFD section, try to locate the corresponding ELF section
5696 index. */
5697
5698 static bfd_boolean
5699 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5700 asection *sec, int *index_return)
5701 {
5702 if (sec == &_bfd_elf_large_com_section)
5703 {
5704 *index_return = SHN_X86_64_LCOMMON;
5705 return TRUE;
5706 }
5707 return FALSE;
5708 }
5709
5710 /* Process a symbol. */
5711
5712 static void
5713 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5714 asymbol *asym)
5715 {
5716 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5717
5718 switch (elfsym->internal_elf_sym.st_shndx)
5719 {
5720 case SHN_X86_64_LCOMMON:
5721 asym->section = &_bfd_elf_large_com_section;
5722 asym->value = elfsym->internal_elf_sym.st_size;
5723       /* Common symbols don't set BSF_GLOBAL.  */
5724 asym->flags &= ~BSF_GLOBAL;
5725 break;
5726 }
5727 }
5728
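/* Return TRUE if SYM is a common definition; x86-64 large common
   symbols (SHN_X86_64_LCOMMON) count as well.  */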
5729 static bfd_boolean
5730 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5731 {
5732 return (sym->st_shndx == SHN_COMMON
5733 || sym->st_shndx == SHN_X86_64_LCOMMON);
5734 }
5735
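/* Return the section index used to output a common symbol coming from
   SEC: SHN_X86_64_LCOMMON if SEC is marked SHF_X86_64_LARGE, otherwise
   SHN_COMMON.  */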
5736 static unsigned int
5737 elf_x86_64_common_section_index (asection *sec)
5738 {
5739 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5740 return SHN_COMMON;
5741 else
5742 return SHN_X86_64_LCOMMON;
5743 }
5744
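/* Return the BFD common section matching SEC: the large common section
   for SHF_X86_64_LARGE sections, the ordinary common section otherwise.  */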
5745 static asection *
5746 elf_x86_64_common_section (asection *sec)
5747 {
5748 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5749 return bfd_com_section_ptr;
5750 else
5751 return &_bfd_elf_large_com_section;
5752 }
5753
5754 static bfd_boolean
5755 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5756 const Elf_Internal_Sym *sym,
5757 asection **psec,
5758 bfd_boolean newdef,
5759 bfd_boolean olddef,
5760 bfd *oldbfd,
5761 const asection *oldsec)
5762 {
5763 /* A normal common symbol and a large common symbol result in a
5764 normal common symbol. We turn the large common symbol into a
5765 normal one. */
5766 if (!olddef
5767 && h->root.type == bfd_link_hash_common
5768 && !newdef
5769 && bfd_is_com_section (*psec)
5770 && oldsec != *psec)
5771 {
5772 if (sym->st_shndx == SHN_COMMON
5773 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5774 {
5775 h->root.u.c.p->section
5776 = bfd_make_section_old_way (oldbfd, "COMMON");
5777 h->root.u.c.p->section->flags = SEC_ALLOC;
5778 }
5779 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5780 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5781 *psec = bfd_com_section_ptr;
5782 }
5783
5784 return TRUE;
5785 }
5786
5787 static int
5788 elf_x86_64_additional_program_headers (bfd *abfd,
5789 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5790 {
5791 asection *s;
5792 int count = 0;
5793
5794 /* Check to see if we need a large readonly segment. */
5795 s = bfd_get_section_by_name (abfd, ".lrodata");
5796 if (s && (s->flags & SEC_LOAD))
5797 count++;
5798
5799   /* Check to see if we need a large data segment.  Since the .lbss
5800      section is placed right after the .bss section, there should be no
5801      need for a large data segment just because of .lbss.  */
5802 s = bfd_get_section_by_name (abfd, ".ldata");
5803 if (s && (s->flags & SEC_LOAD))
5804 count++;
5805
5806 return count;
5807 }
5808
5809 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5810
5811 static bfd_boolean
5812 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5813 {
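  /* A symbol that is only reached through its PLT entry, is not defined
     in a regular object and whose address is never taken does not need to
     be looked up by name at run time, so leave it out of .gnu.hash.  */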
5814 if (h->plt.offset != (bfd_vma) -1
5815 && !h->def_regular
5816 && !h->pointer_equality_needed)
5817 return FALSE;
5818
5819 return _bfd_elf_hash_symbol (h);
5820 }
5821
5822 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5823
5824 static bfd_boolean
5825 elf_x86_64_relocs_compatible (const bfd_target *input,
5826 const bfd_target *output)
5827 {
5828 return ((xvec_get_elf_backend_data (input)->s->elfclass
5829 == xvec_get_elf_backend_data (output)->s->elfclass)
5830 && _bfd_elf_relocs_compatible (input, output));
5831 }
5832
5833 static const struct bfd_elf_special_section
5834 elf_x86_64_special_sections[]=
5835 {
5836 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5837 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5838 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5839 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5840 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5841 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5842 { NULL, 0, 0, 0, 0 }
5843 };
5844
5845 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5846 #define TARGET_LITTLE_NAME "elf64-x86-64"
5847 #define ELF_ARCH bfd_arch_i386
5848 #define ELF_TARGET_ID X86_64_ELF_DATA
5849 #define ELF_MACHINE_CODE EM_X86_64
5850 #define ELF_MAXPAGESIZE 0x200000
5851 #define ELF_MINPAGESIZE 0x1000
5852 #define ELF_COMMONPAGESIZE 0x1000
5853
5854 #define elf_backend_can_gc_sections 1
5855 #define elf_backend_can_refcount 1
5856 #define elf_backend_want_got_plt 1
5857 #define elf_backend_plt_readonly 1
5858 #define elf_backend_want_plt_sym 0
5859 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5860 #define elf_backend_rela_normal 1
5861 #define elf_backend_plt_alignment 4
5862
5863 #define elf_info_to_howto elf_x86_64_info_to_howto
5864
5865 #define bfd_elf64_bfd_link_hash_table_create \
5866 elf_x86_64_link_hash_table_create
5867 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5868 #define bfd_elf64_bfd_reloc_name_lookup \
5869 elf_x86_64_reloc_name_lookup
5870
5871 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5872 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5873 #define elf_backend_check_relocs elf_x86_64_check_relocs
5874 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5875 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5876 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5877 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5878 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5879 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5880 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5881 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5882 #ifdef CORE_HEADER
5883 #define elf_backend_write_core_note elf_x86_64_write_core_note
5884 #endif
5885 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5886 #define elf_backend_relocate_section elf_x86_64_relocate_section
5887 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5888 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5889 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5890 #define elf_backend_object_p elf64_x86_64_elf_object_p
5891 #define bfd_elf64_mkobject elf_x86_64_mkobject
5892 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5893
5894 #define elf_backend_section_from_shdr \
5895 elf_x86_64_section_from_shdr
5896
5897 #define elf_backend_section_from_bfd_section \
5898 elf_x86_64_elf_section_from_bfd_section
5899 #define elf_backend_add_symbol_hook \
5900 elf_x86_64_add_symbol_hook
5901 #define elf_backend_symbol_processing \
5902 elf_x86_64_symbol_processing
5903 #define elf_backend_common_section_index \
5904 elf_x86_64_common_section_index
5905 #define elf_backend_common_section \
5906 elf_x86_64_common_section
5907 #define elf_backend_common_definition \
5908 elf_x86_64_common_definition
5909 #define elf_backend_merge_symbol \
5910 elf_x86_64_merge_symbol
5911 #define elf_backend_special_sections \
5912 elf_x86_64_special_sections
5913 #define elf_backend_additional_program_headers \
5914 elf_x86_64_additional_program_headers
5915 #define elf_backend_hash_symbol \
5916 elf_x86_64_hash_symbol
5917
5918 #include "elf64-target.h"
5919
5920 /* FreeBSD support. */
5921
5922 #undef TARGET_LITTLE_SYM
5923 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5924 #undef TARGET_LITTLE_NAME
5925 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5926
5927 #undef ELF_OSABI
5928 #define ELF_OSABI ELFOSABI_FREEBSD
5929
5930 #undef elf64_bed
5931 #define elf64_bed elf64_x86_64_fbsd_bed
5932
5933 #include "elf64-target.h"
5934
5935 /* Solaris 2 support. */
5936
5937 #undef TARGET_LITTLE_SYM
5938 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5939 #undef TARGET_LITTLE_NAME
5940 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5941
5942 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5943 objects won't be recognized. */
5944 #undef ELF_OSABI
5945
5946 #undef elf64_bed
5947 #define elf64_bed elf64_x86_64_sol2_bed
5948
5949 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5950 boundary. */
5951 #undef elf_backend_static_tls_alignment
5952 #define elf_backend_static_tls_alignment 16
5953
5954 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5955
5956 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5957 File, p.63. */
5958 #undef elf_backend_want_plt_sym
5959 #define elf_backend_want_plt_sym 1
5960
5961 #include "elf64-target.h"
5962
5963 /* Native Client support. */
5964
5965 static bfd_boolean
5966 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5967 {
5968 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5969 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5970 return TRUE;
5971 }
5972
5973 #undef TARGET_LITTLE_SYM
5974 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5975 #undef TARGET_LITTLE_NAME
5976 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5977 #undef elf64_bed
5978 #define elf64_bed elf64_x86_64_nacl_bed
5979
5980 #undef ELF_MAXPAGESIZE
5981 #undef ELF_MINPAGESIZE
5982 #undef ELF_COMMONPAGESIZE
5983 #define ELF_MAXPAGESIZE 0x10000
5984 #define ELF_MINPAGESIZE 0x10000
5985 #define ELF_COMMONPAGESIZE 0x10000
5986
5987 /* Restore defaults. */
5988 #undef ELF_OSABI
5989 #undef elf_backend_static_tls_alignment
5990 #undef elf_backend_want_plt_sym
5991 #define elf_backend_want_plt_sym 0
5992
5993 /* NaCl uses substantially different PLT entries for the same effects. */
5994
5995 #undef elf_backend_plt_alignment
5996 #define elf_backend_plt_alignment 5
5997 #define NACL_PLT_ENTRY_SIZE 64
5998 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5999
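/* Under the NaCl sandbox every indirect branch target must be 32-byte
   aligned and based off the sandbox base register %r15, hence the
   and/add sequence that masks %r11 before each jmpq *%r11 below.  */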
6000 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6001 {
6002 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6003 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6004 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6005 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6006 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6007
6008 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6009 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6010
6011 /* 32 bytes of nop to pad out to the standard size. */
6012 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6013 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6014 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6015 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6016 0x66, /* excess data32 prefix */
6017 0x90 /* nop */
6018 };
6019
6020 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6021 {
6022 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6023 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6024 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6025 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6026
6027 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6028 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6029 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6030
6031 /* Lazy GOT entries point here (32-byte aligned). */
6032 0x68, /* pushq immediate */
6033 0, 0, 0, 0, /* replaced with index into relocation table. */
6034 0xe9, /* jmp relative */
6035 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6036
6037 /* 22 bytes of nop to pad out to the standard size. */
6038 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6039 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6040 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6041 };
6042
6043 /* .eh_frame covering the .plt section. */
6044
6045 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6046 {
6047 #if (PLT_CIE_LENGTH != 20 \
6048 || PLT_FDE_LENGTH != 36 \
6049 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6050 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6051 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6052 #endif
6053 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6054 0, 0, 0, 0, /* CIE ID */
6055 1, /* CIE version */
6056 'z', 'R', 0, /* Augmentation string */
6057 1, /* Code alignment factor */
6058 0x78, /* Data alignment factor */
6059 16, /* Return address column */
6060 1, /* Augmentation size */
6061 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6062 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6063 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6064 DW_CFA_nop, DW_CFA_nop,
6065
6066 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6067 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6068 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6069 0, 0, 0, 0, /* .plt size goes here */
6070 0, /* Augmentation size */
6071 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6072 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6073 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6074 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
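    /* The expression below computes CFA = %rsp + 8, plus a further 8 once
       %rip modulo 64 has reached offset 37 within a PLT entry, i.e. once
       the pushq of the lazy-binding stub has executed.  */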
6075 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6076 13, /* Block length */
6077 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6078 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6079 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6080 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6081 DW_CFA_nop, DW_CFA_nop
6082 };
6083
6084 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6085 {
6086 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6087 elf_x86_64_nacl_plt_entry, /* plt_entry */
6088 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6089 2, /* plt0_got1_offset */
6090 9, /* plt0_got2_offset */
6091 13, /* plt0_got2_insn_end */
6092 3, /* plt_got_offset */
6093 33, /* plt_reloc_offset */
6094 38, /* plt_plt_offset */
6095 7, /* plt_got_insn_size */
6096 42, /* plt_plt_insn_end */
6097 32, /* plt_lazy_offset */
6098 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6099 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6100 };
6101
6102 #undef elf_backend_arch_data
6103 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6104
6105 #undef elf_backend_object_p
6106 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6107 #undef elf_backend_modify_segment_map
6108 #define elf_backend_modify_segment_map nacl_modify_segment_map
6109 #undef elf_backend_modify_program_headers
6110 #define elf_backend_modify_program_headers nacl_modify_program_headers
6111 #undef elf_backend_final_write_processing
6112 #define elf_backend_final_write_processing nacl_final_write_processing
6113
6114 #include "elf64-target.h"
6115
6116 /* Native Client x32 support. */
6117
6118 static bfd_boolean
6119 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6120 {
6121 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6122 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6123 return TRUE;
6124 }
6125
6126 #undef TARGET_LITTLE_SYM
6127 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6128 #undef TARGET_LITTLE_NAME
6129 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6130 #undef elf32_bed
6131 #define elf32_bed elf32_x86_64_nacl_bed
6132
6133 #define bfd_elf32_bfd_link_hash_table_create \
6134 elf_x86_64_link_hash_table_create
6135 #define bfd_elf32_bfd_reloc_type_lookup \
6136 elf_x86_64_reloc_type_lookup
6137 #define bfd_elf32_bfd_reloc_name_lookup \
6138 elf_x86_64_reloc_name_lookup
6139 #define bfd_elf32_mkobject \
6140 elf_x86_64_mkobject
6141 #define bfd_elf32_get_synthetic_symtab \
6142 elf_x86_64_get_synthetic_symtab
6143
6144 #undef elf_backend_object_p
6145 #define elf_backend_object_p \
6146 elf32_x86_64_nacl_elf_object_p
6147
6148 #undef elf_backend_bfd_from_remote_memory
6149 #define elf_backend_bfd_from_remote_memory \
6150 _bfd_elf32_bfd_from_remote_memory
6151
6152 #undef elf_backend_size_info
6153 #define elf_backend_size_info \
6154 _bfd_elf32_size_info
6155
6156 #include "elf32-target.h"
6157
6158 /* Restore defaults. */
6159 #undef elf_backend_object_p
6160 #define elf_backend_object_p elf64_x86_64_elf_object_p
6161 #undef elf_backend_bfd_from_remote_memory
6162 #undef elf_backend_size_info
6163 #undef elf_backend_modify_segment_map
6164 #undef elf_backend_modify_program_headers
6165 #undef elf_backend_final_write_processing
6166
6167 /* Intel L1OM support. */
6168
6169 static bfd_boolean
6170 elf64_l1om_elf_object_p (bfd *abfd)
6171 {
6172 /* Set the right machine number for an L1OM elf64 file. */
6173 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6174 return TRUE;
6175 }
6176
6177 #undef TARGET_LITTLE_SYM
6178 #define TARGET_LITTLE_SYM l1om_elf64_vec
6179 #undef TARGET_LITTLE_NAME
6180 #define TARGET_LITTLE_NAME "elf64-l1om"
6181 #undef ELF_ARCH
6182 #define ELF_ARCH bfd_arch_l1om
6183
6184 #undef ELF_MACHINE_CODE
6185 #define ELF_MACHINE_CODE EM_L1OM
6186
6187 #undef ELF_OSABI
6188
6189 #undef elf64_bed
6190 #define elf64_bed elf64_l1om_bed
6191
6192 #undef elf_backend_object_p
6193 #define elf_backend_object_p elf64_l1om_elf_object_p
6194
6195 /* Restore defaults. */
6196 #undef ELF_MAXPAGESIZE
6197 #undef ELF_MINPAGESIZE
6198 #undef ELF_COMMONPAGESIZE
6199 #define ELF_MAXPAGESIZE 0x200000
6200 #define ELF_MINPAGESIZE 0x1000
6201 #define ELF_COMMONPAGESIZE 0x1000
6202 #undef elf_backend_plt_alignment
6203 #define elf_backend_plt_alignment 4
6204 #undef elf_backend_arch_data
6205 #define elf_backend_arch_data &elf_x86_64_arch_bed
6206
6207 #include "elf64-target.h"
6208
6209 /* FreeBSD L1OM support. */
6210
6211 #undef TARGET_LITTLE_SYM
6212 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6213 #undef TARGET_LITTLE_NAME
6214 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6215
6216 #undef ELF_OSABI
6217 #define ELF_OSABI ELFOSABI_FREEBSD
6218
6219 #undef elf64_bed
6220 #define elf64_bed elf64_l1om_fbsd_bed
6221
6222 #include "elf64-target.h"
6223
6224 /* Intel K1OM support. */
6225
6226 static bfd_boolean
6227 elf64_k1om_elf_object_p (bfd *abfd)
6228 {
6229   /* Set the right machine number for a K1OM elf64 file.  */
6230 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6231 return TRUE;
6232 }
6233
6234 #undef TARGET_LITTLE_SYM
6235 #define TARGET_LITTLE_SYM k1om_elf64_vec
6236 #undef TARGET_LITTLE_NAME
6237 #define TARGET_LITTLE_NAME "elf64-k1om"
6238 #undef ELF_ARCH
6239 #define ELF_ARCH bfd_arch_k1om
6240
6241 #undef ELF_MACHINE_CODE
6242 #define ELF_MACHINE_CODE EM_K1OM
6243
6244 #undef ELF_OSABI
6245
6246 #undef elf64_bed
6247 #define elf64_bed elf64_k1om_bed
6248
6249 #undef elf_backend_object_p
6250 #define elf_backend_object_p elf64_k1om_elf_object_p
6251
6252 #undef elf_backend_static_tls_alignment
6253
6254 #undef elf_backend_want_plt_sym
6255 #define elf_backend_want_plt_sym 0
6256
6257 #include "elf64-target.h"
6258
6259 /* FreeBSD K1OM support. */
6260
6261 #undef TARGET_LITTLE_SYM
6262 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6263 #undef TARGET_LITTLE_NAME
6264 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6265
6266 #undef ELF_OSABI
6267 #define ELF_OSABI ELFOSABI_FREEBSD
6268
6269 #undef elf64_bed
6270 #define elf64_bed elf64_k1om_fbsd_bed
6271
6272 #include "elf64-target.h"
6273
6274 /* 32-bit x86-64 support.  */
6275
6276 #undef TARGET_LITTLE_SYM
6277 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6278 #undef TARGET_LITTLE_NAME
6279 #define TARGET_LITTLE_NAME "elf32-x86-64"
6280 #undef elf32_bed
6281
6282 #undef ELF_ARCH
6283 #define ELF_ARCH bfd_arch_i386
6284
6285 #undef ELF_MACHINE_CODE
6286 #define ELF_MACHINE_CODE EM_X86_64
6287
6288 #undef ELF_OSABI
6289
6290 #undef elf_backend_object_p
6291 #define elf_backend_object_p \
6292 elf32_x86_64_elf_object_p
6293
6294 #undef elf_backend_bfd_from_remote_memory
6295 #define elf_backend_bfd_from_remote_memory \
6296 _bfd_elf32_bfd_from_remote_memory
6297
6298 #undef elf_backend_size_info
6299 #define elf_backend_size_info \
6300 _bfd_elf32_size_info
6301
6302 #include "elf32-target.h"