bfd/elf64-x86-64.c (deliverable/binutils-gdb.git)
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "opcode/i386.h"
35 #include "elf/x86-64.h"
36
37 #ifdef CORE_HEADER
38 #include <stdarg.h>
39 #include CORE_HEADER
40 #endif
41
42 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
43 #define MINUS_ONE (~ (bfd_vma) 0)
44
45 /* Since 32-bit and 64-bit x86-64 encode the relocation type in an
46 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
47 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
48 since they are the same. */
49
50 #define ABI_64_P(abfd) \
51 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
52
53 /* The relocation "howto" table. Order of fields:
54 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
55 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
56 static reloc_howto_type x86_64_elf_howto_table[] =
57 {
58 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
60 FALSE),
61 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
63 FALSE),
64 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
65 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
66 TRUE),
67 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
68 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
69 FALSE),
70 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
71 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
72 TRUE),
73 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
74 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
75 FALSE),
76 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
77 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
78 MINUS_ONE, FALSE),
79 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
80 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
81 MINUS_ONE, FALSE),
82 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
84 MINUS_ONE, FALSE),
85 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
86 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
87 0xffffffff, TRUE),
88 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
89 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
90 FALSE),
91 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
92 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
93 FALSE),
94 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
95 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
96 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
97 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
98 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
100 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
102 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
103 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
104 MINUS_ONE, FALSE),
105 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
106 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
107 MINUS_ONE, FALSE),
108 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
109 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
110 MINUS_ONE, FALSE),
111 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
112 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
113 0xffffffff, TRUE),
114 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
115 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
116 0xffffffff, TRUE),
117 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
118 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
119 0xffffffff, FALSE),
120 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
121 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
122 0xffffffff, TRUE),
123 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
124 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
125 0xffffffff, FALSE),
126 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
127 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
128 TRUE),
129 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
130 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
131 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
132 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
133 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
134 FALSE, 0xffffffff, 0xffffffff, TRUE),
135 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
136 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
137 FALSE),
138 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
139 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
140 MINUS_ONE, TRUE),
141 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
142 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
143 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
144 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
145 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
146 MINUS_ONE, FALSE),
147 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
148 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
149 MINUS_ONE, FALSE),
150 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
151 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
152 FALSE),
153 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
154 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
155 FALSE),
156 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
157 complain_overflow_bitfield, bfd_elf_generic_reloc,
158 "R_X86_64_GOTPC32_TLSDESC",
159 FALSE, 0xffffffff, 0xffffffff, TRUE),
160 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
161 complain_overflow_dont, bfd_elf_generic_reloc,
162 "R_X86_64_TLSDESC_CALL",
163 FALSE, 0, 0, FALSE),
164 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
165 complain_overflow_bitfield, bfd_elf_generic_reloc,
166 "R_X86_64_TLSDESC",
167 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
168 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
169 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
170 MINUS_ONE, FALSE),
171 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
172 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
173 MINUS_ONE, FALSE),
174 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
175 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
176 TRUE),
177 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
178 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
179 TRUE),
180 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
181 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
182 0xffffffff, TRUE),
183 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
184 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
185 0xffffffff, TRUE),
186
187 /* We have a gap in the reloc numbers here.
188 R_X86_64_standard counts the number up to this point, and
189 R_X86_64_vt_offset is the value to subtract from a reloc type of
190 R_X86_64_GNU_VT* to form an index into this table. */
191 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
192 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
193
194 /* GNU extension to record C++ vtable hierarchy. */
195 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
196 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
197
198 /* GNU extension to record C++ vtable member usage. */
199 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
200 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
201 FALSE),
202
203 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
204 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
205 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
206 FALSE)
207 };
208
209 #define IS_X86_64_PCREL_TYPE(TYPE) \
210 ( ((TYPE) == R_X86_64_PC8) \
211 || ((TYPE) == R_X86_64_PC16) \
212 || ((TYPE) == R_X86_64_PC32) \
213 || ((TYPE) == R_X86_64_PC32_BND) \
214 || ((TYPE) == R_X86_64_PC64))
215
216 /* Map BFD relocs to the x86_64 elf relocs. */
217 struct elf_reloc_map
218 {
219 bfd_reloc_code_real_type bfd_reloc_val;
220 unsigned char elf_reloc_val;
221 };
222
223 static const struct elf_reloc_map x86_64_reloc_map[] =
224 {
225 { BFD_RELOC_NONE, R_X86_64_NONE, },
226 { BFD_RELOC_64, R_X86_64_64, },
227 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
228 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
229 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
230 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
231 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
232 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
233 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
234 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
235 { BFD_RELOC_32, R_X86_64_32, },
236 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
237 { BFD_RELOC_16, R_X86_64_16, },
238 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
239 { BFD_RELOC_8, R_X86_64_8, },
240 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
241 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
242 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
243 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
244 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
245 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
246 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
247 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
248 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
249 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
250 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
251 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
252 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
253 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
254 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
255 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
256 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
257 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
258 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
259 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
260 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
261 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
262 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
263 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
264 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
265 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
266 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
267 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
268 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
269 };
270
271 static reloc_howto_type *
272 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
273 {
274 unsigned i;
275
276 if (r_type == (unsigned int) R_X86_64_32)
277 {
278 if (ABI_64_P (abfd))
279 i = r_type;
280 else
281 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
282 }
283 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
284 || r_type >= (unsigned int) R_X86_64_max)
285 {
286 if (r_type >= (unsigned int) R_X86_64_standard)
287 {
288 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
289 abfd, (int) r_type);
290 r_type = R_X86_64_NONE;
291 }
292 i = r_type;
293 }
294 else
295 i = r_type - (unsigned int) R_X86_64_vt_offset;
296 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
297 return &x86_64_elf_howto_table[i];
298 }
299
300 /* Given a BFD reloc type, return a HOWTO structure. */
301 static reloc_howto_type *
302 elf_x86_64_reloc_type_lookup (bfd *abfd,
303 bfd_reloc_code_real_type code)
304 {
305 unsigned int i;
306
307 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 i++)
309 {
310 if (x86_64_reloc_map[i].bfd_reloc_val == code)
311 return elf_x86_64_rtype_to_howto (abfd,
312 x86_64_reloc_map[i].elf_reloc_val);
313 }
314 return NULL;
315 }
316
317 static reloc_howto_type *
318 elf_x86_64_reloc_name_lookup (bfd *abfd,
319 const char *r_name)
320 {
321 unsigned int i;
322
323 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
324 {
325 /* Get x32 R_X86_64_32. */
326 reloc_howto_type *reloc
327 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
328 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 return reloc;
330 }
331
332 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
333 if (x86_64_elf_howto_table[i].name != NULL
334 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
335 return &x86_64_elf_howto_table[i];
336
337 return NULL;
338 }
339
340 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
341
342 static void
343 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
344 Elf_Internal_Rela *dst)
345 {
346 unsigned r_type;
347
348 r_type = ELF32_R_TYPE (dst->r_info);
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350 BFD_ASSERT (r_type == cache_ptr->howto->type);
351 }
352 \f
353 /* Support for core dump NOTE sections. */
354 static bfd_boolean
355 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
356 {
357 int offset;
358 size_t size;
359
360 switch (note->descsz)
361 {
362 default:
363 return FALSE;
364
365 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
366 /* pr_cursig */
367 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
368
369 /* pr_pid */
370 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
371
372 /* pr_reg */
373 offset = 72;
374 size = 216;
375
376 break;
377
378 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
379 /* pr_cursig */
380 elf_tdata (abfd)->core->signal
381 = bfd_get_16 (abfd, note->descdata + 12);
382
383 /* pr_pid */
384 elf_tdata (abfd)->core->lwpid
385 = bfd_get_32 (abfd, note->descdata + 32);
386
387 /* pr_reg */
388 offset = 112;
389 size = 216;
390
391 break;
392 }
393
394 /* Make a ".reg/999" section. */
395 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
396 size, note->descpos + offset);
397 }
398
399 static bfd_boolean
400 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
401 {
402 switch (note->descsz)
403 {
404 default:
405 return FALSE;
406
407 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 12);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
414 break;
415
416 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
417 elf_tdata (abfd)->core->pid
418 = bfd_get_32 (abfd, note->descdata + 24);
419 elf_tdata (abfd)->core->program
420 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
421 elf_tdata (abfd)->core->command
422 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
423 }
424
425 /* Note that for some reason, a spurious space is tacked
426 onto the end of the args in some (at least one anyway)
427 implementations, so strip it off if it exists. */
428
429 {
430 char *command = elf_tdata (abfd)->core->command;
431 int n = strlen (command);
432
433 if (0 < n && command[n - 1] == ' ')
434 command[n - 1] = '\0';
435 }
436
437 return TRUE;
438 }
439
440 #ifdef CORE_HEADER
441 static char *
442 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
443 int note_type, ...)
444 {
445 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
446 va_list ap;
447 const char *fname, *psargs;
448 long pid;
449 int cursig;
450 const void *gregs;
451
452 switch (note_type)
453 {
454 default:
455 return NULL;
456
457 case NT_PRPSINFO:
458 va_start (ap, note_type);
459 fname = va_arg (ap, const char *);
460 psargs = va_arg (ap, const char *);
461 va_end (ap);
462
463 if (bed->s->elfclass == ELFCLASS32)
464 {
465 prpsinfo32_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 else
473 {
474 prpsinfo64_t data;
475 memset (&data, 0, sizeof (data));
476 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
477 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
478 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
479 &data, sizeof (data));
480 }
481 /* NOTREACHED */
482
483 case NT_PRSTATUS:
484 va_start (ap, note_type);
485 pid = va_arg (ap, long);
486 cursig = va_arg (ap, int);
487 gregs = va_arg (ap, const void *);
488 va_end (ap);
489
490 if (bed->s->elfclass == ELFCLASS32)
491 {
492 if (bed->elf_machine_code == EM_X86_64)
493 {
494 prstatusx32_t prstat;
495 memset (&prstat, 0, sizeof (prstat));
496 prstat.pr_pid = pid;
497 prstat.pr_cursig = cursig;
498 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
499 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
500 &prstat, sizeof (prstat));
501 }
502 else
503 {
504 prstatus32_t prstat;
505 memset (&prstat, 0, sizeof (prstat));
506 prstat.pr_pid = pid;
507 prstat.pr_cursig = cursig;
508 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
509 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
510 &prstat, sizeof (prstat));
511 }
512 }
513 else
514 {
515 prstatus64_t prstat;
516 memset (&prstat, 0, sizeof (prstat));
517 prstat.pr_pid = pid;
518 prstat.pr_cursig = cursig;
519 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
520 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
521 &prstat, sizeof (prstat));
522 }
523 }
524 /* NOTREACHED */
525 }
526 #endif
527 \f
528 /* Functions for the x86-64 ELF linker. */
529
530 /* The name of the dynamic interpreter. This is put in the .interp
531 section. */
532
533 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
534 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
535
536 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
537 copying dynamic variables from a shared lib into an app's dynbss
538 section, and instead use a dynamic relocation to point into the
539 shared lib. */
540 #define ELIMINATE_COPY_RELOCS 1
541
542 /* The size in bytes of an entry in the global offset table. */
543
544 #define GOT_ENTRY_SIZE 8
545
546 /* The size in bytes of an entry in the procedure linkage table. */
547
548 #define PLT_ENTRY_SIZE 16
549
550 /* The first entry in a procedure linkage table looks like this. See the
551 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
552
553 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
556 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
557 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
558 };
559
560 /* Subsequent entries in a procedure linkage table look like this. */
561
562 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
563 {
564 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
565 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
566 0x68, /* pushq immediate */
567 0, 0, 0, 0, /* replaced with index into relocation table. */
568 0xe9, /* jmp relative */
569 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
570 };
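
/* A rough sketch of the lazy-binding flow implied by the templates
   above: on the first call, the jmpq lands on the symbol's .got.plt
   slot, which the linker initializes to point back at the following
   pushq (see plt_lazy_offset below).  That pushq pushes the .rela.plt
   index, and the final jmp transfers to the first PLT entry, which in
   turn pushes GOT[1] and jumps through GOT[2] to the dynamic linker's
   resolver.  The resolver then patches the .got.plt slot so that
   later calls go straight to the target.  */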
571
572 /* The first entry in a procedure linkage table with BND relocations
573 looks like this. */
574
575 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
576 {
577 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
578 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
579 0x0f, 0x1f, 0 /* nopl (%rax) */
580 };
581
582 /* Subsequent entries for legacy branches in a procedure linkage table
583 with BND relocations look like this. */
584
585 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
586 {
587 0x68, 0, 0, 0, 0, /* pushq immediate */
588 0xe9, 0, 0, 0, 0, /* jmpq relative */
589 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
590 };
591
592 /* Subsequent entries for branches with BND prefix in a procedure linkage
593 table with BND relocations look like this. */
594
595 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
596 {
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
600 };
601
602 /* Entries for legacy branches in the second procedure linkage table
603 look like this. */
604
605 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
606 {
607 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
608 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
609 0x66, 0x90 /* xchg %ax,%ax */
610 };
611
612 /* Entries for branches with BND prefix in the second procedure linkage
613 table look like this. */
614
615 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
616 {
617 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x90 /* nop */
620 };
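
/* A rough sketch of how the BND PLT layout fits together: with BND
   relocations the 16-byte .plt entries above only push the relocation
   index and "bnd jmp" back to the first PLT entry, while the 8-byte
   entries in the second PLT perform the actual "bnd jmpq *GOT"
   indirection.  Since plt_lazy_offset is 0 in the BND backend data
   below, an unresolved .got.plt slot points at the pushq at the very
   start of the corresponding .plt entry.  */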
621
622 /* .eh_frame covering the .plt section. */
623
624 static const bfd_byte elf_x86_64_eh_frame_plt[] =
625 {
626 #define PLT_CIE_LENGTH 20
627 #define PLT_FDE_LENGTH 36
628 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
629 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
630 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
631 0, 0, 0, 0, /* CIE ID */
632 1, /* CIE version */
633 'z', 'R', 0, /* Augmentation string */
634 1, /* Code alignment factor */
635 0x78, /* Data alignment factor */
636 16, /* Return address column */
637 1, /* Augmentation size */
638 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
639 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
640 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
641 DW_CFA_nop, DW_CFA_nop,
642
643 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
644 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
645 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
646 0, 0, 0, 0, /* .plt size goes here */
647 0, /* Augmentation size */
648 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
649 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
650 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
651 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
652 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
653 11, /* Block length */
654 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
655 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
656 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
657 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
658 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
659 };
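
/* A note on the DW_CFA_def_cfa_expression above: within each 16-byte
   PLT entry the pushq of the relocation index occupies offsets 6..10,
   so the expression computes
     CFA = %rsp + 8 + ((((%rip & 15) >= 11) ? 1 : 0) << 3),
   i.e. %rsp + 8 before the push has executed and %rsp + 16 once
   control has reached the final jmp at offset 11.  */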
660
661 /* Architecture-specific backend data for x86-64. */
662
663 struct elf_x86_64_backend_data
664 {
665 /* Templates for the initial PLT entry and for subsequent entries. */
666 const bfd_byte *plt0_entry;
667 const bfd_byte *plt_entry;
668 unsigned int plt_entry_size; /* Size of each PLT entry. */
669
670 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
671 unsigned int plt0_got1_offset;
672 unsigned int plt0_got2_offset;
673
674 /* Offset of the end of the PC-relative instruction containing
675 plt0_got2_offset. */
676 unsigned int plt0_got2_insn_end;
677
678 /* Offsets into plt_entry that are to be replaced with... */
679 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
680 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
681 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
682
683 /* Length of the PC-relative instruction containing plt_got_offset. */
684 unsigned int plt_got_insn_size;
685
686 /* Offset of the end of the PC-relative jump to plt0_entry. */
687 unsigned int plt_plt_insn_end;
688
689 /* Offset into plt_entry where the initial value of the GOT entry points. */
690 unsigned int plt_lazy_offset;
691
692 /* .eh_frame covering the .plt section. */
693 const bfd_byte *eh_frame_plt;
694 unsigned int eh_frame_plt_size;
695 };
696
697 #define get_elf_x86_64_arch_data(bed) \
698 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
699
700 #define get_elf_x86_64_backend_data(abfd) \
701 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
702
703 #define GET_PLT_ENTRY_SIZE(abfd) \
704 get_elf_x86_64_backend_data (abfd)->plt_entry_size
705
706 /* These are the standard parameters. */
707 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
708 {
709 elf_x86_64_plt0_entry, /* plt0_entry */
710 elf_x86_64_plt_entry, /* plt_entry */
711 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
712 2, /* plt0_got1_offset */
713 8, /* plt0_got2_offset */
714 12, /* plt0_got2_insn_end */
715 2, /* plt_got_offset */
716 7, /* plt_reloc_offset */
717 12, /* plt_plt_offset */
718 6, /* plt_got_insn_size */
719 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
720 6, /* plt_lazy_offset */
721 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
722 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
723 };
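
/* For reference, the offsets above index into the templates defined
   earlier: in plt0_entry the displacement of "pushq GOT+8(%rip)"
   starts at byte 2 and that of "jmpq *GOT+16(%rip)" at byte 8, with
   that jmpq ending at byte 12; in plt_entry the GOT displacement
   starts at byte 2 (within a 6-byte instruction), the relocation
   index at byte 7, and the displacement of the final jmp back to the
   first PLT entry at byte 12, ending at byte 16.  */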
724
725 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
726 {
727 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
728 elf_x86_64_bnd_plt_entry, /* plt_entry */
729 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
730 2, /* plt0_got1_offset */
731 1+8, /* plt0_got2_offset */
732 1+12, /* plt0_got2_insn_end */
733 1+2, /* plt_got_offset */
734 1, /* plt_reloc_offset */
735 7, /* plt_plt_offset */
736 1+6, /* plt_got_insn_size */
737 11, /* plt_plt_insn_end */
738 0, /* plt_lazy_offset */
739 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
740 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
741 };
742
743 #define elf_backend_arch_data &elf_x86_64_arch_bed
744
745 /* TRUE if this is an undefined weak symbol which is resolved to 0.
746 A reference to an undefined weak symbol is resolved to 0 when
747 building an executable if the symbol isn't dynamic and
748 1. has non-GOT/non-PLT relocations in text sections, or
749 2. has no GOT/PLT relocation.
750 */
751 #define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
752 ((EH)->elf.root.type == bfd_link_hash_undefweak \
753 && bfd_link_executable (INFO) \
754 && (elf_x86_64_hash_table (INFO)->interp == NULL \
755 || !(GOT_RELOC) \
756 || (EH)->has_non_got_reloc \
757 || !(INFO)->dynamic_undefined_weak))
758
759 /* x86-64 ELF linker hash entry. */
760
761 struct elf_x86_64_link_hash_entry
762 {
763 struct elf_link_hash_entry elf;
764
765 /* Track dynamic relocs copied for this symbol. */
766 struct elf_dyn_relocs *dyn_relocs;
767
768 #define GOT_UNKNOWN 0
769 #define GOT_NORMAL 1
770 #define GOT_TLS_GD 2
771 #define GOT_TLS_IE 3
772 #define GOT_TLS_GDESC 4
773 #define GOT_TLS_GD_BOTH_P(type) \
774 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
775 #define GOT_TLS_GD_P(type) \
776 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
777 #define GOT_TLS_GDESC_P(type) \
778 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
779 #define GOT_TLS_GD_ANY_P(type) \
780 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
781 unsigned char tls_type;
782
783 /* TRUE if a weak symbol with a real definition needs a copy reloc.
784 When there is a weak symbol with a real definition, the processor
785 independent code will have arranged for us to see the real
786 definition first. We need to copy the needs_copy bit from the
787 real definition and check it when allowing copy reloc in PIE. */
788 unsigned int needs_copy : 1;
789
790 /* TRUE if symbol has at least one BND relocation. */
791 unsigned int has_bnd_reloc : 1;
792
793 /* TRUE if symbol has GOT or PLT relocations. */
794 unsigned int has_got_reloc : 1;
795
796 /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */
797 unsigned int has_non_got_reloc : 1;
798
799 /* 0: symbol isn't __tls_get_addr.
800 1: symbol is __tls_get_addr.
801 2: symbol is unknown. */
802 unsigned int tls_get_addr : 2;
803
804 /* Reference count of C/C++ function pointer relocations in read-write
805 section which can be resolved at run-time. */
806 bfd_signed_vma func_pointer_refcount;
807
808 /* Information about the GOT PLT entry. Filled when there are both
809 GOT and PLT relocations against the same function. */
810 union gotplt_union plt_got;
811
812 /* Information about the second PLT entry. Filled when has_bnd_reloc is
813 set. */
814 union gotplt_union plt_bnd;
815
816 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
817 starting at the end of the jump table. */
818 bfd_vma tlsdesc_got;
819 };
820
821 #define elf_x86_64_hash_entry(ent) \
822 ((struct elf_x86_64_link_hash_entry *)(ent))
823
824 struct elf_x86_64_obj_tdata
825 {
826 struct elf_obj_tdata root;
827
828 /* tls_type for each local got entry. */
829 char *local_got_tls_type;
830
831 /* GOTPLT entries for TLS descriptors. */
832 bfd_vma *local_tlsdesc_gotent;
833 };
834
835 #define elf_x86_64_tdata(abfd) \
836 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
837
838 #define elf_x86_64_local_got_tls_type(abfd) \
839 (elf_x86_64_tdata (abfd)->local_got_tls_type)
840
841 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
842 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
843
844 #define is_x86_64_elf(bfd) \
845 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
846 && elf_tdata (bfd) != NULL \
847 && elf_object_id (bfd) == X86_64_ELF_DATA)
848
849 static bfd_boolean
850 elf_x86_64_mkobject (bfd *abfd)
851 {
852 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
853 X86_64_ELF_DATA);
854 }
855
856 /* x86-64 ELF linker hash table. */
857
858 struct elf_x86_64_link_hash_table
859 {
860 struct elf_link_hash_table elf;
861
862 /* Short-cuts to get to dynamic linker sections. */
863 asection *interp;
864 asection *sdynbss;
865 asection *srelbss;
866 asection *plt_eh_frame;
867 asection *plt_bnd;
868 asection *plt_got;
869
870 union
871 {
872 bfd_signed_vma refcount;
873 bfd_vma offset;
874 } tls_ld_got;
875
876 /* The amount of space used by the jump slots in the GOT. */
877 bfd_vma sgotplt_jump_table_size;
878
879 /* Small local sym cache. */
880 struct sym_cache sym_cache;
881
882 bfd_vma (*r_info) (bfd_vma, bfd_vma);
883 bfd_vma (*r_sym) (bfd_vma);
884 unsigned int pointer_r_type;
885 const char *dynamic_interpreter;
886 int dynamic_interpreter_size;
887
888 /* _TLS_MODULE_BASE_ symbol. */
889 struct bfd_link_hash_entry *tls_module_base;
890
891 /* Used by local STT_GNU_IFUNC symbols. */
892 htab_t loc_hash_table;
893 void * loc_hash_memory;
894
895 /* The offset into splt of the PLT entry for the TLS descriptor
896 resolver. Special values are 0, if not necessary (or not found
897 to be necessary yet), and -1 if needed but not determined
898 yet. */
899 bfd_vma tlsdesc_plt;
900 /* The offset into sgot of the GOT entry used by the PLT entry
901 above. */
902 bfd_vma tlsdesc_got;
903
904 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
905 bfd_vma next_jump_slot_index;
906 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
907 bfd_vma next_irelative_index;
908
909 /* TRUE if there are dynamic relocs against IFUNC symbols that apply
910 to read-only sections. */
911 bfd_boolean readonly_dynrelocs_against_ifunc;
912 };
913
914 /* Get the x86-64 ELF linker hash table from a link_info structure. */
915
916 #define elf_x86_64_hash_table(p) \
917 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
918 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
919
920 #define elf_x86_64_compute_jump_table_size(htab) \
921 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
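
/* Each relocation in .rela.plt corresponds to one GOT_ENTRY_SIZE slot
   in .got.plt beyond the reserved entries, hence the size above.  */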
922
923 /* Create an entry in an x86-64 ELF linker hash table. */
924
925 static struct bfd_hash_entry *
926 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
927 struct bfd_hash_table *table,
928 const char *string)
929 {
930 /* Allocate the structure if it has not already been allocated by a
931 subclass. */
932 if (entry == NULL)
933 {
934 entry = (struct bfd_hash_entry *)
935 bfd_hash_allocate (table,
936 sizeof (struct elf_x86_64_link_hash_entry));
937 if (entry == NULL)
938 return entry;
939 }
940
941 /* Call the allocation method of the superclass. */
942 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
943 if (entry != NULL)
944 {
945 struct elf_x86_64_link_hash_entry *eh;
946
947 eh = (struct elf_x86_64_link_hash_entry *) entry;
948 eh->dyn_relocs = NULL;
949 eh->tls_type = GOT_UNKNOWN;
950 eh->needs_copy = 0;
951 eh->has_bnd_reloc = 0;
952 eh->has_got_reloc = 0;
953 eh->has_non_got_reloc = 0;
954 eh->tls_get_addr = 2;
955 eh->func_pointer_refcount = 0;
956 eh->plt_bnd.offset = (bfd_vma) -1;
957 eh->plt_got.offset = (bfd_vma) -1;
958 eh->tlsdesc_got = (bfd_vma) -1;
959 }
960
961 return entry;
962 }
963
964 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
965 for local symbols so that we can handle local STT_GNU_IFUNC symbols
966 as global symbols. We reuse indx and dynstr_index for the local
967 symbol hash since they aren't used by global symbols in this backend. */
968
969 static hashval_t
970 elf_x86_64_local_htab_hash (const void *ptr)
971 {
972 struct elf_link_hash_entry *h
973 = (struct elf_link_hash_entry *) ptr;
974 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
975 }
976
977 /* Compare local hash entries. */
978
979 static int
980 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
981 {
982 struct elf_link_hash_entry *h1
983 = (struct elf_link_hash_entry *) ptr1;
984 struct elf_link_hash_entry *h2
985 = (struct elf_link_hash_entry *) ptr2;
986
987 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
988 }
989
990 /* Find and/or create a hash entry for a local symbol. */
991
992 static struct elf_link_hash_entry *
993 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
994 bfd *abfd, const Elf_Internal_Rela *rel,
995 bfd_boolean create)
996 {
997 struct elf_x86_64_link_hash_entry e, *ret;
998 asection *sec = abfd->sections;
999 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
1000 htab->r_sym (rel->r_info));
1001 void **slot;
1002
1003 e.elf.indx = sec->id;
1004 e.elf.dynstr_index = htab->r_sym (rel->r_info);
1005 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
1006 create ? INSERT : NO_INSERT);
1007
1008 if (!slot)
1009 return NULL;
1010
1011 if (*slot)
1012 {
1013 ret = (struct elf_x86_64_link_hash_entry *) *slot;
1014 return &ret->elf;
1015 }
1016
1017 ret = (struct elf_x86_64_link_hash_entry *)
1018 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
1019 sizeof (struct elf_x86_64_link_hash_entry));
1020 if (ret)
1021 {
1022 memset (ret, 0, sizeof (*ret));
1023 ret->elf.indx = sec->id;
1024 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
1025 ret->elf.dynindx = -1;
1026 ret->func_pointer_refcount = 0;
1027 ret->plt_got.offset = (bfd_vma) -1;
1028 *slot = ret;
1029 }
1030 return &ret->elf;
1031 }
1032
1033 /* Destroy an X86-64 ELF linker hash table. */
1034
1035 static void
1036 elf_x86_64_link_hash_table_free (bfd *obfd)
1037 {
1038 struct elf_x86_64_link_hash_table *htab
1039 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
1040
1041 if (htab->loc_hash_table)
1042 htab_delete (htab->loc_hash_table);
1043 if (htab->loc_hash_memory)
1044 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1045 _bfd_elf_link_hash_table_free (obfd);
1046 }
1047
1048 /* Create an X86-64 ELF linker hash table. */
1049
1050 static struct bfd_link_hash_table *
1051 elf_x86_64_link_hash_table_create (bfd *abfd)
1052 {
1053 struct elf_x86_64_link_hash_table *ret;
1054 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1055
1056 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1057 if (ret == NULL)
1058 return NULL;
1059
1060 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1061 elf_x86_64_link_hash_newfunc,
1062 sizeof (struct elf_x86_64_link_hash_entry),
1063 X86_64_ELF_DATA))
1064 {
1065 free (ret);
1066 return NULL;
1067 }
1068
1069 if (ABI_64_P (abfd))
1070 {
1071 ret->r_info = elf64_r_info;
1072 ret->r_sym = elf64_r_sym;
1073 ret->pointer_r_type = R_X86_64_64;
1074 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1075 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1076 }
1077 else
1078 {
1079 ret->r_info = elf32_r_info;
1080 ret->r_sym = elf32_r_sym;
1081 ret->pointer_r_type = R_X86_64_32;
1082 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1083 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1084 }
1085
1086 ret->loc_hash_table = htab_try_create (1024,
1087 elf_x86_64_local_htab_hash,
1088 elf_x86_64_local_htab_eq,
1089 NULL);
1090 ret->loc_hash_memory = objalloc_create ();
1091 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1092 {
1093 elf_x86_64_link_hash_table_free (abfd);
1094 return NULL;
1095 }
1096 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1097
1098 return &ret->elf.root;
1099 }
1100
1101 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1102 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1103 hash table. */
1104
1105 static bfd_boolean
1106 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1107 struct bfd_link_info *info)
1108 {
1109 struct elf_x86_64_link_hash_table *htab;
1110
1111 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1112 return FALSE;
1113
1114 htab = elf_x86_64_hash_table (info);
1115 if (htab == NULL)
1116 return FALSE;
1117
1118 /* Set the contents of the .interp section to the interpreter. */
1119 if (bfd_link_executable (info) && !info->nointerp)
1120 {
1121 asection *s = bfd_get_linker_section (dynobj, ".interp");
1122 if (s == NULL)
1123 abort ();
1124 s->size = htab->dynamic_interpreter_size;
1125 s->contents = (unsigned char *) htab->dynamic_interpreter;
1126 htab->interp = s;
1127 }
1128
1129 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1130 if (!htab->sdynbss)
1131 abort ();
1132
1133 if (bfd_link_executable (info))
1134 {
1135 /* Always allow copy relocs for building executables. */
1136 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1137 if (s == NULL)
1138 {
1139 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1140 s = bfd_make_section_anyway_with_flags (dynobj,
1141 ".rela.bss",
1142 (bed->dynamic_sec_flags
1143 | SEC_READONLY));
1144 if (s == NULL
1145 || ! bfd_set_section_alignment (dynobj, s,
1146 bed->s->log_file_align))
1147 return FALSE;
1148 }
1149 htab->srelbss = s;
1150 }
1151
1152 if (!info->no_ld_generated_unwind_info
1153 && htab->plt_eh_frame == NULL
1154 && htab->elf.splt != NULL)
1155 {
1156 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1157 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1158 | SEC_LINKER_CREATED);
1159 htab->plt_eh_frame
1160 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1161 if (htab->plt_eh_frame == NULL
1162 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1163 return FALSE;
1164 }
1165 return TRUE;
1166 }
1167
1168 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1169
1170 static void
1171 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1172 struct elf_link_hash_entry *dir,
1173 struct elf_link_hash_entry *ind)
1174 {
1175 struct elf_x86_64_link_hash_entry *edir, *eind;
1176
1177 edir = (struct elf_x86_64_link_hash_entry *) dir;
1178 eind = (struct elf_x86_64_link_hash_entry *) ind;
1179
1180 if (!edir->has_bnd_reloc)
1181 edir->has_bnd_reloc = eind->has_bnd_reloc;
1182
1183 if (!edir->has_got_reloc)
1184 edir->has_got_reloc = eind->has_got_reloc;
1185
1186 if (!edir->has_non_got_reloc)
1187 edir->has_non_got_reloc = eind->has_non_got_reloc;
1188
1189 if (eind->dyn_relocs != NULL)
1190 {
1191 if (edir->dyn_relocs != NULL)
1192 {
1193 struct elf_dyn_relocs **pp;
1194 struct elf_dyn_relocs *p;
1195
1196 /* Add reloc counts against the indirect sym to the direct sym
1197 list. Merge any entries against the same section. */
1198 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1199 {
1200 struct elf_dyn_relocs *q;
1201
1202 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1203 if (q->sec == p->sec)
1204 {
1205 q->pc_count += p->pc_count;
1206 q->count += p->count;
1207 *pp = p->next;
1208 break;
1209 }
1210 if (q == NULL)
1211 pp = &p->next;
1212 }
1213 *pp = edir->dyn_relocs;
1214 }
1215
1216 edir->dyn_relocs = eind->dyn_relocs;
1217 eind->dyn_relocs = NULL;
1218 }
1219
1220 if (ind->root.type == bfd_link_hash_indirect
1221 && dir->got.refcount <= 0)
1222 {
1223 edir->tls_type = eind->tls_type;
1224 eind->tls_type = GOT_UNKNOWN;
1225 }
1226
1227 if (ELIMINATE_COPY_RELOCS
1228 && ind->root.type != bfd_link_hash_indirect
1229 && dir->dynamic_adjusted)
1230 {
1231 /* If called to transfer flags for a weakdef during processing
1232 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1233 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1234 dir->ref_dynamic |= ind->ref_dynamic;
1235 dir->ref_regular |= ind->ref_regular;
1236 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1237 dir->needs_plt |= ind->needs_plt;
1238 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1239 }
1240 else
1241 {
1242 if (eind->func_pointer_refcount > 0)
1243 {
1244 edir->func_pointer_refcount += eind->func_pointer_refcount;
1245 eind->func_pointer_refcount = 0;
1246 }
1247
1248 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1249 }
1250 }
1251
1252 static bfd_boolean
1253 elf64_x86_64_elf_object_p (bfd *abfd)
1254 {
1255 /* Set the right machine number for an x86-64 elf64 file. */
1256 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1257 return TRUE;
1258 }
1259
1260 static bfd_boolean
1261 elf32_x86_64_elf_object_p (bfd *abfd)
1262 {
1263 /* Set the right machine number for an x86-64 elf32 file. */
1264 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1265 return TRUE;
1266 }
1267
1268 /* Return TRUE if the TLS access code sequence supports transition
1269 from R_TYPE. */
1270
1271 static bfd_boolean
1272 elf_x86_64_check_tls_transition (bfd *abfd,
1273 struct bfd_link_info *info,
1274 asection *sec,
1275 bfd_byte *contents,
1276 Elf_Internal_Shdr *symtab_hdr,
1277 struct elf_link_hash_entry **sym_hashes,
1278 unsigned int r_type,
1279 const Elf_Internal_Rela *rel,
1280 const Elf_Internal_Rela *relend)
1281 {
1282 unsigned int val;
1283 unsigned long r_symndx;
1284 bfd_boolean largepic = FALSE;
1285 struct elf_link_hash_entry *h;
1286 bfd_vma offset;
1287 struct elf_x86_64_link_hash_table *htab;
1288 bfd_byte *call;
1289 bfd_boolean indirect_call, tls_get_addr;
1290
1291 htab = elf_x86_64_hash_table (info);
1292 offset = rel->r_offset;
1293 switch (r_type)
1294 {
1295 case R_X86_64_TLSGD:
1296 case R_X86_64_TLSLD:
1297 if ((rel + 1) >= relend)
1298 return FALSE;
1299
1300 if (r_type == R_X86_64_TLSGD)
1301 {
1302 /* Check transition from GD access model. For 64bit, only
1303 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1304 .word 0x6666; rex64; call __tls_get_addr@PLT
1305 or
1306 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1307 .byte 0x66; rex64
1308 call *__tls_get_addr@GOTPCREL(%rip)
1309 which may be converted to
1310 addr32 call __tls_get_addr
1311 can transit to different access model. For 32bit, only
1312 leaq foo@tlsgd(%rip), %rdi
1313 .word 0x6666; rex64; call __tls_get_addr@PLT
1314 or
1315 leaq foo@tlsgd(%rip), %rdi
1316 .byte 0x66; rex64
1317 call *__tls_get_addr@GOTPCREL(%rip)
1318 which may be converted to
1319 addr32 call __tls_get_addr
1320 can transit to different access model. For largepic,
1321 we also support:
1322 leaq foo@tlsgd(%rip), %rdi
1323 movabsq $__tls_get_addr@pltoff, %rax
1324 addq $r15, %rax
1325 call *%rax
1326 or
1327 leaq foo@tlsgd(%rip), %rdi
1328 movabsq $__tls_get_addr@pltoff, %rax
1329 addq $rbx, %rax
1330 call *%rax */
1331
1332 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1333
1334 if ((offset + 12) > sec->size)
1335 return FALSE;
1336
1337 call = contents + offset + 4;
1338 if (call[0] != 0x66
1339 || !((call[1] == 0x48
1340 && call[2] == 0xff
1341 && call[3] == 0x15)
1342 || (call[1] == 0x48
1343 && call[2] == 0x67
1344 && call[3] == 0xe8)
1345 || (call[1] == 0x66
1346 && call[2] == 0x48
1347 && call[3] == 0xe8)))
1348 {
1349 if (!ABI_64_P (abfd)
1350 || (offset + 19) > sec->size
1351 || offset < 3
1352 || memcmp (call - 7, leaq + 1, 3) != 0
1353 || memcmp (call, "\x48\xb8", 2) != 0
1354 || call[11] != 0x01
1355 || call[13] != 0xff
1356 || call[14] != 0xd0
1357 || !((call[10] == 0x48 && call[12] == 0xd8)
1358 || (call[10] == 0x4c && call[12] == 0xf8)))
1359 return FALSE;
1360 largepic = TRUE;
1361 }
1362 else if (ABI_64_P (abfd))
1363 {
1364 if (offset < 4
1365 || memcmp (contents + offset - 4, leaq, 4) != 0)
1366 return FALSE;
1367 }
1368 else
1369 {
1370 if (offset < 3
1371 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1372 return FALSE;
1373 }
1374 indirect_call = call[2] == 0xff;
1375 }
1376 else
1377 {
1378 /* Check transition from LD access model. Only
1379 leaq foo@tlsld(%rip), %rdi;
1380 call __tls_get_addr@PLT
1381 or
1382 leaq foo@tlsld(%rip), %rdi;
1383 call *__tls_get_addr@GOTPCREL(%rip)
1384 which may be converted to
1385 addr32 call __tls_get_addr
1386 can transit to different access model. For largepic
1387 we also support:
1388 leaq foo@tlsld(%rip), %rdi
1389 movabsq $__tls_get_addr@pltoff, %rax
1390 addq $r15, %rax
1391 call *%rax
1392 or
1393 leaq foo@tlsld(%rip), %rdi
1394 movabsq $__tls_get_addr@pltoff, %rax
1395 addq $rbx, %rax
1396 call *%rax */
1397
1398 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1399
1400 if (offset < 3 || (offset + 9) > sec->size)
1401 return FALSE;
1402
1403 if (memcmp (contents + offset - 3, lea, 3) != 0)
1404 return FALSE;
1405
1406 call = contents + offset + 4;
1407 if (!(call[0] == 0xe8
1408 || (call[0] == 0xff && call[1] == 0x15)
1409 || (call[0] == 0x67 && call[1] == 0xe8)))
1410 {
1411 if (!ABI_64_P (abfd)
1412 || (offset + 19) > sec->size
1413 || memcmp (call, "\x48\xb8", 2) != 0
1414 || call[11] != 0x01
1415 || call[13] != 0xff
1416 || call[14] != 0xd0
1417 || !((call[10] == 0x48 && call[12] == 0xd8)
1418 || (call[10] == 0x4c && call[12] == 0xf8)))
1419 return FALSE;
1420 largepic = TRUE;
1421 }
1422 indirect_call = call[0] == 0xff;
1423 }
1424
1425 r_symndx = htab->r_sym (rel[1].r_info);
1426 if (r_symndx < symtab_hdr->sh_info)
1427 return FALSE;
1428
1429 tls_get_addr = FALSE;
1430 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1431 if (h != NULL && h->root.root.string != NULL)
1432 {
1433 struct elf_x86_64_link_hash_entry *eh
1434 = (struct elf_x86_64_link_hash_entry *) h;
1435 tls_get_addr = eh->tls_get_addr == 1;
1436 if (eh->tls_get_addr > 1)
1437 {
1438 /* Use strncmp to check __tls_get_addr since
1439 __tls_get_addr may be versioned. */
1440 if (strncmp (h->root.root.string, "__tls_get_addr", 14)
1441 == 0)
1442 {
1443 eh->tls_get_addr = 1;
1444 tls_get_addr = TRUE;
1445 }
1446 else
1447 eh->tls_get_addr = 0;
1448 }
1449 }
1450
1451 if (!tls_get_addr)
1452 return FALSE;
1453 else if (largepic)
1454 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
1455 else if (indirect_call)
1456 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX;
1457 else
1458 return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1459 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
1460
1461 case R_X86_64_GOTTPOFF:
1462 /* Check transition from IE access model:
1463 mov foo@gottpoff(%rip), %reg
1464 add foo@gottpoff(%rip), %reg
1465 */
1466
1467 /* Check REX prefix first. */
1468 if (offset >= 3 && (offset + 4) <= sec->size)
1469 {
1470 val = bfd_get_8 (abfd, contents + offset - 3);
1471 if (val != 0x48 && val != 0x4c)
1472 {
1473 /* X32 may have 0x44 REX prefix or no REX prefix. */
1474 if (ABI_64_P (abfd))
1475 return FALSE;
1476 }
1477 }
1478 else
1479 {
1480 /* X32 may not have any REX prefix. */
1481 if (ABI_64_P (abfd))
1482 return FALSE;
1483 if (offset < 2 || (offset + 3) > sec->size)
1484 return FALSE;
1485 }
1486
1487 val = bfd_get_8 (abfd, contents + offset - 2);
1488 if (val != 0x8b && val != 0x03)
1489 return FALSE;
1490
1491 val = bfd_get_8 (abfd, contents + offset - 1);
1492 return (val & 0xc7) == 5;
1493
1494 case R_X86_64_GOTPC32_TLSDESC:
1495 /* Check transition from GDesc access model:
1496 leaq x@tlsdesc(%rip), %rax
1497
1498 Make sure it's a leaq adding rip to a 32-bit offset
1499 into any register, although it's probably almost always
1500 going to be rax. */
1501
1502 if (offset < 3 || (offset + 4) > sec->size)
1503 return FALSE;
1504
1505 val = bfd_get_8 (abfd, contents + offset - 3);
1506 if ((val & 0xfb) != 0x48)
1507 return FALSE;
1508
1509 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1510 return FALSE;
1511
1512 val = bfd_get_8 (abfd, contents + offset - 1);
1513 return (val & 0xc7) == 0x05;
1514
1515 case R_X86_64_TLSDESC_CALL:
1516 /* Check transition from GDesc access model:
1517 call *x@tlsdesc(%rax)
1518 */
1519 if (offset + 2 <= sec->size)
1520 {
1521 /* Make sure that it's a call *x@tlsdesc(%rax). */
1522 call = contents + offset;
1523 return call[0] == 0xff && call[1] == 0x10;
1524 }
1525
1526 return FALSE;
1527
1528 default:
1529 abort ();
1530 }
1531 }
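
/* Roughly, these are the code sequences that elf_x86_64_tls_transition
   may later rewrite when linking an executable: GD, GDesc and IE
   accesses relax toward IE or LE, and LD accesses relax to LE; the
   GD, GDesc and LD rewrites also eliminate the call to
   __tls_get_addr.  */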
1532
1533 /* Return TRUE if the TLS access transition is OK or no transition
1534 will be performed. Update R_TYPE if there is a transition. */
1535
1536 static bfd_boolean
1537 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1538 asection *sec, bfd_byte *contents,
1539 Elf_Internal_Shdr *symtab_hdr,
1540 struct elf_link_hash_entry **sym_hashes,
1541 unsigned int *r_type, int tls_type,
1542 const Elf_Internal_Rela *rel,
1543 const Elf_Internal_Rela *relend,
1544 struct elf_link_hash_entry *h,
1545 unsigned long r_symndx,
1546 bfd_boolean from_relocate_section)
1547 {
1548 unsigned int from_type = *r_type;
1549 unsigned int to_type = from_type;
1550 bfd_boolean check = TRUE;
1551
1552 /* Skip TLS transition for functions. */
1553 if (h != NULL
1554 && (h->type == STT_FUNC
1555 || h->type == STT_GNU_IFUNC))
1556 return TRUE;
1557
1558 switch (from_type)
1559 {
1560 case R_X86_64_TLSGD:
1561 case R_X86_64_GOTPC32_TLSDESC:
1562 case R_X86_64_TLSDESC_CALL:
1563 case R_X86_64_GOTTPOFF:
1564 if (bfd_link_executable (info))
1565 {
1566 if (h == NULL)
1567 to_type = R_X86_64_TPOFF32;
1568 else
1569 to_type = R_X86_64_GOTTPOFF;
1570 }
1571
1572 /* When we are called from elf_x86_64_relocate_section, there may
1573 be additional transitions based on TLS_TYPE. */
1574 if (from_relocate_section)
1575 {
1576 unsigned int new_to_type = to_type;
1577
1578 if (bfd_link_executable (info)
1579 && h != NULL
1580 && h->dynindx == -1
1581 && tls_type == GOT_TLS_IE)
1582 new_to_type = R_X86_64_TPOFF32;
1583
1584 if (to_type == R_X86_64_TLSGD
1585 || to_type == R_X86_64_GOTPC32_TLSDESC
1586 || to_type == R_X86_64_TLSDESC_CALL)
1587 {
1588 if (tls_type == GOT_TLS_IE)
1589 new_to_type = R_X86_64_GOTTPOFF;
1590 }
1591
1592 /* We checked the transition before when we were called from
1593 elf_x86_64_check_relocs. We only want to check the new
1594 transition which hasn't been checked before. */
1595 check = new_to_type != to_type && from_type == to_type;
1596 to_type = new_to_type;
1597 }
1598
1599 break;
1600
1601 case R_X86_64_TLSLD:
1602 if (bfd_link_executable (info))
1603 to_type = R_X86_64_TPOFF32;
1604 break;
1605
1606 default:
1607 return TRUE;
1608 }
1609
1610 /* Return TRUE if there is no transition. */
1611 if (from_type == to_type)
1612 return TRUE;
1613
1614 /* Check if the transition can be performed. */
1615 if (check
1616 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1617 symtab_hdr, sym_hashes,
1618 from_type, rel, relend))
1619 {
1620 reloc_howto_type *from, *to;
1621 const char *name;
1622
1623 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1624 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1625
1626 if (h)
1627 name = h->root.root.string;
1628 else
1629 {
1630 struct elf_x86_64_link_hash_table *htab;
1631
1632 htab = elf_x86_64_hash_table (info);
1633 if (htab == NULL)
1634 name = "*unknown*";
1635 else
1636 {
1637 Elf_Internal_Sym *isym;
1638
1639 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1640 abfd, r_symndx);
1641 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1642 }
1643 }
1644
1645 (*_bfd_error_handler)
1646 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1647 "in section `%A' failed"),
1648 abfd, sec, from->name, to->name, name,
1649 (unsigned long) rel->r_offset);
1650 bfd_set_error (bfd_error_bad_value);
1651 return FALSE;
1652 }
1653
1654 *r_type = to_type;
1655 return TRUE;
1656 }
1657
1658 /* Rename some of the generic section flags to better document how they
1659 are used here. */
1660 #define need_convert_load sec_flg0
1661 #define check_relocs_failed sec_flg1
1662
1663 static bfd_boolean
1664 elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
1665 struct elf_link_hash_entry *h,
1666 Elf_Internal_Shdr *symtab_hdr,
1667 Elf_Internal_Sym *isym,
1668 reloc_howto_type *howto)
1669 {
1670 const char *v = "";
1671 const char *und = "";
1672 const char *pic = "";
1673
1674 const char *name;
1675 if (h)
1676 {
1677 name = h->root.root.string;
1678 switch (ELF_ST_VISIBILITY (h->other))
1679 {
1680 case STV_HIDDEN:
1681 v = _("hidden symbol ");
1682 break;
1683 case STV_INTERNAL:
1684 v = _("internal symbol ");
1685 break;
1686 case STV_PROTECTED:
1687 v = _("protected symbol ");
1688 break;
1689 default:
1690 v = _("symbol ");
1691 pic = _("; recompile with -fPIC");
1692 break;
1693 }
1694
1695 if (!h->def_regular && !h->def_dynamic)
1696 und = _("undefined ");
1697 }
1698 else
1699 {
1700 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1701 pic = _("; recompile with -fPIC");
1702 }
1703
1704 (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' can "
1705 "not be used when making a shared object%s"),
1706 input_bfd, howto->name, und, v, name, pic);
1707 bfd_set_error (bfd_error_bad_value);
1708 sec->check_relocs_failed = 1;
1709 return FALSE;
1710 }
1711
1712 /* With the local symbol, foo, we convert
1713 mov foo@GOTPCREL(%rip), %reg
1714 to
1715 lea foo(%rip), %reg
1716 and convert
1717 call/jmp *foo@GOTPCREL(%rip)
1718 to
1719 nop call foo/jmp foo nop
1720 When PIC is false, convert
1721 test %reg, foo@GOTPCREL(%rip)
1722 to
1723 test $foo, %reg
1724 and convert
1725 binop foo@GOTPCREL(%rip), %reg
1726 to
1727 binop $foo, %reg
1728 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1729 instructions. */
1730
1731 static bfd_boolean
1732 elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec,
1733 bfd_byte *contents,
1734 Elf_Internal_Rela *irel,
1735 struct elf_link_hash_entry *h,
1736 bfd_boolean *converted,
1737 struct bfd_link_info *link_info)
1738 {
1739 struct elf_x86_64_link_hash_table *htab;
1740 bfd_boolean is_pic;
1741 bfd_boolean require_reloc_pc32;
1742 bfd_boolean relocx;
1743 bfd_boolean to_reloc_pc32;
1744 asection *tsec;
1745 char symtype;
1746 bfd_signed_vma raddend;
1747 unsigned int opcode;
1748 unsigned int modrm;
1749 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
1750 unsigned int r_symndx;
1751 bfd_vma toff;
1752 bfd_vma roff = irel->r_offset;
1753
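  /* The 32-bit displacement starts at the relocation offset; the opcode
     and ModRM bytes (and the REX prefix for R_X86_64_REX_GOTPCRELX)
     that precede it must lie within the section.  */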
1754 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1755 return TRUE;
1756
1757 raddend = irel->r_addend;
1758 /* Addend for 32-bit PC-relative relocation must be -4. */
1759 if (raddend != -4)
1760 return TRUE;
1761
1762 htab = elf_x86_64_hash_table (link_info);
1763 is_pic = bfd_link_pic (link_info);
1764
1765 relocx = (r_type == R_X86_64_GOTPCRELX
1766 || r_type == R_X86_64_REX_GOTPCRELX);
1767
1768 /* TRUE if we can convert only to R_X86_64_PC32. Enable it for
1769 --no-relax. */
1770 require_reloc_pc32
1771 = link_info->disable_target_specific_optimizations > 1;
1772
1773 r_symndx = htab->r_sym (irel->r_info);
1774
1775 opcode = bfd_get_8 (abfd, contents + roff - 2);
1776
1777   /* Always convert mov to lea; that conversion has been done even for
	plain R_X86_64_GOTPCREL for a while.  */
1778 if (opcode != 0x8b)
1779 {
1780 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1781 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1782 test, xor instructions. */
1783 if (!relocx)
1784 return TRUE;
1785 }
1786
1787 /* We convert only to R_X86_64_PC32:
1788 1. Branch.
1789 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1790 3. require_reloc_pc32 is true.
1791 4. PIC.
1792 */
1793 to_reloc_pc32 = (opcode == 0xff
1794 || !relocx
1795 || require_reloc_pc32
1796 || is_pic);
1797
1798 /* Get the symbol referred to by the reloc. */
1799 if (h == NULL)
1800 {
1801 Elf_Internal_Sym *isym
1802 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1803
1804 /* Skip relocation against undefined symbols. */
1805 if (isym->st_shndx == SHN_UNDEF)
1806 return TRUE;
1807
1808 symtype = ELF_ST_TYPE (isym->st_info);
1809
1810 if (isym->st_shndx == SHN_ABS)
1811 tsec = bfd_abs_section_ptr;
1812 else if (isym->st_shndx == SHN_COMMON)
1813 tsec = bfd_com_section_ptr;
1814 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1815 tsec = &_bfd_elf_large_com_section;
1816 else
1817 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1818
1819 toff = isym->st_value;
1820 }
1821 else
1822 {
1823       /* An undefined weak symbol is only bound locally in an executable
1824 	 and its reference is resolved as 0 without relocation
1825 	 overflow.  We can only perform this optimization for
1826 	 GOTPCRELX relocations since we need to modify the REX byte.
1827 	 It is OK to convert mov with R_X86_64_GOTPCREL to
1828 	 R_X86_64_PC32.  */
1829 if ((relocx || opcode == 0x8b)
1830 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
1831 TRUE,
1832 elf_x86_64_hash_entry (h)))
1833 {
1834 if (opcode == 0xff)
1835 {
1836 /* Skip for branch instructions since R_X86_64_PC32
1837 may overflow. */
1838 if (require_reloc_pc32)
1839 return TRUE;
1840 }
1841 else if (relocx)
1842 {
1843 /* For non-branch instructions, we can convert to
1844 R_X86_64_32/R_X86_64_32S since we know if there
1845 is a REX byte. */
1846 to_reloc_pc32 = FALSE;
1847 }
1848
1849 /* Since we don't know the current PC when PIC is true,
1850 we can't convert to R_X86_64_PC32. */
1851 if (to_reloc_pc32 && is_pic)
1852 return TRUE;
1853
1854 goto convert;
1855 }
1856       /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1857 ld.so may use its link-time address. */
1858 else if ((h->def_regular
1859 || h->root.type == bfd_link_hash_defined
1860 || h->root.type == bfd_link_hash_defweak)
1861 && h != htab->elf.hdynamic
1862 && SYMBOL_REFERENCES_LOCAL (link_info, h))
1863 {
1864 /* bfd_link_hash_new or bfd_link_hash_undefined is
1865 set by an assignment in a linker script in
1866 bfd_elf_record_link_assignment. */
1867 if (h->def_regular
1868 && (h->root.type == bfd_link_hash_new
1869 || h->root.type == bfd_link_hash_undefined))
1870 {
1871 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1872 if (require_reloc_pc32)
1873 return TRUE;
1874 goto convert;
1875 }
1876 tsec = h->root.u.def.section;
1877 toff = h->root.u.def.value;
1878 symtype = h->type;
1879 }
1880 else
1881 return TRUE;
1882 }
1883
1884 /* Don't convert GOTPCREL relocation against large section. */
1885 if (elf_section_data (tsec) != NULL
1886 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1887 return TRUE;
1888
1889 /* We can only estimate relocation overflow for R_X86_64_PC32. */
1890 if (!to_reloc_pc32)
1891 goto convert;
1892
1893 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
1894 {
1895 /* At this stage in linking, no SEC_MERGE symbol has been
1896 adjusted, so all references to such symbols need to be
1897 passed through _bfd_merged_section_offset. (Later, in
1898 relocate_section, all SEC_MERGE symbols *except* for
1899 section symbols have been adjusted.)
1900
1901 gas may reduce relocations against symbols in SEC_MERGE
1902 sections to a relocation against the section symbol when
1903 the original addend was zero. When the reloc is against
1904 a section symbol we should include the addend in the
1905 offset passed to _bfd_merged_section_offset, since the
1906 location of interest is the original symbol. On the
1907 other hand, an access to "sym+addend" where "sym" is not
1908 	 a section symbol should not include the addend; such an
1909 	 access is presumed to be an offset from "sym"; the
1910 	 location of interest is just "sym".  */
1911 if (symtype == STT_SECTION)
1912 toff += raddend;
1913
1914 toff = _bfd_merged_section_offset (abfd, &tsec,
1915 elf_section_data (tsec)->sec_info,
1916 toff);
1917
1918 if (symtype != STT_SECTION)
1919 toff += raddend;
1920 }
1921 else
1922 toff += raddend;
1923
1924 /* Don't convert if R_X86_64_PC32 relocation overflows. */
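  /* Adding 0x80000000 before the unsigned compare is the usual idiom for
     checking that the signed difference fits in 32 bits.  */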
1925 if (tsec->output_section == sec->output_section)
1926 {
1927 if ((toff - roff + 0x80000000) > 0xffffffff)
1928 return TRUE;
1929 }
1930 else
1931 {
1932 bfd_signed_vma distance;
1933
1934       /* At this point, we don't know the load addresses of the TSEC
1935 	 section or the SEC section.  We estimate the distance between
1936 SEC and TSEC. We store the estimated distances in the
1937 compressed_size field of the output section, which is only
1938 used to decompress the compressed input section. */
1939 if (sec->output_section->compressed_size == 0)
1940 {
1941 asection *asect;
1942 bfd_size_type size = 0;
1943 for (asect = link_info->output_bfd->sections;
1944 asect != NULL;
1945 asect = asect->next)
1946 /* Skip debug sections since compressed_size is used to
1947 compress debug sections. */
1948 if ((asect->flags & SEC_DEBUGGING) == 0)
1949 {
1950 asection *i;
1951 for (i = asect->map_head.s;
1952 i != NULL;
1953 i = i->map_head.s)
1954 {
1955 size = align_power (size, i->alignment_power);
1956 size += i->size;
1957 }
1958 asect->compressed_size = size;
1959 }
1960 }
1961
1962 /* Don't convert GOTPCREL relocations if TSEC isn't placed
1963 after SEC. */
1964 distance = (tsec->output_section->compressed_size
1965 - sec->output_section->compressed_size);
1966 if (distance < 0)
1967 return TRUE;
1968
1969 /* Take PT_GNU_RELRO segment into account by adding
1970 maxpagesize. */
1971 if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize
1972 - roff + 0x80000000) > 0xffffffff)
1973 return TRUE;
1974 }
1975
1976 convert:
1977 if (opcode == 0xff)
1978 {
1979 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1980 unsigned int nop;
1981 unsigned int disp;
1982 bfd_vma nop_offset;
1983
1984 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1985 R_X86_64_PC32. */
1986 modrm = bfd_get_8 (abfd, contents + roff - 1);
1987 if (modrm == 0x25)
1988 {
1989 /* Convert to "jmp foo nop". */
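	  /* "jmp *foo(%rip)" (ff 25 <disp32>, 6 bytes) becomes
	     "jmp foo" (e9 <rel32>, 5 bytes) followed by a 1-byte NOP;
	     the displacement and the relocation move back one byte and
	     the NOP fills the last byte of the original instruction.  */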
1990 modrm = 0xe9;
1991 nop = NOP_OPCODE;
1992 nop_offset = irel->r_offset + 3;
1993 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1994 irel->r_offset -= 1;
1995 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1996 }
1997 else
1998 {
1999 struct elf_x86_64_link_hash_entry *eh
2000 = (struct elf_x86_64_link_hash_entry *) h;
2001
2002 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
2003 is a nop prefix. */
2004 modrm = 0xe8;
2005 /* To support TLS optimization, always use addr32 prefix for
2006 "call *__tls_get_addr@GOTPCREL(%rip)". */
2007 if (eh && eh->tls_get_addr == 1)
2008 {
2009 nop = 0x67;
2010 nop_offset = irel->r_offset - 2;
2011 }
2012 else
2013 {
2014 nop = link_info->call_nop_byte;
2015 if (link_info->call_nop_as_suffix)
2016 {
2017 nop_offset = irel->r_offset + 3;
2018 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2019 irel->r_offset -= 1;
2020 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2021 }
2022 else
2023 nop_offset = irel->r_offset - 2;
2024 }
2025 }
2026 bfd_put_8 (abfd, nop, contents + nop_offset);
2027 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
2028 r_type = R_X86_64_PC32;
2029 }
2030 else
2031 {
2032 unsigned int rex;
2033 unsigned int rex_mask = REX_R;
2034
2035 if (r_type == R_X86_64_REX_GOTPCRELX)
2036 rex = bfd_get_8 (abfd, contents + roff - 3);
2037 else
2038 rex = 0;
2039
2040 if (opcode == 0x8b)
2041 {
2042 if (to_reloc_pc32)
2043 {
2044 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2045 "lea foo(%rip), %reg". */
2046 opcode = 0x8d;
2047 r_type = R_X86_64_PC32;
2048 }
2049 else
2050 {
2051 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2052 "mov $foo, %reg". */
2053 opcode = 0xc7;
2054 modrm = bfd_get_8 (abfd, contents + roff - 1);
2055 modrm = 0xc0 | (modrm & 0x38) >> 3;
2056 if ((rex & REX_W) != 0
2057 && ABI_64_P (link_info->output_bfd))
2058 {
2059 /* Keep the REX_W bit in REX byte for LP64. */
2060 r_type = R_X86_64_32S;
2061 goto rewrite_modrm_rex;
2062 }
2063 else
2064 {
2065 /* If the REX_W bit in REX byte isn't needed,
2066 use R_X86_64_32 and clear the W bit to avoid
2067 sign-extend imm32 to imm64. */
2068 r_type = R_X86_64_32;
2069 /* Clear the W bit in REX byte. */
2070 rex_mask |= REX_W;
2071 goto rewrite_modrm_rex;
2072 }
2073 }
2074 }
2075 else
2076 {
2077 /* R_X86_64_PC32 isn't supported. */
2078 if (to_reloc_pc32)
2079 return TRUE;
2080
2081 modrm = bfd_get_8 (abfd, contents + roff - 1);
2082 if (opcode == 0x85)
2083 {
2084 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
2085 "test $foo, %reg". */
2086 modrm = 0xc0 | (modrm & 0x38) >> 3;
2087 opcode = 0xf7;
2088 }
2089 else
2090 {
2091 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
2092 "binop $foo, %reg". */
2093 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
2094 opcode = 0x81;
2095 }
2096
2097 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
2098 overflow when sign-extending imm32 to imm64. */
2099 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
2100
2101 rewrite_modrm_rex:
2102 bfd_put_8 (abfd, modrm, contents + roff - 1);
2103
2104 if (rex)
2105 {
2106 /* Move the R bit to the B bit in REX byte. */
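	      /* The register that was in ModRM.reg (extended by REX.R)
		 now lives in ModRM.r/m, which is extended by REX.B.  */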
2107 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
2108 bfd_put_8 (abfd, rex, contents + roff - 3);
2109 }
2110
2111 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
2112 irel->r_addend = 0;
2113 }
2114
2115 bfd_put_8 (abfd, opcode, contents + roff - 2);
2116 }
2117
2118 irel->r_info = htab->r_info (r_symndx, r_type);
2119
2120 *converted = TRUE;
2121
2122 return TRUE;
2123 }
2124
2125 /* Look through the relocs for a section during the first phase, and
2126 calculate needed space in the global offset table, procedure
2127 linkage table, and dynamic reloc sections. */
2128
2129 static bfd_boolean
2130 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
2131 asection *sec,
2132 const Elf_Internal_Rela *relocs)
2133 {
2134 struct elf_x86_64_link_hash_table *htab;
2135 Elf_Internal_Shdr *symtab_hdr;
2136 struct elf_link_hash_entry **sym_hashes;
2137 const Elf_Internal_Rela *rel;
2138 const Elf_Internal_Rela *rel_end;
2139 asection *sreloc;
2140 bfd_byte *contents;
2141 bfd_boolean use_plt_got;
2142
2143 if (bfd_link_relocatable (info))
2144 return TRUE;
2145
2146 /* Don't do anything special with non-loaded, non-alloced sections.
2147 In particular, any relocs in such sections should not affect GOT
2148      and PLT reference counting (i.e. we don't allow them to create GOT
2149 or PLT entries), there's no possibility or desire to optimize TLS
2150 relocs, and there's not much point in propagating relocs to shared
2151 libs that the dynamic linker won't relocate. */
2152 if ((sec->flags & SEC_ALLOC) == 0)
2153 return TRUE;
2154
2155 BFD_ASSERT (is_x86_64_elf (abfd));
2156
2157 htab = elf_x86_64_hash_table (info);
2158 if (htab == NULL)
2159 {
2160 sec->check_relocs_failed = 1;
2161 return FALSE;
2162 }
2163
2164 /* Get the section contents. */
2165 if (elf_section_data (sec)->this_hdr.contents != NULL)
2166 contents = elf_section_data (sec)->this_hdr.contents;
2167 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2168 {
2169 sec->check_relocs_failed = 1;
2170 return FALSE;
2171 }
2172
2173 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
2174
2175 symtab_hdr = &elf_symtab_hdr (abfd);
2176 sym_hashes = elf_sym_hashes (abfd);
2177
2178 sreloc = NULL;
2179
2180 rel_end = relocs + sec->reloc_count;
2181 for (rel = relocs; rel < rel_end; rel++)
2182 {
2183 unsigned int r_type;
2184 unsigned long r_symndx;
2185 struct elf_link_hash_entry *h;
2186 struct elf_x86_64_link_hash_entry *eh;
2187 Elf_Internal_Sym *isym;
2188 const char *name;
2189 bfd_boolean size_reloc;
2190
2191 r_symndx = htab->r_sym (rel->r_info);
2192 r_type = ELF32_R_TYPE (rel->r_info);
2193
2194 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
2195 {
2196 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
2197 abfd, r_symndx);
2198 goto error_return;
2199 }
2200
2201 if (r_symndx < symtab_hdr->sh_info)
2202 {
2203 /* A local symbol. */
2204 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2205 abfd, r_symndx);
2206 if (isym == NULL)
2207 goto error_return;
2208
2209 /* Check relocation against local STT_GNU_IFUNC symbol. */
2210 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2211 {
2212 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
2213 TRUE);
2214 if (h == NULL)
2215 goto error_return;
2216
2217 /* Fake a STT_GNU_IFUNC symbol. */
2218 h->type = STT_GNU_IFUNC;
2219 h->def_regular = 1;
2220 h->ref_regular = 1;
2221 h->forced_local = 1;
2222 h->root.type = bfd_link_hash_defined;
2223 }
2224 else
2225 h = NULL;
2226 }
2227 else
2228 {
2229 isym = NULL;
2230 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2231 while (h->root.type == bfd_link_hash_indirect
2232 || h->root.type == bfd_link_hash_warning)
2233 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2234 }
2235
2236       /* Check for invalid x32 relocations.  */
2237 if (!ABI_64_P (abfd))
2238 switch (r_type)
2239 {
2240 default:
2241 break;
2242
2243 case R_X86_64_DTPOFF64:
2244 case R_X86_64_TPOFF64:
2245 case R_X86_64_PC64:
2246 case R_X86_64_GOTOFF64:
2247 case R_X86_64_GOT64:
2248 case R_X86_64_GOTPCREL64:
2249 case R_X86_64_GOTPC64:
2250 case R_X86_64_GOTPLT64:
2251 case R_X86_64_PLTOFF64:
2252 {
2253 if (h)
2254 name = h->root.root.string;
2255 else
2256 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2257 NULL);
2258 (*_bfd_error_handler)
2259 (_("%B: relocation %s against symbol `%s' isn't "
2260 "supported in x32 mode"), abfd,
2261 x86_64_elf_howto_table[r_type].name, name);
2262 bfd_set_error (bfd_error_bad_value);
2263 goto error_return;
2264 }
2265 break;
2266 }
2267
2268 if (h != NULL)
2269 {
2270 switch (r_type)
2271 {
2272 default:
2273 break;
2274
2275 case R_X86_64_PC32_BND:
2276 case R_X86_64_PLT32_BND:
2277 case R_X86_64_PC32:
2278 case R_X86_64_PLT32:
2279 case R_X86_64_32:
2280 case R_X86_64_64:
2281 /* MPX PLT is supported only if elf_x86_64_arch_bed
2282 is used in 64-bit mode. */
2283 if (ABI_64_P (abfd)
2284 && info->bndplt
2285 && (get_elf_x86_64_backend_data (abfd)
2286 == &elf_x86_64_arch_bed))
2287 {
2288 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
2289
2290 /* Create the second PLT for Intel MPX support. */
2291 if (htab->plt_bnd == NULL)
2292 {
2293 unsigned int plt_bnd_align;
2294 const struct elf_backend_data *bed;
2295
2296 bed = get_elf_backend_data (info->output_bfd);
2297 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
2298 && (sizeof (elf_x86_64_bnd_plt2_entry)
2299 == sizeof (elf_x86_64_legacy_plt2_entry)));
2300 plt_bnd_align = 3;
2301
2302 if (htab->elf.dynobj == NULL)
2303 htab->elf.dynobj = abfd;
2304 htab->plt_bnd
2305 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2306 ".plt.bnd",
2307 (bed->dynamic_sec_flags
2308 | SEC_ALLOC
2309 | SEC_CODE
2310 | SEC_LOAD
2311 | SEC_READONLY));
2312 if (htab->plt_bnd == NULL
2313 || !bfd_set_section_alignment (htab->elf.dynobj,
2314 htab->plt_bnd,
2315 plt_bnd_align))
2316 goto error_return;
2317 }
2318 }
2319
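	      /* Fall through.  */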
2320 case R_X86_64_32S:
2321 case R_X86_64_PC64:
2322 case R_X86_64_GOTPCREL:
2323 case R_X86_64_GOTPCRELX:
2324 case R_X86_64_REX_GOTPCRELX:
2325 case R_X86_64_GOTPCREL64:
2326 if (htab->elf.dynobj == NULL)
2327 htab->elf.dynobj = abfd;
2328 /* Create the ifunc sections for static executables. */
2329 if (h->type == STT_GNU_IFUNC
2330 && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj,
2331 info))
2332 goto error_return;
2333 break;
2334 }
2335
2336 /* It is referenced by a non-shared object. */
2337 h->ref_regular = 1;
2338 h->root.non_ir_ref = 1;
2339
2340 if (h->type == STT_GNU_IFUNC)
2341 elf_tdata (info->output_bfd)->has_gnu_symbols
2342 |= elf_gnu_symbol_ifunc;
2343 }
2344
2345 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2346 symtab_hdr, sym_hashes,
2347 &r_type, GOT_UNKNOWN,
2348 rel, rel_end, h, r_symndx, FALSE))
2349 goto error_return;
2350
2351 eh = (struct elf_x86_64_link_hash_entry *) h;
2352 switch (r_type)
2353 {
2354 case R_X86_64_TLSLD:
2355 htab->tls_ld_got.refcount += 1;
2356 goto create_got;
2357
2358 case R_X86_64_TPOFF32:
2359 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2360 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2361 &x86_64_elf_howto_table[r_type]);
2362 if (eh != NULL)
2363 eh->has_got_reloc = 1;
2364 break;
2365
2366 case R_X86_64_GOTTPOFF:
2367 if (!bfd_link_executable (info))
2368 info->flags |= DF_STATIC_TLS;
2369 /* Fall through */
2370
2371 case R_X86_64_GOT32:
2372 case R_X86_64_GOTPCREL:
2373 case R_X86_64_GOTPCRELX:
2374 case R_X86_64_REX_GOTPCRELX:
2375 case R_X86_64_TLSGD:
2376 case R_X86_64_GOT64:
2377 case R_X86_64_GOTPCREL64:
2378 case R_X86_64_GOTPLT64:
2379 case R_X86_64_GOTPC32_TLSDESC:
2380 case R_X86_64_TLSDESC_CALL:
2381 /* This symbol requires a global offset table entry. */
2382 {
2383 int tls_type, old_tls_type;
2384
2385 switch (r_type)
2386 {
2387 default: tls_type = GOT_NORMAL; break;
2388 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2389 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2390 case R_X86_64_GOTPC32_TLSDESC:
2391 case R_X86_64_TLSDESC_CALL:
2392 tls_type = GOT_TLS_GDESC; break;
2393 }
2394
2395 if (h != NULL)
2396 {
2397 h->got.refcount += 1;
2398 old_tls_type = eh->tls_type;
2399 }
2400 else
2401 {
2402 bfd_signed_vma *local_got_refcounts;
2403
2404 /* This is a global offset table entry for a local symbol. */
2405 local_got_refcounts = elf_local_got_refcounts (abfd);
2406 if (local_got_refcounts == NULL)
2407 {
2408 bfd_size_type size;
2409
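		  /* Allocate one block holding, for each local symbol, a
		     GOT refcount (bfd_signed_vma), a TLSDESC GOT offset
		     (bfd_vma) and a TLS type (char); the three arrays are
		     laid out back to back, as set up just below.  */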
2410 size = symtab_hdr->sh_info;
2411 size *= sizeof (bfd_signed_vma)
2412 + sizeof (bfd_vma) + sizeof (char);
2413 local_got_refcounts = ((bfd_signed_vma *)
2414 bfd_zalloc (abfd, size));
2415 if (local_got_refcounts == NULL)
2416 goto error_return;
2417 elf_local_got_refcounts (abfd) = local_got_refcounts;
2418 elf_x86_64_local_tlsdesc_gotent (abfd)
2419 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2420 elf_x86_64_local_got_tls_type (abfd)
2421 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2422 }
2423 local_got_refcounts[r_symndx] += 1;
2424 old_tls_type
2425 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
2426 }
2427
2428 /* If a TLS symbol is accessed using IE at least once,
2429 	     there is no point in using a dynamic model for it.  */
2430 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2431 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2432 || tls_type != GOT_TLS_IE))
2433 {
2434 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2435 tls_type = old_tls_type;
2436 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2437 && GOT_TLS_GD_ANY_P (tls_type))
2438 tls_type |= old_tls_type;
2439 else
2440 {
2441 if (h)
2442 name = h->root.root.string;
2443 else
2444 name = bfd_elf_sym_name (abfd, symtab_hdr,
2445 isym, NULL);
2446 (*_bfd_error_handler)
2447 (_("%B: '%s' accessed both as normal and thread local symbol"),
2448 abfd, name);
2449 bfd_set_error (bfd_error_bad_value);
2450 goto error_return;
2451 }
2452 }
2453
2454 if (old_tls_type != tls_type)
2455 {
2456 if (eh != NULL)
2457 eh->tls_type = tls_type;
2458 else
2459 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
2460 }
2461 }
2462 /* Fall through */
2463
2464 case R_X86_64_GOTOFF64:
2465 case R_X86_64_GOTPC32:
2466 case R_X86_64_GOTPC64:
2467 create_got:
2468 if (eh != NULL)
2469 eh->has_got_reloc = 1;
2470 if (htab->elf.sgot == NULL)
2471 {
2472 if (htab->elf.dynobj == NULL)
2473 htab->elf.dynobj = abfd;
2474 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
2475 info))
2476 goto error_return;
2477 }
2478 break;
2479
2480 case R_X86_64_PLT32:
2481 case R_X86_64_PLT32_BND:
2482 /* This symbol requires a procedure linkage table entry. We
2483 actually build the entry in adjust_dynamic_symbol,
2484 because this might be a case of linking PIC code which is
2485 never referenced by a dynamic object, in which case we
2486 don't need to generate a procedure linkage table entry
2487 after all. */
2488
2489 /* If this is a local symbol, we resolve it directly without
2490 creating a procedure linkage table entry. */
2491 if (h == NULL)
2492 continue;
2493
2494 eh->has_got_reloc = 1;
2495 h->needs_plt = 1;
2496 h->plt.refcount += 1;
2497 break;
2498
2499 case R_X86_64_PLTOFF64:
2500 /* This tries to form the 'address' of a function relative
2501 to GOT. For global symbols we need a PLT entry. */
2502 if (h != NULL)
2503 {
2504 h->needs_plt = 1;
2505 h->plt.refcount += 1;
2506 }
2507 goto create_got;
2508
2509 case R_X86_64_SIZE32:
2510 case R_X86_64_SIZE64:
2511 size_reloc = TRUE;
2512 goto do_size;
2513
2514 case R_X86_64_32:
2515 if (!ABI_64_P (abfd))
2516 goto pointer;
2517 case R_X86_64_8:
2518 case R_X86_64_16:
2519 case R_X86_64_32S:
2520 /* Check relocation overflow as these relocs may lead to
2521 run-time relocation overflow. Don't error out for
2522 sections we don't care about, such as debug sections or
2523 when relocation overflow check is disabled. */
2524 if (!info->no_reloc_overflow_check
2525 && (bfd_link_pic (info)
2526 || (bfd_link_executable (info)
2527 && h != NULL
2528 && !h->def_regular
2529 && h->def_dynamic
2530 && (sec->flags & SEC_READONLY) == 0)))
2531 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2532 &x86_64_elf_howto_table[r_type]);
2533 /* Fall through. */
2534
2535 case R_X86_64_PC8:
2536 case R_X86_64_PC16:
2537 case R_X86_64_PC32:
2538 case R_X86_64_PC32_BND:
2539 case R_X86_64_PC64:
2540 case R_X86_64_64:
2541 pointer:
2542 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2543 eh->has_non_got_reloc = 1;
2544 /* We are called after all symbols have been resolved. Only
2545 	     relocations against STT_GNU_IFUNC symbols must go through
2546 	     the PLT.  */
2547 if (h != NULL
2548 && (bfd_link_executable (info)
2549 || h->type == STT_GNU_IFUNC))
2550 {
2551 /* If this reloc is in a read-only section, we might
2552 need a copy reloc. We can't check reliably at this
2553 stage whether the section is read-only, as input
2554 sections have not yet been mapped to output sections.
2555 Tentatively set the flag for now, and correct in
2556 adjust_dynamic_symbol. */
2557 h->non_got_ref = 1;
2558
2559 /* We may need a .plt entry if the function this reloc
2560 refers to is in a shared lib. */
2561 h->plt.refcount += 1;
2562 if (r_type == R_X86_64_PC32)
2563 {
2564 /* Since something like ".long foo - ." may be used
2565 		     as a pointer, make sure that the PLT is used if foo is
2566 a function defined in a shared library. */
2567 if ((sec->flags & SEC_CODE) == 0)
2568 h->pointer_equality_needed = 1;
2569 }
2570 else if (r_type != R_X86_64_PC32_BND
2571 && r_type != R_X86_64_PC64)
2572 {
2573 h->pointer_equality_needed = 1;
2574 /* At run-time, R_X86_64_64 can be resolved for both
2575 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2576 can only be resolved for x32. */
2577 if ((sec->flags & SEC_READONLY) == 0
2578 && (r_type == R_X86_64_64
2579 || (!ABI_64_P (abfd)
2580 && (r_type == R_X86_64_32
2581 || r_type == R_X86_64_32S))))
2582 eh->func_pointer_refcount += 1;
2583 }
2584 }
2585
2586 size_reloc = FALSE;
2587 do_size:
2588 /* If we are creating a shared library, and this is a reloc
2589 against a global symbol, or a non PC relative reloc
2590 against a local symbol, then we need to copy the reloc
2591 into the shared library. However, if we are linking with
2592 -Bsymbolic, we do not need to copy a reloc against a
2593 global symbol which is defined in an object we are
2594 including in the link (i.e., DEF_REGULAR is set). At
2595 this point we have not seen all the input files, so it is
2596 possible that DEF_REGULAR is not set now but will be set
2597 later (it is never cleared). In case of a weak definition,
2598 DEF_REGULAR may be cleared later by a strong definition in
2599 a shared library. We account for that possibility below by
2600 	     storing information in the dyn_relocs field of the hash
2601 table entry. A similar situation occurs when creating
2602 shared libraries and symbol visibility changes render the
2603 symbol local.
2604
2605 If on the other hand, we are creating an executable, we
2606 may need to keep relocations for symbols satisfied by a
2607 dynamic library if we manage to avoid copy relocs for the
2608 symbol. */
2609 if ((bfd_link_pic (info)
2610 && (! IS_X86_64_PCREL_TYPE (r_type)
2611 || (h != NULL
2612 && (! (bfd_link_pie (info)
2613 || SYMBOLIC_BIND (info, h))
2614 || h->root.type == bfd_link_hash_defweak
2615 || !h->def_regular))))
2616 || (ELIMINATE_COPY_RELOCS
2617 && !bfd_link_pic (info)
2618 && h != NULL
2619 && (h->root.type == bfd_link_hash_defweak
2620 || !h->def_regular)))
2621 {
2622 struct elf_dyn_relocs *p;
2623 struct elf_dyn_relocs **head;
2624
2625 /* We must copy these reloc types into the output file.
2626 Create a reloc section in dynobj and make room for
2627 this reloc. */
2628 if (sreloc == NULL)
2629 {
2630 if (htab->elf.dynobj == NULL)
2631 htab->elf.dynobj = abfd;
2632
2633 sreloc = _bfd_elf_make_dynamic_reloc_section
2634 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2635 abfd, /*rela?*/ TRUE);
2636
2637 if (sreloc == NULL)
2638 goto error_return;
2639 }
2640
2641 /* If this is a global symbol, we count the number of
2642 relocations we need for this symbol. */
2643 if (h != NULL)
2644 head = &eh->dyn_relocs;
2645 else
2646 {
2647 /* Track dynamic relocs needed for local syms too.
2648 We really need local syms available to do this
2649 easily. Oh well. */
2650 asection *s;
2651 void **vpp;
2652
2653 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2654 abfd, r_symndx);
2655 if (isym == NULL)
2656 goto error_return;
2657
2658 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2659 if (s == NULL)
2660 s = sec;
2661
2662 /* Beware of type punned pointers vs strict aliasing
2663 rules. */
2664 vpp = &(elf_section_data (s)->local_dynrel);
2665 head = (struct elf_dyn_relocs **)vpp;
2666 }
2667
2668 p = *head;
2669 if (p == NULL || p->sec != sec)
2670 {
2671 bfd_size_type amt = sizeof *p;
2672
2673 p = ((struct elf_dyn_relocs *)
2674 bfd_alloc (htab->elf.dynobj, amt));
2675 if (p == NULL)
2676 goto error_return;
2677 p->next = *head;
2678 *head = p;
2679 p->sec = sec;
2680 p->count = 0;
2681 p->pc_count = 0;
2682 }
2683
2684 p->count += 1;
2685 /* Count size relocation as PC-relative relocation. */
2686 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2687 p->pc_count += 1;
2688 }
2689 break;
2690
2691 /* This relocation describes the C++ object vtable hierarchy.
2692 Reconstruct it for later use during GC. */
2693 case R_X86_64_GNU_VTINHERIT:
2694 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2695 goto error_return;
2696 break;
2697
2698 /* This relocation describes which C++ vtable entries are actually
2699 used. Record for later use during GC. */
2700 case R_X86_64_GNU_VTENTRY:
2701 BFD_ASSERT (h != NULL);
2702 if (h != NULL
2703 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2704 goto error_return;
2705 break;
2706
2707 default:
2708 break;
2709 }
2710
2711 if (use_plt_got
2712 && h != NULL
2713 && h->plt.refcount > 0
2714 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2715 || h->got.refcount > 0)
2716 && htab->plt_got == NULL)
2717 {
2718 /* Create the GOT procedure linkage table. */
2719 unsigned int plt_got_align;
2720 const struct elf_backend_data *bed;
2721
2722 bed = get_elf_backend_data (info->output_bfd);
2723 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2724 && (sizeof (elf_x86_64_bnd_plt2_entry)
2725 == sizeof (elf_x86_64_legacy_plt2_entry)));
2726 plt_got_align = 3;
2727
2728 if (htab->elf.dynobj == NULL)
2729 htab->elf.dynobj = abfd;
2730 htab->plt_got
2731 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2732 ".plt.got",
2733 (bed->dynamic_sec_flags
2734 | SEC_ALLOC
2735 | SEC_CODE
2736 | SEC_LOAD
2737 | SEC_READONLY));
2738 if (htab->plt_got == NULL
2739 || !bfd_set_section_alignment (htab->elf.dynobj,
2740 htab->plt_got,
2741 plt_got_align))
2742 goto error_return;
2743 }
2744
2745 if ((r_type == R_X86_64_GOTPCREL
2746 || r_type == R_X86_64_GOTPCRELX
2747 || r_type == R_X86_64_REX_GOTPCRELX)
2748 && (h == NULL || h->type != STT_GNU_IFUNC))
2749 sec->need_convert_load = 1;
2750 }
2751
2752 if (elf_section_data (sec)->this_hdr.contents != contents)
2753 {
2754 if (!info->keep_memory)
2755 free (contents);
2756 else
2757 {
2758 /* Cache the section contents for elf_link_input_bfd. */
2759 elf_section_data (sec)->this_hdr.contents = contents;
2760 }
2761 }
2762
2763 return TRUE;
2764
2765 error_return:
2766 if (elf_section_data (sec)->this_hdr.contents != contents)
2767 free (contents);
2768 sec->check_relocs_failed = 1;
2769 return FALSE;
2770 }
2771
2772 /* Return the section that should be marked against GC for a given
2773 relocation. */
2774
2775 static asection *
2776 elf_x86_64_gc_mark_hook (asection *sec,
2777 struct bfd_link_info *info,
2778 Elf_Internal_Rela *rel,
2779 struct elf_link_hash_entry *h,
2780 Elf_Internal_Sym *sym)
2781 {
2782 if (h != NULL)
2783 switch (ELF32_R_TYPE (rel->r_info))
2784 {
2785 case R_X86_64_GNU_VTINHERIT:
2786 case R_X86_64_GNU_VTENTRY:
2787 return NULL;
2788 }
2789
2790 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2791 }
2792
2793 /* Remove undefined weak symbol from the dynamic symbol table if it
2794 is resolved to 0. */
2795
2796 static bfd_boolean
2797 elf_x86_64_fixup_symbol (struct bfd_link_info *info,
2798 struct elf_link_hash_entry *h)
2799 {
2800 if (h->dynindx != -1
2801 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2802 elf_x86_64_hash_entry (h)->has_got_reloc,
2803 elf_x86_64_hash_entry (h)))
2804 {
2805 h->dynindx = -1;
2806 _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
2807 h->dynstr_index);
2808 }
2809 return TRUE;
2810 }
2811
2812 /* Adjust a symbol defined by a dynamic object and referenced by a
2813 regular object. The current definition is in some section of the
2814 dynamic object, but we're not including those sections. We have to
2815 change the definition to something the rest of the link can
2816 understand. */
2817
2818 static bfd_boolean
2819 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2820 struct elf_link_hash_entry *h)
2821 {
2822 struct elf_x86_64_link_hash_table *htab;
2823 asection *s;
2824 struct elf_x86_64_link_hash_entry *eh;
2825 struct elf_dyn_relocs *p;
2826
2827 /* STT_GNU_IFUNC symbol must go through PLT. */
2828 if (h->type == STT_GNU_IFUNC)
2829 {
2830       /* All local STT_GNU_IFUNC references must be treated as local
2831 	 calls via the local PLT.  */
2832 if (h->ref_regular
2833 && SYMBOL_CALLS_LOCAL (info, h))
2834 {
2835 bfd_size_type pc_count = 0, count = 0;
2836 struct elf_dyn_relocs **pp;
2837
2838 eh = (struct elf_x86_64_link_hash_entry *) h;
2839 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2840 {
2841 pc_count += p->pc_count;
2842 p->count -= p->pc_count;
2843 p->pc_count = 0;
2844 count += p->count;
2845 if (p->count == 0)
2846 *pp = p->next;
2847 else
2848 pp = &p->next;
2849 }
2850
2851 if (pc_count || count)
2852 {
2853 h->needs_plt = 1;
2854 h->non_got_ref = 1;
2855 if (h->plt.refcount <= 0)
2856 h->plt.refcount = 1;
2857 else
2858 h->plt.refcount += 1;
2859 }
2860 }
2861
2862 if (h->plt.refcount <= 0)
2863 {
2864 h->plt.offset = (bfd_vma) -1;
2865 h->needs_plt = 0;
2866 }
2867 return TRUE;
2868 }
2869
2870 /* If this is a function, put it in the procedure linkage table. We
2871 will fill in the contents of the procedure linkage table later,
2872 when we know the address of the .got section. */
2873 if (h->type == STT_FUNC
2874 || h->needs_plt)
2875 {
2876 if (h->plt.refcount <= 0
2877 || SYMBOL_CALLS_LOCAL (info, h)
2878 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2879 && h->root.type == bfd_link_hash_undefweak))
2880 {
2881 /* This case can occur if we saw a PLT32 reloc in an input
2882 file, but the symbol was never referred to by a dynamic
2883 object, or if all references were garbage collected. In
2884 such a case, we don't actually need to build a procedure
2885 linkage table, and we can just do a PC32 reloc instead. */
2886 h->plt.offset = (bfd_vma) -1;
2887 h->needs_plt = 0;
2888 }
2889
2890 return TRUE;
2891 }
2892 else
2893 /* It's possible that we incorrectly decided a .plt reloc was
2894 needed for an R_X86_64_PC32 reloc to a non-function sym in
2895 check_relocs. We can't decide accurately between function and
2896        non-function syms in check_relocs; objects loaded later in
2897 the link may change h->type. So fix it now. */
2898 h->plt.offset = (bfd_vma) -1;
2899
2900 /* If this is a weak symbol, and there is a real definition, the
2901 processor independent code will have arranged for us to see the
2902 real definition first, and we can just use the same value. */
2903 if (h->u.weakdef != NULL)
2904 {
2905 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2906 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2907 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2908 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2909 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2910 {
2911 eh = (struct elf_x86_64_link_hash_entry *) h;
2912 h->non_got_ref = h->u.weakdef->non_got_ref;
2913 eh->needs_copy = h->u.weakdef->needs_copy;
2914 }
2915 return TRUE;
2916 }
2917
2918 /* This is a reference to a symbol defined by a dynamic object which
2919 is not a function. */
2920
2921 /* If we are creating a shared library, we must presume that the
2922 only references to the symbol are via the global offset table.
2923 For such cases we need not do anything here; the relocations will
2924 be handled correctly by relocate_section. */
2925 if (!bfd_link_executable (info))
2926 return TRUE;
2927
2928 /* If there are no references to this symbol that do not use the
2929 GOT, we don't need to generate a copy reloc. */
2930 if (!h->non_got_ref)
2931 return TRUE;
2932
2933 /* If -z nocopyreloc was given, we won't generate them either. */
2934 if (info->nocopyreloc)
2935 {
2936 h->non_got_ref = 0;
2937 return TRUE;
2938 }
2939
2940 if (ELIMINATE_COPY_RELOCS)
2941 {
2942 eh = (struct elf_x86_64_link_hash_entry *) h;
2943 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2944 {
2945 s = p->sec->output_section;
2946 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2947 break;
2948 }
2949
2950 /* If we didn't find any dynamic relocs in read-only sections, then
2951 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2952 if (p == NULL)
2953 {
2954 h->non_got_ref = 0;
2955 return TRUE;
2956 }
2957 }
2958
2959 /* We must allocate the symbol in our .dynbss section, which will
2960 become part of the .bss section of the executable. There will be
2961 an entry for this symbol in the .dynsym section. The dynamic
2962 object will contain position independent code, so all references
2963 from the dynamic object to this symbol will go through the global
2964 offset table. The dynamic linker will use the .dynsym entry to
2965 determine the address it must put in the global offset table, so
2966 both the dynamic object and the regular object will refer to the
2967 same memory location for the variable. */
2968
2969 htab = elf_x86_64_hash_table (info);
2970 if (htab == NULL)
2971 return FALSE;
2972
2973 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2974 to copy the initial value out of the dynamic object and into the
2975 runtime process image. */
2976 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2977 {
2978 const struct elf_backend_data *bed;
2979 bed = get_elf_backend_data (info->output_bfd);
2980 htab->srelbss->size += bed->s->sizeof_rela;
2981 h->needs_copy = 1;
2982 }
2983
2984 s = htab->sdynbss;
2985
2986 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2987 }
2988
2989 /* Allocate space in .plt, .got and associated reloc sections for
2990 dynamic relocs. */
2991
2992 static bfd_boolean
2993 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2994 {
2995 struct bfd_link_info *info;
2996 struct elf_x86_64_link_hash_table *htab;
2997 struct elf_x86_64_link_hash_entry *eh;
2998 struct elf_dyn_relocs *p;
2999 const struct elf_backend_data *bed;
3000 unsigned int plt_entry_size;
3001 bfd_boolean resolved_to_zero;
3002
3003 if (h->root.type == bfd_link_hash_indirect)
3004 return TRUE;
3005
3006 eh = (struct elf_x86_64_link_hash_entry *) h;
3007
3008 info = (struct bfd_link_info *) inf;
3009 htab = elf_x86_64_hash_table (info);
3010 if (htab == NULL)
3011 return FALSE;
3012 bed = get_elf_backend_data (info->output_bfd);
3013 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3014
3015 resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3016 eh->has_got_reloc,
3017 eh);
3018
3019 /* We can't use the GOT PLT if pointer equality is needed since
3020 finish_dynamic_symbol won't clear symbol value and the dynamic
3021 linker won't update the GOT slot. We will get into an infinite
3022 loop at run-time. */
3023 if (htab->plt_got != NULL
3024 && h->type != STT_GNU_IFUNC
3025 && !h->pointer_equality_needed
3026 && h->plt.refcount > 0
3027 && h->got.refcount > 0)
3028 {
3029 /* Don't use the regular PLT if there are both GOT and GOTPLT
3030 	 relocations.  */
3031 h->plt.offset = (bfd_vma) -1;
3032
3033 /* Use the GOT PLT. */
3034 eh->plt_got.refcount = 1;
3035 }
3036
3037 /* Clear the reference count of function pointer relocations if
3038 symbol isn't a normal function. */
3039 if (h->type != STT_FUNC)
3040 eh->func_pointer_refcount = 0;
3041
3042 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
3043 here if it is defined and referenced in a non-shared object. */
3044 if (h->type == STT_GNU_IFUNC
3045 && h->def_regular)
3046 {
3047 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
3048 &eh->dyn_relocs,
3049 &htab->readonly_dynrelocs_against_ifunc,
3050 plt_entry_size,
3051 plt_entry_size,
3052 GOT_ENTRY_SIZE))
3053 {
3054 asection *s = htab->plt_bnd;
3055 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
3056 {
3057 /* Use the .plt.bnd section if it is created. */
3058 eh->plt_bnd.offset = s->size;
3059
3060 /* Make room for this entry in the .plt.bnd section. */
3061 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3062 }
3063
3064 return TRUE;
3065 }
3066 else
3067 return FALSE;
3068 }
3069 /* Don't create the PLT entry if there are only function pointer
3070 relocations which can be resolved at run-time. */
3071 else if (htab->elf.dynamic_sections_created
3072 && (h->plt.refcount > eh->func_pointer_refcount
3073 || eh->plt_got.refcount > 0))
3074 {
3075 bfd_boolean use_plt_got;
3076
3077 /* Clear the reference count of function pointer relocations
3078 if PLT is used. */
3079 eh->func_pointer_refcount = 0;
3080
3081 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
3082 {
3083 /* Don't use the regular PLT for DF_BIND_NOW. */
3084 h->plt.offset = (bfd_vma) -1;
3085
3086 /* Use the GOT PLT. */
3087 h->got.refcount = 1;
3088 eh->plt_got.refcount = 1;
3089 }
3090
3091 use_plt_got = eh->plt_got.refcount > 0;
3092
3093 /* Make sure this symbol is output as a dynamic symbol.
3094 Undefined weak syms won't yet be marked as dynamic. */
3095 if (h->dynindx == -1
3096 && !h->forced_local
3097 && !resolved_to_zero)
3098 {
3099 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3100 return FALSE;
3101 }
3102
3103 if (bfd_link_pic (info)
3104 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3105 {
3106 asection *s = htab->elf.splt;
3107 asection *bnd_s = htab->plt_bnd;
3108 asection *got_s = htab->plt_got;
3109
3110 /* If this is the first .plt entry, make room for the special
3111 first entry. The .plt section is used by prelink to undo
3112 prelinking for dynamic relocations. */
3113 if (s->size == 0)
3114 s->size = plt_entry_size;
3115
3116 if (use_plt_got)
3117 eh->plt_got.offset = got_s->size;
3118 else
3119 {
3120 h->plt.offset = s->size;
3121 if (bnd_s)
3122 eh->plt_bnd.offset = bnd_s->size;
3123 }
3124
3125 /* If this symbol is not defined in a regular file, and we are
3126 not generating a shared library, then set the symbol to this
3127 location in the .plt. This is required to make function
3128 pointers compare as equal between the normal executable and
3129 the shared library. */
3130 if (! bfd_link_pic (info)
3131 && !h->def_regular)
3132 {
3133 if (use_plt_got)
3134 {
3135 /* We need to make a call to the entry of the GOT PLT
3136 instead of regular PLT entry. */
3137 h->root.u.def.section = got_s;
3138 h->root.u.def.value = eh->plt_got.offset;
3139 }
3140 else
3141 {
3142 if (bnd_s)
3143 {
3144 /* We need to make a call to the entry of the second
3145 PLT instead of regular PLT entry. */
3146 h->root.u.def.section = bnd_s;
3147 h->root.u.def.value = eh->plt_bnd.offset;
3148 }
3149 else
3150 {
3151 h->root.u.def.section = s;
3152 h->root.u.def.value = h->plt.offset;
3153 }
3154 }
3155 }
3156
3157 /* Make room for this entry. */
3158 if (use_plt_got)
3159 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3160 else
3161 {
3162 s->size += plt_entry_size;
3163 if (bnd_s)
3164 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3165
3166 /* We also need to make an entry in the .got.plt section,
3167 which will be placed in the .got section by the linker
3168 script. */
3169 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
3170
3171 /* There should be no PLT relocation against resolved
3172 undefined weak symbol in executable. */
3173 if (!resolved_to_zero)
3174 {
3175 /* We also need to make an entry in the .rela.plt
3176 section. */
3177 htab->elf.srelplt->size += bed->s->sizeof_rela;
3178 htab->elf.srelplt->reloc_count++;
3179 }
3180 }
3181 }
3182 else
3183 {
3184 eh->plt_got.offset = (bfd_vma) -1;
3185 h->plt.offset = (bfd_vma) -1;
3186 h->needs_plt = 0;
3187 }
3188 }
3189 else
3190 {
3191 eh->plt_got.offset = (bfd_vma) -1;
3192 h->plt.offset = (bfd_vma) -1;
3193 h->needs_plt = 0;
3194 }
3195
3196 eh->tlsdesc_got = (bfd_vma) -1;
3197
3198 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
3199 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
3200 if (h->got.refcount > 0
3201 && bfd_link_executable (info)
3202 && h->dynindx == -1
3203 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
3204 {
3205 h->got.offset = (bfd_vma) -1;
3206 }
3207 else if (h->got.refcount > 0)
3208 {
3209 asection *s;
3210 bfd_boolean dyn;
3211 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
3212
3213 /* Make sure this symbol is output as a dynamic symbol.
3214 Undefined weak syms won't yet be marked as dynamic. */
3215 if (h->dynindx == -1
3216 && !h->forced_local
3217 && !resolved_to_zero)
3218 {
3219 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3220 return FALSE;
3221 }
3222
3223 if (GOT_TLS_GDESC_P (tls_type))
3224 {
3225 eh->tlsdesc_got = htab->elf.sgotplt->size
3226 - elf_x86_64_compute_jump_table_size (htab);
3227 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3228 h->got.offset = (bfd_vma) -2;
3229 }
3230 if (! GOT_TLS_GDESC_P (tls_type)
3231 || GOT_TLS_GD_P (tls_type))
3232 {
3233 s = htab->elf.sgot;
3234 h->got.offset = s->size;
3235 s->size += GOT_ENTRY_SIZE;
3236 if (GOT_TLS_GD_P (tls_type))
3237 s->size += GOT_ENTRY_SIZE;
3238 }
3239 dyn = htab->elf.dynamic_sections_created;
3240 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
3241 and two if global. R_X86_64_GOTTPOFF needs one dynamic
3242 relocation. No dynamic relocation against resolved undefined
3243 weak symbol in executable. */
3244 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
3245 || tls_type == GOT_TLS_IE)
3246 htab->elf.srelgot->size += bed->s->sizeof_rela;
3247 else if (GOT_TLS_GD_P (tls_type))
3248 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
3249 else if (! GOT_TLS_GDESC_P (tls_type)
3250 && ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3251 && !resolved_to_zero)
3252 || h->root.type != bfd_link_hash_undefweak)
3253 && (bfd_link_pic (info)
3254 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3255 htab->elf.srelgot->size += bed->s->sizeof_rela;
3256 if (GOT_TLS_GDESC_P (tls_type))
3257 {
3258 htab->elf.srelplt->size += bed->s->sizeof_rela;
3259 htab->tlsdesc_plt = (bfd_vma) -1;
3260 }
3261 }
3262 else
3263 h->got.offset = (bfd_vma) -1;
3264
3265 if (eh->dyn_relocs == NULL)
3266 return TRUE;
3267
3268 /* In the shared -Bsymbolic case, discard space allocated for
3269 dynamic pc-relative relocs against symbols which turn out to be
3270 defined in regular objects. For the normal shared case, discard
3271 space for pc-relative relocs that have become local due to symbol
3272 visibility changes. */
3273
3274 if (bfd_link_pic (info))
3275 {
3276 /* Relocs that use pc_count are those that appear on a call
3277 	 insn, or certain REL relocs that can be generated via assembly.
3278 We want calls to protected symbols to resolve directly to the
3279 function rather than going via the plt. If people want
3280 function pointer comparisons to work as expected then they
3281 should avoid writing weird assembly. */
3282 if (SYMBOL_CALLS_LOCAL (info, h))
3283 {
3284 struct elf_dyn_relocs **pp;
3285
3286 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3287 {
3288 p->count -= p->pc_count;
3289 p->pc_count = 0;
3290 if (p->count == 0)
3291 *pp = p->next;
3292 else
3293 pp = &p->next;
3294 }
3295 }
3296
3297 /* Also discard relocs on undefined weak syms with non-default
3298 visibility or in PIE. */
3299 if (eh->dyn_relocs != NULL)
3300 {
3301 if (h->root.type == bfd_link_hash_undefweak)
3302 {
3303 /* Undefined weak symbol is never bound locally in shared
3304 library. */
3305 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3306 || resolved_to_zero)
3307 eh->dyn_relocs = NULL;
3308 else if (h->dynindx == -1
3309 && ! h->forced_local
3310 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3311 return FALSE;
3312 }
3313 /* For PIE, discard space for pc-relative relocs against
3314 symbols which turn out to need copy relocs. */
3315 else if (bfd_link_executable (info)
3316 && (h->needs_copy || eh->needs_copy)
3317 && h->def_dynamic
3318 && !h->def_regular)
3319 {
3320 struct elf_dyn_relocs **pp;
3321
3322 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3323 {
3324 if (p->pc_count != 0)
3325 *pp = p->next;
3326 else
3327 pp = &p->next;
3328 }
3329 }
3330 }
3331 }
3332 else if (ELIMINATE_COPY_RELOCS)
3333 {
3334 /* For the non-shared case, discard space for relocs against
3335 symbols which turn out to need copy relocs or are not
3336 dynamic. Keep dynamic relocations for run-time function
3337 pointer initialization. */
3338
3339 if ((!h->non_got_ref
3340 || eh->func_pointer_refcount > 0
3341 || (h->root.type == bfd_link_hash_undefweak
3342 && !resolved_to_zero))
3343 && ((h->def_dynamic
3344 && !h->def_regular)
3345 || (htab->elf.dynamic_sections_created
3346 && (h->root.type == bfd_link_hash_undefweak
3347 || h->root.type == bfd_link_hash_undefined))))
3348 {
3349 /* Make sure this symbol is output as a dynamic symbol.
3350 Undefined weak syms won't yet be marked as dynamic. */
3351 if (h->dynindx == -1
3352 && ! h->forced_local
3353 && ! resolved_to_zero
3354 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3355 return FALSE;
3356
3357 /* If that succeeded, we know we'll be keeping all the
3358 relocs. */
3359 if (h->dynindx != -1)
3360 goto keep;
3361 }
3362
3363 eh->dyn_relocs = NULL;
3364 eh->func_pointer_refcount = 0;
3365
3366 keep: ;
3367 }
3368
3369 /* Finally, allocate space. */
3370 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3371 {
3372 asection * sreloc;
3373
3374 sreloc = elf_section_data (p->sec)->sreloc;
3375
3376 BFD_ASSERT (sreloc != NULL);
3377
3378 sreloc->size += p->count * bed->s->sizeof_rela;
3379 }
3380
3381 return TRUE;
3382 }
3383
3384 /* Allocate space in .plt, .got and associated reloc sections for
3385 local dynamic relocs. */
3386
3387 static bfd_boolean
3388 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
3389 {
3390 struct elf_link_hash_entry *h
3391 = (struct elf_link_hash_entry *) *slot;
3392
3393 if (h->type != STT_GNU_IFUNC
3394 || !h->def_regular
3395 || !h->ref_regular
3396 || !h->forced_local
3397 || h->root.type != bfd_link_hash_defined)
3398 abort ();
3399
3400 return elf_x86_64_allocate_dynrelocs (h, inf);
3401 }
3402
3403 /* Find any dynamic relocs that apply to read-only sections. */
3404
3405 static bfd_boolean
3406 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
3407 void * inf)
3408 {
3409 struct elf_x86_64_link_hash_entry *eh;
3410 struct elf_dyn_relocs *p;
3411
3412 /* Skip local IFUNC symbols. */
3413 if (h->forced_local && h->type == STT_GNU_IFUNC)
3414 return TRUE;
3415
3416 eh = (struct elf_x86_64_link_hash_entry *) h;
3417 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3418 {
3419 asection *s = p->sec->output_section;
3420
3421 if (s != NULL && (s->flags & SEC_READONLY) != 0)
3422 {
3423 struct bfd_link_info *info = (struct bfd_link_info *) inf;
3424
3425 info->flags |= DF_TEXTREL;
3426
3427 if ((info->warn_shared_textrel && bfd_link_pic (info))
3428 || info->error_textrel)
3429 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
3430 p->sec->owner, h->root.root.string,
3431 p->sec);
3432
3433 /* Not an error, just cut short the traversal. */
3434 return FALSE;
3435 }
3436 }
3437 return TRUE;
3438 }
3439
3440 /* Convert load via the GOT slot to load immediate. */
3441
3442 static bfd_boolean
3443 elf_x86_64_convert_load (bfd *abfd, asection *sec,
3444 struct bfd_link_info *link_info)
3445 {
3446 Elf_Internal_Shdr *symtab_hdr;
3447 Elf_Internal_Rela *internal_relocs;
3448 Elf_Internal_Rela *irel, *irelend;
3449 bfd_byte *contents;
3450 struct elf_x86_64_link_hash_table *htab;
3451 bfd_boolean changed;
3452 bfd_signed_vma *local_got_refcounts;
3453
3454 /* Don't even try to convert non-ELF outputs. */
3455 if (!is_elf_hash_table (link_info->hash))
3456 return FALSE;
3457
3458 /* Nothing to do if there is no need or no output. */
3459 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
3460 || sec->need_convert_load == 0
3461 || bfd_is_abs_section (sec->output_section))
3462 return TRUE;
3463
3464 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3465
3466 /* Load the relocations for this section. */
3467 internal_relocs = (_bfd_elf_link_read_relocs
3468 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
3469 link_info->keep_memory));
3470 if (internal_relocs == NULL)
3471 return FALSE;
3472
3473 changed = FALSE;
3474 htab = elf_x86_64_hash_table (link_info);
3475 local_got_refcounts = elf_local_got_refcounts (abfd);
3476
3477 /* Get the section contents. */
3478 if (elf_section_data (sec)->this_hdr.contents != NULL)
3479 contents = elf_section_data (sec)->this_hdr.contents;
3480 else
3481 {
3482 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3483 goto error_return;
3484 }
3485
3486 irelend = internal_relocs + sec->reloc_count;
3487 for (irel = internal_relocs; irel < irelend; irel++)
3488 {
3489 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3490 unsigned int r_symndx;
3491 struct elf_link_hash_entry *h;
3492 bfd_boolean converted;
3493
3494 if (r_type != R_X86_64_GOTPCRELX
3495 && r_type != R_X86_64_REX_GOTPCRELX
3496 && r_type != R_X86_64_GOTPCREL)
3497 continue;
3498
3499 r_symndx = htab->r_sym (irel->r_info);
3500 if (r_symndx < symtab_hdr->sh_info)
3501 h = elf_x86_64_get_local_sym_hash (htab, sec->owner,
3502 (const Elf_Internal_Rela *) irel,
3503 FALSE);
3504 else
3505 {
3506 h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info];
3507 while (h->root.type == bfd_link_hash_indirect
3508 || h->root.type == bfd_link_hash_warning)
3509 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3510 }
3511
3512 /* STT_GNU_IFUNC must keep GOTPCREL relocations. */
3513 if (h != NULL && h->type == STT_GNU_IFUNC)
3514 continue;
3515
3516 converted = FALSE;
3517 if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h,
3518 &converted, link_info))
3519 goto error_return;
3520
3521 if (converted)
3522 {
3523 changed = converted;
3524 if (h)
3525 {
3526 if (h->got.refcount > 0)
3527 h->got.refcount -= 1;
3528 }
3529 else
3530 {
3531 if (local_got_refcounts != NULL
3532 && local_got_refcounts[r_symndx] > 0)
3533 local_got_refcounts[r_symndx] -= 1;
3534 }
3535 }
3536 }
3537
3538 if (contents != NULL
3539 && elf_section_data (sec)->this_hdr.contents != contents)
3540 {
3541 if (!changed && !link_info->keep_memory)
3542 free (contents);
3543 else
3544 {
3545 /* Cache the section contents for elf_link_input_bfd. */
3546 elf_section_data (sec)->this_hdr.contents = contents;
3547 }
3548 }
3549
3550 if (elf_section_data (sec)->relocs != internal_relocs)
3551 {
3552 if (!changed)
3553 free (internal_relocs);
3554 else
3555 elf_section_data (sec)->relocs = internal_relocs;
3556 }
3557
3558 return TRUE;
3559
3560 error_return:
3561 if (contents != NULL
3562 && elf_section_data (sec)->this_hdr.contents != contents)
3563 free (contents);
3564 if (internal_relocs != NULL
3565 && elf_section_data (sec)->relocs != internal_relocs)
3566 free (internal_relocs);
3567 return FALSE;
3568 }
3569
3570 /* Set the sizes of the dynamic sections. */
3571
3572 static bfd_boolean
3573 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3574 struct bfd_link_info *info)
3575 {
3576 struct elf_x86_64_link_hash_table *htab;
3577 bfd *dynobj;
3578 asection *s;
3579 bfd_boolean relocs;
3580 bfd *ibfd;
3581 const struct elf_backend_data *bed;
3582
3583 htab = elf_x86_64_hash_table (info);
3584 if (htab == NULL)
3585 return FALSE;
3586 bed = get_elf_backend_data (output_bfd);
3587
3588 dynobj = htab->elf.dynobj;
3589 if (dynobj == NULL)
3590 abort ();
3591
3592 /* Set up .got offsets for local syms, and space for local dynamic
3593 relocs. */
3594 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3595 {
3596 bfd_signed_vma *local_got;
3597 bfd_signed_vma *end_local_got;
3598 char *local_tls_type;
3599 bfd_vma *local_tlsdesc_gotent;
3600 bfd_size_type locsymcount;
3601 Elf_Internal_Shdr *symtab_hdr;
3602 asection *srel;
3603
3604 if (! is_x86_64_elf (ibfd))
3605 continue;
3606
3607 for (s = ibfd->sections; s != NULL; s = s->next)
3608 {
3609 struct elf_dyn_relocs *p;
3610
3611 if (!elf_x86_64_convert_load (ibfd, s, info))
3612 return FALSE;
3613
3614 for (p = (struct elf_dyn_relocs *)
3615 (elf_section_data (s)->local_dynrel);
3616 p != NULL;
3617 p = p->next)
3618 {
3619 if (!bfd_is_abs_section (p->sec)
3620 && bfd_is_abs_section (p->sec->output_section))
3621 {
3622 /* Input section has been discarded, either because
3623 it is a copy of a linkonce section or due to
3624 linker script /DISCARD/, so we'll be discarding
3625 the relocs too. */
3626 }
3627 else if (p->count != 0)
3628 {
3629 srel = elf_section_data (p->sec)->sreloc;
3630 srel->size += p->count * bed->s->sizeof_rela;
3631 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3632 && (info->flags & DF_TEXTREL) == 0)
3633 {
3634 info->flags |= DF_TEXTREL;
3635 if ((info->warn_shared_textrel && bfd_link_pic (info))
3636 || info->error_textrel)
3637 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3638 p->sec->owner, p->sec);
3639 }
3640 }
3641 }
3642 }
3643
3644 local_got = elf_local_got_refcounts (ibfd);
3645 if (!local_got)
3646 continue;
3647
3648 symtab_hdr = &elf_symtab_hdr (ibfd);
3649 locsymcount = symtab_hdr->sh_info;
3650 end_local_got = local_got + locsymcount;
3651 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3652 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3653 s = htab->elf.sgot;
3654 srel = htab->elf.srelgot;
3655 for (; local_got < end_local_got;
3656 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3657 {
3658 *local_tlsdesc_gotent = (bfd_vma) -1;
3659 if (*local_got > 0)
3660 {
3661 if (GOT_TLS_GDESC_P (*local_tls_type))
3662 {
3663 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3664 - elf_x86_64_compute_jump_table_size (htab);
3665 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3666 *local_got = (bfd_vma) -2;
3667 }
3668 if (! GOT_TLS_GDESC_P (*local_tls_type)
3669 || GOT_TLS_GD_P (*local_tls_type))
3670 {
3671 *local_got = s->size;
3672 s->size += GOT_ENTRY_SIZE;
3673 if (GOT_TLS_GD_P (*local_tls_type))
3674 s->size += GOT_ENTRY_SIZE;
3675 }
3676 if (bfd_link_pic (info)
3677 || GOT_TLS_GD_ANY_P (*local_tls_type)
3678 || *local_tls_type == GOT_TLS_IE)
3679 {
3680 if (GOT_TLS_GDESC_P (*local_tls_type))
3681 {
3682 htab->elf.srelplt->size
3683 += bed->s->sizeof_rela;
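/* Mark that the special TLSDESC PLT entry will be needed; its real offset is assigned further below once the final PLT size is known. */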
3684 htab->tlsdesc_plt = (bfd_vma) -1;
3685 }
3686 if (! GOT_TLS_GDESC_P (*local_tls_type)
3687 || GOT_TLS_GD_P (*local_tls_type))
3688 srel->size += bed->s->sizeof_rela;
3689 }
3690 }
3691 else
3692 *local_got = (bfd_vma) -1;
3693 }
3694 }
3695
3696 if (htab->tls_ld_got.refcount > 0)
3697 {
3698 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3699 relocs. */
3700 htab->tls_ld_got.offset = htab->elf.sgot->size;
3701 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3702 htab->elf.srelgot->size += bed->s->sizeof_rela;
3703 }
3704 else
3705 htab->tls_ld_got.offset = -1;
3706
3707 /* Allocate global sym .plt and .got entries, and space for global
3708 sym dynamic relocs. */
3709 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3710 info);
3711
3712 /* Allocate .plt and .got entries, and space for local symbols. */
3713 htab_traverse (htab->loc_hash_table,
3714 elf_x86_64_allocate_local_dynrelocs,
3715 info);
3716
3717 /* For every jump slot reserved in the sgotplt, reloc_count is
3718 incremented. However, when we reserve space for TLS descriptors,
3719 it's not incremented, so in order to compute the space reserved
3720 for them, it suffices to multiply the reloc count by the jump
3721 slot size.
3722
3723 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3724 so that R_X86_64_IRELATIVE entries come last. */
3725 if (htab->elf.srelplt)
3726 {
3727 htab->sgotplt_jump_table_size
3728 = elf_x86_64_compute_jump_table_size (htab);
3729 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3730 }
3731 else if (htab->elf.irelplt)
3732 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3733
3734 if (htab->tlsdesc_plt)
3735 {
3736 /* If we're not using lazy TLS relocations, don't generate the
3737 PLT and GOT entries they require. */
3738 if ((info->flags & DF_BIND_NOW))
3739 htab->tlsdesc_plt = 0;
3740 else
3741 {
3742 htab->tlsdesc_got = htab->elf.sgot->size;
3743 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3744 /* Reserve room for the initial entry.
3745 FIXME: we could probably do away with it in this case. */
3746 if (htab->elf.splt->size == 0)
3747 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3748 htab->tlsdesc_plt = htab->elf.splt->size;
3749 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3750 }
3751 }
3752
3753 if (htab->elf.sgotplt)
3754 {
3755 /* Don't allocate the .got.plt section if there are no GOT or PLT
3756 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3757 if ((htab->elf.hgot == NULL
3758 || !htab->elf.hgot->ref_regular_nonweak)
3759 && (htab->elf.sgotplt->size
3760 == get_elf_backend_data (output_bfd)->got_header_size)
3761 && (htab->elf.splt == NULL
3762 || htab->elf.splt->size == 0)
3763 && (htab->elf.sgot == NULL
3764 || htab->elf.sgot->size == 0)
3765 && (htab->elf.iplt == NULL
3766 || htab->elf.iplt->size == 0)
3767 && (htab->elf.igotplt == NULL
3768 || htab->elf.igotplt->size == 0))
3769 htab->elf.sgotplt->size = 0;
3770 }
3771
3772 if (htab->plt_eh_frame != NULL
3773 && htab->elf.splt != NULL
3774 && htab->elf.splt->size != 0
3775 && !bfd_is_abs_section (htab->elf.splt->output_section)
3776 && _bfd_elf_eh_frame_present (info))
3777 {
3778 const struct elf_x86_64_backend_data *arch_data
3779 = get_elf_x86_64_arch_data (bed);
3780 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3781 }
3782
3783 /* We now have determined the sizes of the various dynamic sections.
3784 Allocate memory for them. */
3785 relocs = FALSE;
3786 for (s = dynobj->sections; s != NULL; s = s->next)
3787 {
3788 if ((s->flags & SEC_LINKER_CREATED) == 0)
3789 continue;
3790
3791 if (s == htab->elf.splt
3792 || s == htab->elf.sgot
3793 || s == htab->elf.sgotplt
3794 || s == htab->elf.iplt
3795 || s == htab->elf.igotplt
3796 || s == htab->plt_bnd
3797 || s == htab->plt_got
3798 || s == htab->plt_eh_frame
3799 || s == htab->sdynbss)
3800 {
3801 /* Strip this section if we don't need it; see the
3802 comment below. */
3803 }
3804 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3805 {
3806 if (s->size != 0 && s != htab->elf.srelplt)
3807 relocs = TRUE;
3808
3809 /* We use the reloc_count field as a counter if we need
3810 to copy relocs into the output file. */
3811 if (s != htab->elf.srelplt)
3812 s->reloc_count = 0;
3813 }
3814 else
3815 {
3816 /* It's not one of our sections, so don't allocate space. */
3817 continue;
3818 }
3819
3820 if (s->size == 0)
3821 {
3822 /* If we don't need this section, strip it from the
3823 output file. This is mostly to handle .rela.bss and
3824 .rela.plt. We must create both sections in
3825 create_dynamic_sections, because they must be created
3826 before the linker maps input sections to output
3827 sections. The linker does that before
3828 adjust_dynamic_symbol is called, and it is that
3829 function which decides whether anything needs to go
3830 into these sections. */
3831
3832 s->flags |= SEC_EXCLUDE;
3833 continue;
3834 }
3835
3836 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3837 continue;
3838
3839 /* Allocate memory for the section contents. We use bfd_zalloc
3840 here in case unused entries are not reclaimed before the
3841 section's contents are written out. This should not happen,
3842 but this way if it does, we get a R_X86_64_NONE reloc instead
3843 of garbage. */
3844 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3845 if (s->contents == NULL)
3846 return FALSE;
3847 }
3848
3849 if (htab->plt_eh_frame != NULL
3850 && htab->plt_eh_frame->contents != NULL)
3851 {
3852 const struct elf_x86_64_backend_data *arch_data
3853 = get_elf_x86_64_arch_data (bed);
3854
3855 memcpy (htab->plt_eh_frame->contents,
3856 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
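/* Patch the FDE's address-range field so that it covers the whole .plt section. */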
3857 bfd_put_32 (dynobj, htab->elf.splt->size,
3858 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3859 }
3860
3861 if (htab->elf.dynamic_sections_created)
3862 {
3863 /* Add some entries to the .dynamic section. We fill in the
3864 values later, in elf_x86_64_finish_dynamic_sections, but we
3865 must add the entries now so that we get the correct size for
3866 the .dynamic section. The DT_DEBUG entry is filled in by the
3867 dynamic linker and used by the debugger. */
3868 #define add_dynamic_entry(TAG, VAL) \
3869 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3870
3871 if (bfd_link_executable (info))
3872 {
3873 if (!add_dynamic_entry (DT_DEBUG, 0))
3874 return FALSE;
3875 }
3876
3877 if (htab->elf.splt->size != 0)
3878 {
3879 /* DT_PLTGOT is used by prelink even if there is no PLT
3880 relocation. */
3881 if (!add_dynamic_entry (DT_PLTGOT, 0))
3882 return FALSE;
3883
3884 if (htab->elf.srelplt->size != 0)
3885 {
3886 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3887 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3888 || !add_dynamic_entry (DT_JMPREL, 0))
3889 return FALSE;
3890 }
3891
3892 if (htab->tlsdesc_plt
3893 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3894 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3895 return FALSE;
3896 }
3897
3898 if (relocs)
3899 {
3900 if (!add_dynamic_entry (DT_RELA, 0)
3901 || !add_dynamic_entry (DT_RELASZ, 0)
3902 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3903 return FALSE;
3904
3905 /* If any dynamic relocs apply to a read-only section,
3906 then we need a DT_TEXTREL entry. */
3907 if ((info->flags & DF_TEXTREL) == 0)
3908 elf_link_hash_traverse (&htab->elf,
3909 elf_x86_64_readonly_dynrelocs,
3910 info);
3911
3912 if ((info->flags & DF_TEXTREL) != 0)
3913 {
3914 if (htab->readonly_dynrelocs_against_ifunc)
3915 {
3916 info->callbacks->einfo
3917 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3918 bfd_set_error (bfd_error_bad_value);
3919 return FALSE;
3920 }
3921
3922 if (!add_dynamic_entry (DT_TEXTREL, 0))
3923 return FALSE;
3924 }
3925 }
3926 }
3927 #undef add_dynamic_entry
3928
3929 return TRUE;
3930 }
3931
3932 static bfd_boolean
3933 elf_x86_64_always_size_sections (bfd *output_bfd,
3934 struct bfd_link_info *info)
3935 {
3936 asection *tls_sec = elf_hash_table (info)->tls_sec;
3937
3938 if (tls_sec)
3939 {
3940 struct elf_link_hash_entry *tlsbase;
3941
3942 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3943 "_TLS_MODULE_BASE_",
3944 FALSE, FALSE, FALSE);
3945
3946 if (tlsbase && tlsbase->type == STT_TLS)
3947 {
3948 struct elf_x86_64_link_hash_table *htab;
3949 struct bfd_link_hash_entry *bh = NULL;
3950 const struct elf_backend_data *bed
3951 = get_elf_backend_data (output_bfd);
3952
3953 htab = elf_x86_64_hash_table (info);
3954 if (htab == NULL)
3955 return FALSE;
3956
3957 if (!(_bfd_generic_link_add_one_symbol
3958 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3959 tls_sec, 0, NULL, FALSE,
3960 bed->collect, &bh)))
3961 return FALSE;
3962
3963 htab->tls_module_base = bh;
3964
3965 tlsbase = (struct elf_link_hash_entry *)bh;
3966 tlsbase->def_regular = 1;
3967 tlsbase->other = STV_HIDDEN;
3968 tlsbase->root.linker_def = 1;
3969 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3970 }
3971 }
3972
3973 return TRUE;
3974 }
3975
3976 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3977 executables. Rather than setting it to the beginning of the TLS
3978 section, we have to set it to the end. This function may be called
3979 multiple times; it is idempotent. */
3980
3981 static void
3982 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3983 {
3984 struct elf_x86_64_link_hash_table *htab;
3985 struct bfd_link_hash_entry *base;
3986
3987 if (!bfd_link_executable (info))
3988 return;
3989
3990 htab = elf_x86_64_hash_table (info);
3991 if (htab == NULL)
3992 return;
3993
3994 base = htab->tls_module_base;
3995 if (base == NULL)
3996 return;
3997
3998 base->u.def.value = htab->elf.tls_size;
3999 }
4000
4001 /* Return the base VMA address which should be subtracted from real addresses
4002 when resolving @dtpoff relocation.
4003 This is PT_TLS segment p_vaddr. */
4004
4005 static bfd_vma
4006 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
4007 {
4008 /* If tls_sec is NULL, we should have signalled an error already. */
4009 if (elf_hash_table (info)->tls_sec == NULL)
4010 return 0;
4011 return elf_hash_table (info)->tls_sec->vma;
4012 }
4013
4014 /* Return the relocation value for @tpoff relocation
4015 if STT_TLS virtual address is ADDRESS. */
4016
4017 static bfd_vma
4018 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
4019 {
4020 struct elf_link_hash_table *htab = elf_hash_table (info);
4021 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
4022 bfd_vma static_tls_size;
4023
4024 /* If tls_segment is NULL, we should have signalled an error already. */
4025 if (htab->tls_sec == NULL)
4026 return 0;
4027
4028 /* Consider special static TLS alignment requirements. */
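/* x86-64 uses TLS Variant II: the thread pointer sits at the end of the static TLS block, so the @tpoff value computed below is a negative offset from it. */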
4029 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
4030 return address - static_tls_size - htab->tls_sec->vma;
4031 }
4032
4033 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
4034 branch? */
4035
4036 static bfd_boolean
4037 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
4038 {
4039 /* Opcode Instruction
4040 0xe8 call
4041 0xe9 jump
4042 0x0f 0x8x conditional jump */
4043 return ((offset > 0
4044 && (contents [offset - 1] == 0xe8
4045 || contents [offset - 1] == 0xe9))
4046 || (offset > 1
4047 && contents [offset - 2] == 0x0f
4048 && (contents [offset - 1] & 0xf0) == 0x80));
4049 }
4050
4051 /* Relocate an x86_64 ELF section. */
4052
4053 static bfd_boolean
4054 elf_x86_64_relocate_section (bfd *output_bfd,
4055 struct bfd_link_info *info,
4056 bfd *input_bfd,
4057 asection *input_section,
4058 bfd_byte *contents,
4059 Elf_Internal_Rela *relocs,
4060 Elf_Internal_Sym *local_syms,
4061 asection **local_sections)
4062 {
4063 struct elf_x86_64_link_hash_table *htab;
4064 Elf_Internal_Shdr *symtab_hdr;
4065 struct elf_link_hash_entry **sym_hashes;
4066 bfd_vma *local_got_offsets;
4067 bfd_vma *local_tlsdesc_gotents;
4068 Elf_Internal_Rela *rel;
4069 Elf_Internal_Rela *wrel;
4070 Elf_Internal_Rela *relend;
4071 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
4072
4073 BFD_ASSERT (is_x86_64_elf (input_bfd));
4074
4075 /* Skip if check_relocs failed. */
4076 if (input_section->check_relocs_failed)
4077 return FALSE;
4078
4079 htab = elf_x86_64_hash_table (info);
4080 if (htab == NULL)
4081 return FALSE;
4082 symtab_hdr = &elf_symtab_hdr (input_bfd);
4083 sym_hashes = elf_sym_hashes (input_bfd);
4084 local_got_offsets = elf_local_got_offsets (input_bfd);
4085 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
4086
4087 elf_x86_64_set_tls_module_base (info);
4088
4089 rel = wrel = relocs;
4090 relend = relocs + input_section->reloc_count;
4091 for (; rel < relend; wrel++, rel++)
4092 {
4093 unsigned int r_type;
4094 reloc_howto_type *howto;
4095 unsigned long r_symndx;
4096 struct elf_link_hash_entry *h;
4097 struct elf_x86_64_link_hash_entry *eh;
4098 Elf_Internal_Sym *sym;
4099 asection *sec;
4100 bfd_vma off, offplt, plt_offset;
4101 bfd_vma relocation;
4102 bfd_boolean unresolved_reloc;
4103 bfd_reloc_status_type r;
4104 int tls_type;
4105 asection *base_got, *resolved_plt;
4106 bfd_vma st_size;
4107 bfd_boolean resolved_to_zero;
4108
4109 r_type = ELF32_R_TYPE (rel->r_info);
4110 if (r_type == (int) R_X86_64_GNU_VTINHERIT
4111 || r_type == (int) R_X86_64_GNU_VTENTRY)
4112 {
4113 if (wrel != rel)
4114 *wrel = *rel;
4115 continue;
4116 }
4117
4118 if (r_type >= (int) R_X86_64_standard)
4119 {
4120 (*_bfd_error_handler)
4121 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4122 input_bfd, input_section, r_type);
4123 bfd_set_error (bfd_error_bad_value);
4124 return FALSE;
4125 }
4126
4127 if (r_type != (int) R_X86_64_32
4128 || ABI_64_P (output_bfd))
4129 howto = x86_64_elf_howto_table + r_type;
4130 else
4131 howto = (x86_64_elf_howto_table
4132 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
4133 r_symndx = htab->r_sym (rel->r_info);
4134 h = NULL;
4135 sym = NULL;
4136 sec = NULL;
4137 unresolved_reloc = FALSE;
4138 if (r_symndx < symtab_hdr->sh_info)
4139 {
4140 sym = local_syms + r_symndx;
4141 sec = local_sections[r_symndx];
4142
4143 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
4144 &sec, rel);
4145 st_size = sym->st_size;
4146
4147 /* Relocate against local STT_GNU_IFUNC symbol. */
4148 if (!bfd_link_relocatable (info)
4149 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4150 {
4151 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
4152 rel, FALSE);
4153 if (h == NULL)
4154 abort ();
4155
4156 /* Set STT_GNU_IFUNC symbol value. */
4157 h->root.u.def.value = sym->st_value;
4158 h->root.u.def.section = sec;
4159 }
4160 }
4161 else
4162 {
4163 bfd_boolean warned ATTRIBUTE_UNUSED;
4164 bfd_boolean ignored ATTRIBUTE_UNUSED;
4165
4166 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4167 r_symndx, symtab_hdr, sym_hashes,
4168 h, sec, relocation,
4169 unresolved_reloc, warned, ignored);
4170 st_size = h->size;
4171 }
4172
4173 if (sec != NULL && discarded_section (sec))
4174 {
4175 _bfd_clear_contents (howto, input_bfd, input_section,
4176 contents + rel->r_offset);
4177 wrel->r_offset = rel->r_offset;
4178 wrel->r_info = 0;
4179 wrel->r_addend = 0;
4180
4181 /* For ld -r, remove relocations in debug sections against
4182 sections defined in discarded sections. Not done for
4183 eh_frame; its editing code expects the relocations to be present. */
4184 if (bfd_link_relocatable (info)
4185 && (input_section->flags & SEC_DEBUGGING))
4186 wrel--;
4187
4188 continue;
4189 }
4190
4191 if (bfd_link_relocatable (info))
4192 {
4193 if (wrel != rel)
4194 *wrel = *rel;
4195 continue;
4196 }
4197
4198 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
4199 {
4200 if (r_type == R_X86_64_64)
4201 {
4202 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
4203 zero-extend it to 64bit if addend is zero. */
4204 r_type = R_X86_64_32;
4205 memset (contents + rel->r_offset + 4, 0, 4);
4206 }
4207 else if (r_type == R_X86_64_SIZE64)
4208 {
4209 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
4210 zero-extend it to 64bit if addend is zero. */
4211 r_type = R_X86_64_SIZE32;
4212 memset (contents + rel->r_offset + 4, 0, 4);
4213 }
4214 }
4215
4216 eh = (struct elf_x86_64_link_hash_entry *) h;
4217
4218 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4219 it here if it is defined in a non-shared object. */
4220 if (h != NULL
4221 && h->type == STT_GNU_IFUNC
4222 && h->def_regular)
4223 {
4224 bfd_vma plt_index;
4225 const char *name;
4226
4227 if ((input_section->flags & SEC_ALLOC) == 0)
4228 {
4229 /* Dynamic relocs are not propagated for SEC_DEBUGGING
4230 sections because such sections are not SEC_ALLOC and
4231 thus ld.so will not process them. */
4232 if ((input_section->flags & SEC_DEBUGGING) != 0)
4233 continue;
4234 abort ();
4235 }
4236 else if (h->plt.offset == (bfd_vma) -1)
4237 abort ();
4238
4239 /* STT_GNU_IFUNC symbol must go through PLT. */
4240 if (htab->elf.splt != NULL)
4241 {
4242 if (htab->plt_bnd != NULL)
4243 {
4244 resolved_plt = htab->plt_bnd;
4245 plt_offset = eh->plt_bnd.offset;
4246 }
4247 else
4248 {
4249 resolved_plt = htab->elf.splt;
4250 plt_offset = h->plt.offset;
4251 }
4252 }
4253 else
4254 {
4255 resolved_plt = htab->elf.iplt;
4256 plt_offset = h->plt.offset;
4257 }
4258
4259 relocation = (resolved_plt->output_section->vma
4260 + resolved_plt->output_offset + plt_offset);
4261
4262 switch (r_type)
4263 {
4264 default:
4265 if (h->root.root.string)
4266 name = h->root.root.string;
4267 else
4268 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4269 NULL);
4270 (*_bfd_error_handler)
4271 (_("%B: relocation %s against STT_GNU_IFUNC "
4272 "symbol `%s' isn't handled by %s"), input_bfd,
4273 howto->name, name, __FUNCTION__);
4274 bfd_set_error (bfd_error_bad_value);
4275 return FALSE;
4276
4277 case R_X86_64_32S:
4278 if (bfd_link_pic (info))
4279 abort ();
4280 goto do_relocation;
4281
4282 case R_X86_64_32:
4283 if (ABI_64_P (output_bfd))
4284 goto do_relocation;
4285 /* FALLTHROUGH */
4286 case R_X86_64_64:
4287 if (rel->r_addend != 0)
4288 {
4289 if (h->root.root.string)
4290 name = h->root.root.string;
4291 else
4292 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4293 sym, NULL);
4294 (*_bfd_error_handler)
4295 (_("%B: relocation %s against STT_GNU_IFUNC "
4296 "symbol `%s' has non-zero addend: %d"),
4297 input_bfd, howto->name, name, rel->r_addend);
4298 bfd_set_error (bfd_error_bad_value);
4299 return FALSE;
4300 }
4301
4302 /* Generate a dynamic relocation only when there is a
4303 non-GOT reference in a shared object. */
4304 if (bfd_link_pic (info) && h->non_got_ref)
4305 {
4306 Elf_Internal_Rela outrel;
4307 asection *sreloc;
4308
4309 /* Need a dynamic relocation to get the real function
4310 address. */
4311 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4312 info,
4313 input_section,
4314 rel->r_offset);
4315 if (outrel.r_offset == (bfd_vma) -1
4316 || outrel.r_offset == (bfd_vma) -2)
4317 abort ();
4318
4319 outrel.r_offset += (input_section->output_section->vma
4320 + input_section->output_offset);
4321
4322 if (h->dynindx == -1
4323 || h->forced_local
4324 || bfd_link_executable (info))
4325 {
4326 /* This symbol is resolved locally. */
4327 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4328 outrel.r_addend = (h->root.u.def.value
4329 + h->root.u.def.section->output_section->vma
4330 + h->root.u.def.section->output_offset);
4331 }
4332 else
4333 {
4334 outrel.r_info = htab->r_info (h->dynindx, r_type);
4335 outrel.r_addend = 0;
4336 }
4337
4338 sreloc = htab->elf.irelifunc;
4339 elf_append_rela (output_bfd, sreloc, &outrel);
4340
4341 /* If this reloc is against an external symbol, we
4342 do not want to fiddle with the addend. Otherwise,
4343 we need to include the symbol value so that it
4344 becomes an addend for the dynamic reloc. For an
4345 internal symbol, we have updated addend. */
4346 continue;
4347 }
4348 /* FALLTHROUGH */
4349 case R_X86_64_PC32:
4350 case R_X86_64_PC32_BND:
4351 case R_X86_64_PC64:
4352 case R_X86_64_PLT32:
4353 case R_X86_64_PLT32_BND:
4354 goto do_relocation;
4355
4356 case R_X86_64_GOTPCREL:
4357 case R_X86_64_GOTPCRELX:
4358 case R_X86_64_REX_GOTPCRELX:
4359 case R_X86_64_GOTPCREL64:
4360 base_got = htab->elf.sgot;
4361 off = h->got.offset;
4362
4363 if (base_got == NULL)
4364 abort ();
4365
4366 if (off == (bfd_vma) -1)
4367 {
4368 /* We can't use h->got.offset here to save state, or
4369 even just remember the offset, as finish_dynamic_symbol
4370 would use that as offset into .got. */
4371
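/* Use the GOT slot that belongs to the symbol's PLT entry instead. The first three .got.plt slots are reserved for the dynamic linker, while .igot.plt (used for static executables) has no reserved slots. */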
4372 if (htab->elf.splt != NULL)
4373 {
4374 plt_index = h->plt.offset / plt_entry_size - 1;
4375 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4376 base_got = htab->elf.sgotplt;
4377 }
4378 else
4379 {
4380 plt_index = h->plt.offset / plt_entry_size;
4381 off = plt_index * GOT_ENTRY_SIZE;
4382 base_got = htab->elf.igotplt;
4383 }
4384
4385 if (h->dynindx == -1
4386 || h->forced_local
4387 || info->symbolic)
4388 {
4389 /* This references the local definition. We must
4390 initialize this entry in the global offset table.
4391 Since the offset must always be a multiple of 8,
4392 we use the least significant bit to record
4393 whether we have initialized it already.
4394
4395 When doing a dynamic link, we create a .rela.got
4396 relocation entry to initialize the value. This
4397 is done in the finish_dynamic_symbol routine. */
4398 if ((off & 1) != 0)
4399 off &= ~1;
4400 else
4401 {
4402 bfd_put_64 (output_bfd, relocation,
4403 base_got->contents + off);
4404 /* Note that this is harmless for the GOTPLT64
4405 case, as -1 | 1 still is -1. */
4406 h->got.offset |= 1;
4407 }
4408 }
4409 }
4410
4411 relocation = (base_got->output_section->vma
4412 + base_got->output_offset + off);
4413
4414 goto do_relocation;
4415 }
4416 }
4417
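/* An undefined weak symbol that is resolved to zero at run time in an executable needs no dynamic relocation for its references; see UNDEFINED_WEAK_RESOLVED_TO_ZERO. */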
4418 resolved_to_zero = (eh != NULL
4419 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
4420 eh->has_got_reloc,
4421 eh));
4422
4423 /* When generating a shared object, the relocations handled here are
4424 copied into the output file to be resolved at run time. */
4425 switch (r_type)
4426 {
4427 case R_X86_64_GOT32:
4428 case R_X86_64_GOT64:
4429 /* Relocation is to the entry for this symbol in the global
4430 offset table. */
4431 case R_X86_64_GOTPCREL:
4432 case R_X86_64_GOTPCRELX:
4433 case R_X86_64_REX_GOTPCRELX:
4434 case R_X86_64_GOTPCREL64:
4435 /* Use global offset table entry as symbol value. */
4436 case R_X86_64_GOTPLT64:
4437 /* This is obsolete and treated the same as GOT64. */
4438 base_got = htab->elf.sgot;
4439
4440 if (htab->elf.sgot == NULL)
4441 abort ();
4442
4443 if (h != NULL)
4444 {
4445 bfd_boolean dyn;
4446
4447 off = h->got.offset;
4448 if (h->needs_plt
4449 && h->plt.offset != (bfd_vma)-1
4450 && off == (bfd_vma)-1)
4451 {
4452 /* We can't use h->got.offset here to save
4453 state, or even just remember the offset, as
4454 finish_dynamic_symbol would use that as offset into
4455 .got. */
4456 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4457 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4458 base_got = htab->elf.sgotplt;
4459 }
4460
4461 dyn = htab->elf.dynamic_sections_created;
4462
4463 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4464 || (bfd_link_pic (info)
4465 && SYMBOL_REFERENCES_LOCAL (info, h))
4466 || (ELF_ST_VISIBILITY (h->other)
4467 && h->root.type == bfd_link_hash_undefweak))
4468 {
4469 /* This is actually a static link, or it is a -Bsymbolic
4470 link and the symbol is defined locally, or the symbol
4471 was forced to be local because of a version file. We
4472 must initialize this entry in the global offset table.
4473 Since the offset must always be a multiple of 8, we
4474 use the least significant bit to record whether we
4475 have initialized it already.
4476
4477 When doing a dynamic link, we create a .rela.got
4478 relocation entry to initialize the value. This is
4479 done in the finish_dynamic_symbol routine. */
4480 if ((off & 1) != 0)
4481 off &= ~1;
4482 else
4483 {
4484 bfd_put_64 (output_bfd, relocation,
4485 base_got->contents + off);
4486 /* Note that this is harmless for the GOTPLT64 case,
4487 as -1 | 1 still is -1. */
4488 h->got.offset |= 1;
4489 }
4490 }
4491 else
4492 unresolved_reloc = FALSE;
4493 }
4494 else
4495 {
4496 if (local_got_offsets == NULL)
4497 abort ();
4498
4499 off = local_got_offsets[r_symndx];
4500
4501 /* The offset must always be a multiple of 8. We use
4502 the least significant bit to record whether we have
4503 already generated the necessary reloc. */
4504 if ((off & 1) != 0)
4505 off &= ~1;
4506 else
4507 {
4508 bfd_put_64 (output_bfd, relocation,
4509 base_got->contents + off);
4510
4511 if (bfd_link_pic (info))
4512 {
4513 asection *s;
4514 Elf_Internal_Rela outrel;
4515
4516 /* We need to generate a R_X86_64_RELATIVE reloc
4517 for the dynamic linker. */
4518 s = htab->elf.srelgot;
4519 if (s == NULL)
4520 abort ();
4521
4522 outrel.r_offset = (base_got->output_section->vma
4523 + base_got->output_offset
4524 + off);
4525 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4526 outrel.r_addend = relocation;
4527 elf_append_rela (output_bfd, s, &outrel);
4528 }
4529
4530 local_got_offsets[r_symndx] |= 1;
4531 }
4532 }
4533
4534 if (off >= (bfd_vma) -2)
4535 abort ();
4536
4537 relocation = base_got->output_section->vma
4538 + base_got->output_offset + off;
4539 if (r_type != R_X86_64_GOTPCREL
4540 && r_type != R_X86_64_GOTPCRELX
4541 && r_type != R_X86_64_REX_GOTPCRELX
4542 && r_type != R_X86_64_GOTPCREL64)
4543 relocation -= htab->elf.sgotplt->output_section->vma
4544 - htab->elf.sgotplt->output_offset;
4545
4546 break;
4547
4548 case R_X86_64_GOTOFF64:
4549 /* Relocation is relative to the start of the global offset
4550 table. */
4551
4552 /* Check to make sure it isn't a protected function or data
4553 symbol for a shared library, since it may not be local when
4554 used as a function address or with a copy relocation. We also
4555 need to make sure that the symbol is referenced locally. */
4556 if (bfd_link_pic (info) && h)
4557 {
4558 if (!h->def_regular)
4559 {
4560 const char *v;
4561
4562 switch (ELF_ST_VISIBILITY (h->other))
4563 {
4564 case STV_HIDDEN:
4565 v = _("hidden symbol");
4566 break;
4567 case STV_INTERNAL:
4568 v = _("internal symbol");
4569 break;
4570 case STV_PROTECTED:
4571 v = _("protected symbol");
4572 break;
4573 default:
4574 v = _("symbol");
4575 break;
4576 }
4577
4578 (*_bfd_error_handler)
4579 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4580 input_bfd, v, h->root.root.string);
4581 bfd_set_error (bfd_error_bad_value);
4582 return FALSE;
4583 }
4584 else if (!bfd_link_executable (info)
4585 && !SYMBOL_REFERENCES_LOCAL (info, h)
4586 && (h->type == STT_FUNC
4587 || h->type == STT_OBJECT)
4588 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4589 {
4590 (*_bfd_error_handler)
4591 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4592 input_bfd,
4593 h->type == STT_FUNC ? "function" : "data",
4594 h->root.root.string);
4595 bfd_set_error (bfd_error_bad_value);
4596 return FALSE;
4597 }
4598 }
4599
4600 /* Note that sgot is not involved in this
4601 calculation. We always want the start of .got.plt. If we
4602 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4603 permitted by the ABI, we might have to change this
4604 calculation. */
4605 relocation -= htab->elf.sgotplt->output_section->vma
4606 + htab->elf.sgotplt->output_offset;
4607 break;
4608
4609 case R_X86_64_GOTPC32:
4610 case R_X86_64_GOTPC64:
4611 /* Use global offset table as symbol value. */
4612 relocation = htab->elf.sgotplt->output_section->vma
4613 + htab->elf.sgotplt->output_offset;
4614 unresolved_reloc = FALSE;
4615 break;
4616
4617 case R_X86_64_PLTOFF64:
4618 /* Relocation is PLT entry relative to GOT. For local
4619 symbols it's the symbol itself relative to GOT. */
4620 if (h != NULL
4621 /* See PLT32 handling. */
4622 && h->plt.offset != (bfd_vma) -1
4623 && htab->elf.splt != NULL)
4624 {
4625 if (htab->plt_bnd != NULL)
4626 {
4627 resolved_plt = htab->plt_bnd;
4628 plt_offset = eh->plt_bnd.offset;
4629 }
4630 else
4631 {
4632 resolved_plt = htab->elf.splt;
4633 plt_offset = h->plt.offset;
4634 }
4635
4636 relocation = (resolved_plt->output_section->vma
4637 + resolved_plt->output_offset
4638 + plt_offset);
4639 unresolved_reloc = FALSE;
4640 }
4641
4642 relocation -= htab->elf.sgotplt->output_section->vma
4643 + htab->elf.sgotplt->output_offset;
4644 break;
4645
4646 case R_X86_64_PLT32:
4647 case R_X86_64_PLT32_BND:
4648 /* Relocation is to the entry for this symbol in the
4649 procedure linkage table. */
4650
4651 /* Resolve a PLT32 reloc against a local symbol directly,
4652 without using the procedure linkage table. */
4653 if (h == NULL)
4654 break;
4655
4656 if ((h->plt.offset == (bfd_vma) -1
4657 && eh->plt_got.offset == (bfd_vma) -1)
4658 || htab->elf.splt == NULL)
4659 {
4660 /* We didn't make a PLT entry for this symbol. This
4661 happens when statically linking PIC code, or when
4662 using -Bsymbolic. */
4663 break;
4664 }
4665
4666 if (h->plt.offset != (bfd_vma) -1)
4667 {
4668 if (htab->plt_bnd != NULL)
4669 {
4670 resolved_plt = htab->plt_bnd;
4671 plt_offset = eh->plt_bnd.offset;
4672 }
4673 else
4674 {
4675 resolved_plt = htab->elf.splt;
4676 plt_offset = h->plt.offset;
4677 }
4678 }
4679 else
4680 {
4681 /* Use the GOT PLT. */
4682 resolved_plt = htab->plt_got;
4683 plt_offset = eh->plt_got.offset;
4684 }
4685
4686 relocation = (resolved_plt->output_section->vma
4687 + resolved_plt->output_offset
4688 + plt_offset);
4689 unresolved_reloc = FALSE;
4690 break;
4691
4692 case R_X86_64_SIZE32:
4693 case R_X86_64_SIZE64:
4694 /* Set to symbol size. */
4695 relocation = st_size;
4696 goto direct;
4697
4698 case R_X86_64_PC8:
4699 case R_X86_64_PC16:
4700 case R_X86_64_PC32:
4701 case R_X86_64_PC32_BND:
4702 /* Don't complain about -fPIC if the symbol is undefined when
4703 building an executable, unless it is an unresolved weak symbol. */
4704 if ((input_section->flags & SEC_ALLOC) != 0
4705 && (input_section->flags & SEC_READONLY) != 0
4706 && h != NULL
4707 && ((bfd_link_executable (info)
4708 && h->root.type == bfd_link_hash_undefweak
4709 && !resolved_to_zero)
4710 || (bfd_link_pic (info)
4711 && !(bfd_link_pie (info)
4712 && h->root.type == bfd_link_hash_undefined))))
4713 {
4714 bfd_boolean fail = FALSE;
4715 bfd_boolean branch
4716 = ((r_type == R_X86_64_PC32
4717 || r_type == R_X86_64_PC32_BND)
4718 && is_32bit_relative_branch (contents, rel->r_offset));
4719
4720 if (SYMBOL_REFERENCES_LOCAL (info, h))
4721 {
4722 /* Symbol is referenced locally. Make sure it is
4723 defined locally or for a branch. */
4724 fail = !h->def_regular && !branch;
4725 }
4726 else if (!(bfd_link_pie (info)
4727 && (h->needs_copy || eh->needs_copy)))
4728 {
4729 /* Symbol doesn't need copy reloc and isn't referenced
4730 locally. We only allow branch to symbol with
4731 non-default visibility. */
4732 fail = (!branch
4733 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4734 }
4735
4736 if (fail)
4737 return elf_x86_64_need_pic (input_bfd, input_section,
4738 h, NULL, NULL, howto);
4739 }
4740 /* Fall through. */
4741
4742 case R_X86_64_8:
4743 case R_X86_64_16:
4744 case R_X86_64_32:
4745 case R_X86_64_PC64:
4746 case R_X86_64_64:
4747 /* FIXME: The ABI says the linker should make sure the value is
4748 the same when it's zero-extended to 64 bits. */
4749
4750 direct:
4751 if ((input_section->flags & SEC_ALLOC) == 0)
4752 break;
4753
4754 /* Don't copy a pc-relative relocation into the output file
4755 if the symbol needs copy reloc or the symbol is undefined
4756 when building executable. Copy dynamic function pointer
4757 relocations. Don't generate dynamic relocations against
4758 resolved undefined weak symbols in PIE. */
4759 if ((bfd_link_pic (info)
4760 && !(bfd_link_pie (info)
4761 && h != NULL
4762 && (h->needs_copy
4763 || eh->needs_copy
4764 || h->root.type == bfd_link_hash_undefined)
4765 && IS_X86_64_PCREL_TYPE (r_type))
4766 && (h == NULL
4767 || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4768 && !resolved_to_zero)
4769 || h->root.type != bfd_link_hash_undefweak))
4770 && ((! IS_X86_64_PCREL_TYPE (r_type)
4771 && r_type != R_X86_64_SIZE32
4772 && r_type != R_X86_64_SIZE64)
4773 || ! SYMBOL_CALLS_LOCAL (info, h)))
4774 || (ELIMINATE_COPY_RELOCS
4775 && !bfd_link_pic (info)
4776 && h != NULL
4777 && h->dynindx != -1
4778 && (!h->non_got_ref
4779 || eh->func_pointer_refcount > 0
4780 || (h->root.type == bfd_link_hash_undefweak
4781 && !resolved_to_zero))
4782 && ((h->def_dynamic && !h->def_regular)
4783 /* Undefined weak symbol is bound locally when
4784 PIC is false. */
4785 || h->root.type == bfd_link_hash_undefined)))
4786 {
4787 Elf_Internal_Rela outrel;
4788 bfd_boolean skip, relocate;
4789 asection *sreloc;
4790
4791 /* When generating a shared object, these relocations
4792 are copied into the output file to be resolved at run
4793 time. */
4794 skip = FALSE;
4795 relocate = FALSE;
4796
4797 outrel.r_offset =
4798 _bfd_elf_section_offset (output_bfd, info, input_section,
4799 rel->r_offset);
4800 if (outrel.r_offset == (bfd_vma) -1)
4801 skip = TRUE;
4802 else if (outrel.r_offset == (bfd_vma) -2)
4803 skip = TRUE, relocate = TRUE;
4804
4805 outrel.r_offset += (input_section->output_section->vma
4806 + input_section->output_offset);
4807
4808 if (skip)
4809 memset (&outrel, 0, sizeof outrel);
4810
4811 /* h->dynindx may be -1 if this symbol was marked to
4812 become local. */
4813 else if (h != NULL
4814 && h->dynindx != -1
4815 && (IS_X86_64_PCREL_TYPE (r_type)
4816 || !(bfd_link_executable (info)
4817 || SYMBOLIC_BIND (info, h))
4818 || ! h->def_regular))
4819 {
4820 outrel.r_info = htab->r_info (h->dynindx, r_type);
4821 outrel.r_addend = rel->r_addend;
4822 }
4823 else
4824 {
4825 /* This symbol is local, or marked to become local.
4826 When relocation overflow check is disabled, we
4827 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
4828 if (r_type == htab->pointer_r_type
4829 || (r_type == R_X86_64_32
4830 && info->no_reloc_overflow_check))
4831 {
4832 relocate = TRUE;
4833 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4834 outrel.r_addend = relocation + rel->r_addend;
4835 }
4836 else if (r_type == R_X86_64_64
4837 && !ABI_64_P (output_bfd))
4838 {
4839 relocate = TRUE;
4840 outrel.r_info = htab->r_info (0,
4841 R_X86_64_RELATIVE64);
4842 outrel.r_addend = relocation + rel->r_addend;
4843 /* Check addend overflow. */
4844 if ((outrel.r_addend & 0x80000000)
4845 != (rel->r_addend & 0x80000000))
4846 {
4847 const char *name;
4848 int addend = rel->r_addend;
4849 if (h && h->root.root.string)
4850 name = h->root.root.string;
4851 else
4852 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4853 sym, NULL);
4854 if (addend < 0)
4855 (*_bfd_error_handler)
4856 (_("%B: addend -0x%x in relocation %s against "
4857 "symbol `%s' at 0x%lx in section `%A' is "
4858 "out of range"),
4859 input_bfd, input_section, addend,
4860 howto->name, name,
4861 (unsigned long) rel->r_offset);
4862 else
4863 (*_bfd_error_handler)
4864 (_("%B: addend 0x%x in relocation %s against "
4865 "symbol `%s' at 0x%lx in section `%A' is "
4866 "out of range"),
4867 input_bfd, input_section, addend,
4868 howto->name, name,
4869 (unsigned long) rel->r_offset);
4870 bfd_set_error (bfd_error_bad_value);
4871 return FALSE;
4872 }
4873 }
4874 else
4875 {
4876 long sindx;
4877
4878 if (bfd_is_abs_section (sec))
4879 sindx = 0;
4880 else if (sec == NULL || sec->owner == NULL)
4881 {
4882 bfd_set_error (bfd_error_bad_value);
4883 return FALSE;
4884 }
4885 else
4886 {
4887 asection *osec;
4888
4889 /* We are turning this relocation into one
4890 against a section symbol. It would be
4891 proper to subtract the symbol's value,
4892 osec->vma, from the emitted reloc addend,
4893 but ld.so expects buggy relocs. */
4894 osec = sec->output_section;
4895 sindx = elf_section_data (osec)->dynindx;
4896 if (sindx == 0)
4897 {
4898 asection *oi = htab->elf.text_index_section;
4899 sindx = elf_section_data (oi)->dynindx;
4900 }
4901 BFD_ASSERT (sindx != 0);
4902 }
4903
4904 outrel.r_info = htab->r_info (sindx, r_type);
4905 outrel.r_addend = relocation + rel->r_addend;
4906 }
4907 }
4908
4909 sreloc = elf_section_data (input_section)->sreloc;
4910
4911 if (sreloc == NULL || sreloc->contents == NULL)
4912 {
4913 r = bfd_reloc_notsupported;
4914 goto check_relocation_error;
4915 }
4916
4917 elf_append_rela (output_bfd, sreloc, &outrel);
4918
4919 /* If this reloc is against an external symbol, we do
4920 not want to fiddle with the addend. Otherwise, we
4921 need to include the symbol value so that it becomes
4922 an addend for the dynamic reloc. */
4923 if (! relocate)
4924 continue;
4925 }
4926
4927 break;
4928
4929 case R_X86_64_TLSGD:
4930 case R_X86_64_GOTPC32_TLSDESC:
4931 case R_X86_64_TLSDESC_CALL:
4932 case R_X86_64_GOTTPOFF:
4933 tls_type = GOT_UNKNOWN;
4934 if (h == NULL && local_got_offsets)
4935 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4936 else if (h != NULL)
4937 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4938
4939 if (! elf_x86_64_tls_transition (info, input_bfd,
4940 input_section, contents,
4941 symtab_hdr, sym_hashes,
4942 &r_type, tls_type, rel,
4943 relend, h, r_symndx, TRUE))
4944 return FALSE;
4945
4946 if (r_type == R_X86_64_TPOFF32)
4947 {
4948 bfd_vma roff = rel->r_offset;
4949
4950 BFD_ASSERT (! unresolved_reloc);
4951
4952 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4953 {
4954 /* GD->LE transition. For 64bit, change
4955 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4956 .word 0x6666; rex64; call __tls_get_addr@PLT
4957 or
4958 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4959 .byte 0x66; rex64
4960 call *__tls_get_addr@GOTPCREL(%rip)
4961 which may be converted to
4962 addr32 call __tls_get_addr
4963 into:
4964 movq %fs:0, %rax
4965 leaq foo@tpoff(%rax), %rax
4966 For 32bit, change
4967 leaq foo@tlsgd(%rip), %rdi
4968 .word 0x6666; rex64; call __tls_get_addr@PLT
4969 or
4970 leaq foo@tlsgd(%rip), %rdi
4971 .byte 0x66; rex64
4972 call *__tls_get_addr@GOTPCREL(%rip)
4973 which may be converted to
4974 addr32 call __tls_get_addr
4975 into:
4976 movl %fs:0, %eax
4977 leaq foo@tpoff(%rax), %rax
4978 For largepic, change:
4979 leaq foo@tlsgd(%rip), %rdi
4980 movabsq $__tls_get_addr@pltoff, %rax
4981 addq %r15, %rax
4982 call *%rax
4983 into:
4984 movq %fs:0, %rax
4985 leaq foo@tpoff(%rax), %rax
4986 nopw 0x0(%rax,%rax,1) */
4987 int largepic = 0;
4988 if (ABI_64_P (output_bfd))
4989 {
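/* 0xb8 at roff + 5 (after the REX.W prefix) identifies the movabsq of the large-model PIC sequence described above. */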
4990 if (contents[roff + 5] == 0xb8)
4991 {
4992 memcpy (contents + roff - 3,
4993 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4994 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4995 largepic = 1;
4996 }
4997 else
4998 memcpy (contents + roff - 4,
4999 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5000 16);
5001 }
5002 else
5003 memcpy (contents + roff - 3,
5004 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5005 15);
5006 bfd_put_32 (output_bfd,
5007 elf_x86_64_tpoff (info, relocation),
5008 contents + roff + 8 + largepic);
5009 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
5010 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
5011 rel++;
5012 wrel++;
5013 continue;
5014 }
5015 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5016 {
5017 /* GDesc -> LE transition.
5018 It's originally something like:
5019 leaq x@tlsdesc(%rip), %rax
5020
5021 Change it to:
5022 movl $x@tpoff, %rax. */
5023
5024 unsigned int val, type;
5025
5026 type = bfd_get_8 (input_bfd, contents + roff - 3);
5027 val = bfd_get_8 (input_bfd, contents + roff - 1);
5028 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
5029 contents + roff - 3);
5030 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
5031 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
5032 contents + roff - 1);
5033 bfd_put_32 (output_bfd,
5034 elf_x86_64_tpoff (info, relocation),
5035 contents + roff);
5036 continue;
5037 }
5038 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5039 {
5040 /* GDesc -> LE transition.
5041 It's originally:
5042 call *(%rax)
5043 Turn it into:
5044 xchg %ax,%ax. */
5045 bfd_put_8 (output_bfd, 0x66, contents + roff);
5046 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5047 continue;
5048 }
5049 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
5050 {
5051 /* IE->LE transition:
5052 For 64bit, originally it can be one of:
5053 movq foo@gottpoff(%rip), %reg
5054 addq foo@gottpoff(%rip), %reg
5055 We change it into:
5056 movq $foo, %reg
5057 leaq foo(%reg), %reg
5058 addq $foo, %reg.
5059 For 32bit, originally it can be one of:
5060 movq foo@gottpoff(%rip), %reg
5061 addl foo@gottpoff(%rip), %reg
5062 We change it into:
5063 movq $foo, %reg
5064 leal foo(%reg), %reg
5065 addl $foo, %reg. */
5066
5067 unsigned int val, type, reg;
5068
5069 if (roff >= 3)
5070 val = bfd_get_8 (input_bfd, contents + roff - 3);
5071 else
5072 val = 0;
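/* val holds the REX prefix (if any), type the opcode byte (0x8b for movq, 0x03 for addq); the ModRM byte's reg field below gives the destination register, mod being 00 for RIP-relative addressing. */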
5073 type = bfd_get_8 (input_bfd, contents + roff - 2);
5074 reg = bfd_get_8 (input_bfd, contents + roff - 1);
5075 reg >>= 3;
5076 if (type == 0x8b)
5077 {
5078 /* movq */
5079 if (val == 0x4c)
5080 bfd_put_8 (output_bfd, 0x49,
5081 contents + roff - 3);
5082 else if (!ABI_64_P (output_bfd) && val == 0x44)
5083 bfd_put_8 (output_bfd, 0x41,
5084 contents + roff - 3);
5085 bfd_put_8 (output_bfd, 0xc7,
5086 contents + roff - 2);
5087 bfd_put_8 (output_bfd, 0xc0 | reg,
5088 contents + roff - 1);
5089 }
5090 else if (reg == 4)
5091 {
5092 /* addq/addl -> addq/addl - addressing with %rsp/%r12
5093 is special */
5094 if (val == 0x4c)
5095 bfd_put_8 (output_bfd, 0x49,
5096 contents + roff - 3);
5097 else if (!ABI_64_P (output_bfd) && val == 0x44)
5098 bfd_put_8 (output_bfd, 0x41,
5099 contents + roff - 3);
5100 bfd_put_8 (output_bfd, 0x81,
5101 contents + roff - 2);
5102 bfd_put_8 (output_bfd, 0xc0 | reg,
5103 contents + roff - 1);
5104 }
5105 else
5106 {
5107 /* addq/addl -> leaq/leal */
5108 if (val == 0x4c)
5109 bfd_put_8 (output_bfd, 0x4d,
5110 contents + roff - 3);
5111 else if (!ABI_64_P (output_bfd) && val == 0x44)
5112 bfd_put_8 (output_bfd, 0x45,
5113 contents + roff - 3);
5114 bfd_put_8 (output_bfd, 0x8d,
5115 contents + roff - 2);
5116 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
5117 contents + roff - 1);
5118 }
5119 bfd_put_32 (output_bfd,
5120 elf_x86_64_tpoff (info, relocation),
5121 contents + roff);
5122 continue;
5123 }
5124 else
5125 BFD_ASSERT (FALSE);
5126 }
5127
5128 if (htab->elf.sgot == NULL)
5129 abort ();
5130
5131 if (h != NULL)
5132 {
5133 off = h->got.offset;
5134 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
5135 }
5136 else
5137 {
5138 if (local_got_offsets == NULL)
5139 abort ();
5140
5141 off = local_got_offsets[r_symndx];
5142 offplt = local_tlsdesc_gotents[r_symndx];
5143 }
5144
5145 if ((off & 1) != 0)
5146 off &= ~1;
5147 else
5148 {
5149 Elf_Internal_Rela outrel;
5150 int dr_type, indx;
5151 asection *sreloc;
5152
5153 if (htab->elf.srelgot == NULL)
5154 abort ();
5155
5156 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5157
5158 if (GOT_TLS_GDESC_P (tls_type))
5159 {
5160 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
5161 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
5162 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
5163 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
5164 + htab->elf.sgotplt->output_offset
5165 + offplt
5166 + htab->sgotplt_jump_table_size);
5167 sreloc = htab->elf.srelplt;
5168 if (indx == 0)
5169 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5170 else
5171 outrel.r_addend = 0;
5172 elf_append_rela (output_bfd, sreloc, &outrel);
5173 }
5174
5175 sreloc = htab->elf.srelgot;
5176
5177 outrel.r_offset = (htab->elf.sgot->output_section->vma
5178 + htab->elf.sgot->output_offset + off);
5179
5180 if (GOT_TLS_GD_P (tls_type))
5181 dr_type = R_X86_64_DTPMOD64;
5182 else if (GOT_TLS_GDESC_P (tls_type))
5183 goto dr_done;
5184 else
5185 dr_type = R_X86_64_TPOFF64;
5186
5187 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
5188 outrel.r_addend = 0;
5189 if ((dr_type == R_X86_64_TPOFF64
5190 || dr_type == R_X86_64_TLSDESC) && indx == 0)
5191 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5192 outrel.r_info = htab->r_info (indx, dr_type);
5193
5194 elf_append_rela (output_bfd, sreloc, &outrel);
5195
5196 if (GOT_TLS_GD_P (tls_type))
5197 {
5198 if (indx == 0)
5199 {
5200 BFD_ASSERT (! unresolved_reloc);
5201 bfd_put_64 (output_bfd,
5202 relocation - elf_x86_64_dtpoff_base (info),
5203 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5204 }
5205 else
5206 {
5207 bfd_put_64 (output_bfd, 0,
5208 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5209 outrel.r_info = htab->r_info (indx,
5210 R_X86_64_DTPOFF64);
5211 outrel.r_offset += GOT_ENTRY_SIZE;
5212 elf_append_rela (output_bfd, sreloc,
5213 &outrel);
5214 }
5215 }
5216
5217 dr_done:
5218 if (h != NULL)
5219 h->got.offset |= 1;
5220 else
5221 local_got_offsets[r_symndx] |= 1;
5222 }
5223
5224 if (off >= (bfd_vma) -2
5225 && ! GOT_TLS_GDESC_P (tls_type))
5226 abort ();
5227 if (r_type == ELF32_R_TYPE (rel->r_info))
5228 {
5229 if (r_type == R_X86_64_GOTPC32_TLSDESC
5230 || r_type == R_X86_64_TLSDESC_CALL)
5231 relocation = htab->elf.sgotplt->output_section->vma
5232 + htab->elf.sgotplt->output_offset
5233 + offplt + htab->sgotplt_jump_table_size;
5234 else
5235 relocation = htab->elf.sgot->output_section->vma
5236 + htab->elf.sgot->output_offset + off;
5237 unresolved_reloc = FALSE;
5238 }
5239 else
5240 {
5241 bfd_vma roff = rel->r_offset;
5242
5243 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5244 {
5245 /* GD->IE transition. For 64bit, change
5246 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5247 .word 0x6666; rex64; call __tls_get_addr@PLT
5248 or
5249 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5250 .byte 0x66; rex64
5251 call *__tls_get_addr@GOTPCREL(%rip)
5252 which may be converted to
5253 addr32 call __tls_get_addr
5254 into:
5255 movq %fs:0, %rax
5256 addq foo@gottpoff(%rip), %rax
5257 For 32bit, change
5258 leaq foo@tlsgd(%rip), %rdi
5259 .word 0x6666; rex64; call __tls_get_addr@PLT
5260 or
5261 leaq foo@tlsgd(%rip), %rdi
5262 .byte 0x66; rex64;
5263 call *__tls_get_addr@GOTPCREL(%rip)
5264 which may be converted to
5265 addr32 call __tls_get_addr
5266 into:
5267 movl %fs:0, %eax
5268 addq foo@gottpoff(%rip), %rax
5269 For largepic, change:
5270 leaq foo@tlsgd(%rip), %rdi
5271 movabsq $__tls_get_addr@pltoff, %rax
5272 addq %r15, %rax
5273 call *%rax
5274 into:
5275 movq %fs:0, %rax
5276 addq foo@gottpoff(%rax), %rax
5277 nopw 0x0(%rax,%rax,1) */
5278 int largepic = 0;
5279 if (ABI_64_P (output_bfd))
5280 {
5281 if (contents[roff + 5] == 0xb8)
5282 {
5283 memcpy (contents + roff - 3,
5284 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
5285 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5286 largepic = 1;
5287 }
5288 else
5289 memcpy (contents + roff - 4,
5290 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5291 16);
5292 }
5293 else
5294 memcpy (contents + roff - 3,
5295 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5296 15);
5297
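/* The rewritten addq takes a RIP-relative displacement stored at roff + 8 + largepic and measured from the end of that 4-byte field, hence the extra 12 (+ largepic) subtracted here. */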
5298 relocation = (htab->elf.sgot->output_section->vma
5299 + htab->elf.sgot->output_offset + off
5300 - roff
5301 - largepic
5302 - input_section->output_section->vma
5303 - input_section->output_offset
5304 - 12);
5305 bfd_put_32 (output_bfd, relocation,
5306 contents + roff + 8 + largepic);
5307 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5308 rel++;
5309 wrel++;
5310 continue;
5311 }
5312 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5313 {
5314 /* GDesc -> IE transition.
5315 It's originally something like:
5316 leaq x@tlsdesc(%rip), %rax
5317
5318 Change it to:
5319 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
5320
5321 /* Now modify the instruction as appropriate. To
5322 turn a leaq into a movq in the form we use it, it
5323 suffices to change the second byte from 0x8d to
5324 0x8b. */
5325 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
5326
5327 bfd_put_32 (output_bfd,
5328 htab->elf.sgot->output_section->vma
5329 + htab->elf.sgot->output_offset + off
5330 - rel->r_offset
5331 - input_section->output_section->vma
5332 - input_section->output_offset
5333 - 4,
5334 contents + roff);
5335 continue;
5336 }
5337 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5338 {
5339 /* GDesc -> IE transition.
5340 It's originally:
5341 call *(%rax)
5342
5343 Change it to:
5344 xchg %ax, %ax. */
5345
5346 bfd_put_8 (output_bfd, 0x66, contents + roff);
5347 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5348 continue;
5349 }
5350 else
5351 BFD_ASSERT (FALSE);
5352 }
5353 break;
5354
5355 case R_X86_64_TLSLD:
5356 if (! elf_x86_64_tls_transition (info, input_bfd,
5357 input_section, contents,
5358 symtab_hdr, sym_hashes,
5359 &r_type, GOT_UNKNOWN, rel,
5360 relend, h, r_symndx, TRUE))
5361 return FALSE;
5362
5363 if (r_type != R_X86_64_TLSLD)
5364 {
5365 /* LD->LE transition:
5366 leaq foo@tlsld(%rip), %rdi
5367 call __tls_get_addr@PLT
5368 For 64bit, we change it into:
5369 .word 0x6666; .byte 0x66; movq %fs:0, %rax
5370 For 32bit, we change it into:
5371 nopl 0x0(%rax); movl %fs:0, %eax
5372 Or
5373 leaq foo@tlsld(%rip), %rdi;
5374 call *__tls_get_addr@GOTPCREL(%rip)
5375 which may be converted to
5376 addr32 call __tls_get_addr
5377 For 64bit, we change it into:
5378 .word 0x6666; .word 0x6666; movq %fs:0, %rax
5379 For 32bit, we change it into:
5380 nopw 0x0(%rax); movl %fs:0, %eax
5381 For largepic, change:
5382 leaq foo@tlsgd(%rip), %rdi
5383 movabsq $__tls_get_addr@pltoff, %rax
5384 addq %rbx, %rax
5385 call *%rax
5386 into
5387 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
5388 movq %fs:0, %rax */
5389
5390 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
5391 if (ABI_64_P (output_bfd))
5392 {
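/* Distinguish the 64-bit forms described above by the bytes after the leaq: 0xb8 at +5 is the large-model movabsq, 0xff or 0x67 at +4 the indirect or addr32 call, anything else the direct call. */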
5393 if (contents[rel->r_offset + 5] == 0xb8)
5394 memcpy (contents + rel->r_offset - 3,
5395 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
5396 "\x64\x48\x8b\x04\x25\0\0\0", 22);
5397 else if (contents[rel->r_offset + 4] == 0xff
5398 || contents[rel->r_offset + 4] == 0x67)
5399 memcpy (contents + rel->r_offset - 3,
5400 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
5401 13);
5402 else
5403 memcpy (contents + rel->r_offset - 3,
5404 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
5405 }
5406 else
5407 {
5408 if (contents[rel->r_offset + 4] == 0xff)
5409 memcpy (contents + rel->r_offset - 3,
5410 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
5411 13);
5412 else
5413 memcpy (contents + rel->r_offset - 3,
5414 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5415 }
5416 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
5417 and R_X86_64_PLTOFF64. */
5418 rel++;
5419 wrel++;
5420 continue;
5421 }
5422
5423 if (htab->elf.sgot == NULL)
5424 abort ();
5425
5426 off = htab->tls_ld_got.offset;
5427 if (off & 1)
5428 off &= ~1;
5429 else
5430 {
5431 Elf_Internal_Rela outrel;
5432
5433 if (htab->elf.srelgot == NULL)
5434 abort ();
5435
5436 outrel.r_offset = (htab->elf.sgot->output_section->vma
5437 + htab->elf.sgot->output_offset + off);
5438
5439 bfd_put_64 (output_bfd, 0,
5440 htab->elf.sgot->contents + off);
5441 bfd_put_64 (output_bfd, 0,
5442 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5443 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5444 outrel.r_addend = 0;
5445 elf_append_rela (output_bfd, htab->elf.srelgot,
5446 &outrel);
5447 htab->tls_ld_got.offset |= 1;
5448 }
5449 relocation = htab->elf.sgot->output_section->vma
5450 + htab->elf.sgot->output_offset + off;
5451 unresolved_reloc = FALSE;
5452 break;
5453
5454 case R_X86_64_DTPOFF32:
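/* In an executable, TLS sequences in code sections have been relaxed to the LE model, so resolve @dtpoff there against the thread pointer; references from non-code (e.g. debug) sections still use the module base. */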
5455 if (!bfd_link_executable (info)
5456 || (input_section->flags & SEC_CODE) == 0)
5457 relocation -= elf_x86_64_dtpoff_base (info);
5458 else
5459 relocation = elf_x86_64_tpoff (info, relocation);
5460 break;
5461
5462 case R_X86_64_TPOFF32:
5463 case R_X86_64_TPOFF64:
5464 BFD_ASSERT (bfd_link_executable (info));
5465 relocation = elf_x86_64_tpoff (info, relocation);
5466 break;
5467
5468 case R_X86_64_DTPOFF64:
5469 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5470 relocation -= elf_x86_64_dtpoff_base (info);
5471 break;
5472
5473 default:
5474 break;
5475 }
5476
5477 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5478 because such sections are not SEC_ALLOC and thus ld.so will
5479 not process them. */
5480 if (unresolved_reloc
5481 && !((input_section->flags & SEC_DEBUGGING) != 0
5482 && h->def_dynamic)
5483 && _bfd_elf_section_offset (output_bfd, info, input_section,
5484 rel->r_offset) != (bfd_vma) -1)
5485 {
5486 (*_bfd_error_handler)
5487 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5488 input_bfd,
5489 input_section,
5490 (long) rel->r_offset,
5491 howto->name,
5492 h->root.root.string);
5493 return FALSE;
5494 }
5495
5496 do_relocation:
5497 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5498 contents, rel->r_offset,
5499 relocation, rel->r_addend);
5500
5501 check_relocation_error:
5502 if (r != bfd_reloc_ok)
5503 {
5504 const char *name;
5505
5506 if (h != NULL)
5507 name = h->root.root.string;
5508 else
5509 {
5510 name = bfd_elf_string_from_elf_section (input_bfd,
5511 symtab_hdr->sh_link,
5512 sym->st_name);
5513 if (name == NULL)
5514 return FALSE;
5515 if (*name == '\0')
5516 name = bfd_section_name (input_bfd, sec);
5517 }
5518
5519 if (r == bfd_reloc_overflow)
5520 (*info->callbacks->reloc_overflow)
5521 (info, (h ? &h->root : NULL), name, howto->name,
5522 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5523 else
5524 {
5525 (*_bfd_error_handler)
5526 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5527 input_bfd, input_section,
5528 (long) rel->r_offset, name, (int) r);
5529 return FALSE;
5530 }
5531 }
5532
5533 if (wrel != rel)
5534 *wrel = *rel;
5535 }
5536
5537 if (wrel != rel)
5538 {
5539 Elf_Internal_Shdr *rel_hdr;
5540 size_t deleted = rel - wrel;
5541
5542 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5543 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5544 if (rel_hdr->sh_size == 0)
5545 {
5546 /* It is too late to remove an empty reloc section. Leave
5547 one NONE reloc.
5548 ??? What is wrong with an empty section??? */
5549 rel_hdr->sh_size = rel_hdr->sh_entsize;
5550 deleted -= 1;
5551 }
5552 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5553 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5554 input_section->reloc_count -= deleted;
5555 }
5556
5557 return TRUE;
5558 }
5559
5560 /* Finish up dynamic symbol handling. We set the contents of various
5561 dynamic sections here. */
5562
5563 static bfd_boolean
5564 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5565 struct bfd_link_info *info,
5566 struct elf_link_hash_entry *h,
5567 Elf_Internal_Sym *sym)
5568 {
5569 struct elf_x86_64_link_hash_table *htab;
5570 const struct elf_x86_64_backend_data *abed;
5571 bfd_boolean use_plt_bnd;
5572 struct elf_x86_64_link_hash_entry *eh;
5573 bfd_boolean local_undefweak;
5574
5575 htab = elf_x86_64_hash_table (info);
5576 if (htab == NULL)
5577 return FALSE;
5578
5579 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5580 section only if there is .plt section. */
5581 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5582 abed = (use_plt_bnd
5583 ? &elf_x86_64_bnd_arch_bed
5584 : get_elf_x86_64_backend_data (output_bfd));
5585
5586 eh = (struct elf_x86_64_link_hash_entry *) h;
5587
5588 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
5589 resolved undefined weak symbols in executable so that their
5590 references have value 0 at run-time. */
5591 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
5592 eh->has_got_reloc,
5593 eh);
5594
5595 if (h->plt.offset != (bfd_vma) -1)
5596 {
5597 bfd_vma plt_index;
5598 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5599 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5600 Elf_Internal_Rela rela;
5601 bfd_byte *loc;
5602 asection *plt, *gotplt, *relplt, *resolved_plt;
5603 const struct elf_backend_data *bed;
5604 bfd_vma plt_got_pcrel_offset;
5605
5606 /* When building a static executable, use .iplt, .igot.plt and
5607 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5608 if (htab->elf.splt != NULL)
5609 {
5610 plt = htab->elf.splt;
5611 gotplt = htab->elf.sgotplt;
5612 relplt = htab->elf.srelplt;
5613 }
5614 else
5615 {
5616 plt = htab->elf.iplt;
5617 gotplt = htab->elf.igotplt;
5618 relplt = htab->elf.irelplt;
5619 }
5620
5621 /* This symbol has an entry in the procedure linkage table. Set
5622 it up. */
5623 if ((h->dynindx == -1
5624 && !local_undefweak
5625 && !((h->forced_local || bfd_link_executable (info))
5626 && h->def_regular
5627 && h->type == STT_GNU_IFUNC))
5628 || plt == NULL
5629 || gotplt == NULL
5630 || relplt == NULL)
5631 abort ();
5632
5633 /* Get the index in the procedure linkage table which
5634 corresponds to this symbol. This is the index of this symbol
5635 in all the symbols for which we are making plt entries. The
5636 first entry in the procedure linkage table is reserved.
5637
5638 Get the offset into the .got table of the entry that
5639 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5640 bytes. The first three are reserved for the dynamic linker.
5641
5642 For static executables, we don't reserve anything. */
5643
5644 if (plt == htab->elf.splt)
5645 {
5646 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5647 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5648 }
5649 else
5650 {
5651 got_offset = h->plt.offset / abed->plt_entry_size;
5652 got_offset = got_offset * GOT_ENTRY_SIZE;
5653 }
5654
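	  /* Worked example (added for illustration, assuming the default
	     16-byte lazy PLT entries and 8-byte GOT entries): for
	     h->plt.offset == 32, the second non-reserved PLT entry,
	     32 / 16 - 1 == 1 and got_offset == (1 + 3) * 8 == 32, i.e.
	     GOT[4]: GOT[0..2] are reserved for the dynamic linker and
	     GOT[3] belongs to the first PLT entry.  */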
5655 plt_plt_insn_end = abed->plt_plt_insn_end;
5656 plt_plt_offset = abed->plt_plt_offset;
5657 plt_got_insn_size = abed->plt_got_insn_size;
5658 plt_got_offset = abed->plt_got_offset;
5659 if (use_plt_bnd)
5660 {
5661 /* Use the second PLT with BND relocations. */
5662 const bfd_byte *plt_entry, *plt2_entry;
5663
5664 if (eh->has_bnd_reloc)
5665 {
5666 plt_entry = elf_x86_64_bnd_plt_entry;
5667 plt2_entry = elf_x86_64_bnd_plt2_entry;
5668 }
5669 else
5670 {
5671 plt_entry = elf_x86_64_legacy_plt_entry;
5672 plt2_entry = elf_x86_64_legacy_plt2_entry;
5673
5674 /* Subtract 1 since there is no BND prefix. */
5675 plt_plt_insn_end -= 1;
5676 plt_plt_offset -= 1;
5677 plt_got_insn_size -= 1;
5678 plt_got_offset -= 1;
5679 }
5680
5681 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5682 == sizeof (elf_x86_64_legacy_plt_entry));
5683
5684 /* Fill in the entry in the procedure linkage table. */
5685 memcpy (plt->contents + h->plt.offset,
5686 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5687 /* Fill in the entry in the second PLT. */
5688 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5689 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5690
5691 resolved_plt = htab->plt_bnd;
5692 plt_offset = eh->plt_bnd.offset;
5693 }
5694 else
5695 {
5696 /* Fill in the entry in the procedure linkage table. */
5697 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5698 abed->plt_entry_size);
5699
5700 resolved_plt = plt;
5701 plt_offset = h->plt.offset;
5702 }
5703
5704 /* Insert the relocation positions of the plt section. */
5705
5706 /* Store the PC-relative offset to the GOT entry, subtracting
5707 the size of the instruction that references it.  */
5708 plt_got_pcrel_offset = (gotplt->output_section->vma
5709 + gotplt->output_offset
5710 + got_offset
5711 - resolved_plt->output_section->vma
5712 - resolved_plt->output_offset
5713 - plt_offset
5714 - plt_got_insn_size);
5715
5716 /* Check PC-relative offset overflow in PLT entry. */
5717 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5718 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5719 output_bfd, h->root.root.string);
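	  /* Note added for clarity: the biased comparison above is the usual
	     idiom for a signed 32-bit range check; adding 0x80000000 maps
	     the representable displacements [-0x80000000, 0x7fffffff] onto
	     [0, 0xffffffff], so any value outside that window fails the
	     test and triggers the fatal diagnostic.  */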
5720
5721 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5722 resolved_plt->contents + plt_offset + plt_got_offset);
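	  /* For reference (illustrative; assumes the standard lazy PLT
	     template): the 32-bit value written above becomes the
	     displacement of the indirect jump at the start of the entry,
	         ff 25 xx xx xx xx	jmpq *name@GOTPCREL(%rip)
	     where the displacement equals the GOT slot address minus the
	     address of the next instruction, hence the plt_got_insn_size
	     subtraction.  */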
5723
5724 /* Fill in the entry in the global offset table; initially this
5725 points to the second part of the PLT entry.  Leave the entry
5726 as zero for an undefined weak symbol in PIE, since no PLT
5727 relocation is emitted against such a symbol.  */
5728 if (!local_undefweak)
5729 {
5730 bfd_put_64 (output_bfd, (plt->output_section->vma
5731 + plt->output_offset
5732 + h->plt.offset
5733 + abed->plt_lazy_offset),
5734 gotplt->contents + got_offset);
5735
5736 /* Fill in the entry in the .rela.plt section. */
5737 rela.r_offset = (gotplt->output_section->vma
5738 + gotplt->output_offset
5739 + got_offset);
5740 if (h->dynindx == -1
5741 || ((bfd_link_executable (info)
5742 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5743 && h->def_regular
5744 && h->type == STT_GNU_IFUNC))
5745 {
5746 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5747 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5748 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5749 rela.r_addend = (h->root.u.def.value
5750 + h->root.u.def.section->output_section->vma
5751 + h->root.u.def.section->output_offset);
5752 /* R_X86_64_IRELATIVE comes last. */
5753 plt_index = htab->next_irelative_index--;
5754 }
5755 else
5756 {
5757 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5758 rela.r_addend = 0;
5759 plt_index = htab->next_jump_slot_index++;
5760 }
5761
5762 /* Don't fill PLT entry for static executables. */
5763 if (plt == htab->elf.splt)
5764 {
5765 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5766
5767 /* Put relocation index. */
5768 bfd_put_32 (output_bfd, plt_index,
5769 (plt->contents + h->plt.offset
5770 + abed->plt_reloc_offset));
5771
5772 /* Put offset for jmp .PLT0 and check for overflow. We don't
5773 check relocation index for overflow since branch displacement
5774 will overflow first. */
5775 if (plt0_offset > 0x80000000)
5776 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5777 output_bfd, h->root.root.string);
5778 bfd_put_32 (output_bfd, - plt0_offset,
5779 plt->contents + h->plt.offset + plt_plt_offset);
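	      /* For reference (illustrative; assumes the standard lazy PLT
		 template): the two 32-bit values patched above complete
		     68 xx xx xx xx	pushq  $reloc_index
		     e9 xx xx xx xx	jmpq   PLT0
		 so an unresolved call pushes its .rela.plt index and
		 branches back to the PLT header, which enters the dynamic
		 linker's lazy resolver.  */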
5780 }
5781
5782 bed = get_elf_backend_data (output_bfd);
5783 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5784 bed->s->swap_reloca_out (output_bfd, &rela, loc);
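	  /* Note added for clarity: the Elf64_Rela entry just swapped out
	     is 24 bytes: r_offset is the .got.plt slot address, r_info
	     packs the dynamic symbol index with R_X86_64_JUMP_SLOT (or 0
	     with R_X86_64_IRELATIVE), and r_addend is 0 (or the IFUNC
	     resolver's address for IRELATIVE).  */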
5785 }
5786 }
5787 else if (eh->plt_got.offset != (bfd_vma) -1)
5788 {
5789 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5790 asection *plt, *got;
5791 bfd_boolean got_after_plt;
5792 int32_t got_pcrel_offset;
5793 const bfd_byte *got_plt_entry;
5794
5795 /* Set the entry in the GOT procedure linkage table. */
5796 plt = htab->plt_got;
5797 got = htab->elf.sgot;
5798 got_offset = h->got.offset;
5799
5800 if (got_offset == (bfd_vma) -1
5801 || h->type == STT_GNU_IFUNC
5802 || plt == NULL
5803 || got == NULL)
5804 abort ();
5805
5806 /* Use the second PLT entry template for the GOT PLT since they
5807 are identical.  */
5808 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5809 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5810 if (eh->has_bnd_reloc)
5811 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5812 else
5813 {
5814 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5815
5816 /* Subtract 1 since there is no BND prefix. */
5817 plt_got_insn_size -= 1;
5818 plt_got_offset -= 1;
5819 }
5820
5821 /* Fill in the entry in the GOT procedure linkage table. */
5822 plt_offset = eh->plt_got.offset;
5823 memcpy (plt->contents + plt_offset,
5824 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5825
5826 /* Store the PC-relative offset to the GOT entry, subtracting
5827 the size of the instruction that references it.  */
5828 got_pcrel_offset = (got->output_section->vma
5829 + got->output_offset
5830 + got_offset
5831 - plt->output_section->vma
5832 - plt->output_offset
5833 - plt_offset
5834 - plt_got_insn_size);
5835
5836 /* Check PC-relative offset overflow in GOT PLT entry. */
5837 got_after_plt = got->output_section->vma > plt->output_section->vma;
5838 if ((got_after_plt && got_pcrel_offset < 0)
5839 || (!got_after_plt && got_pcrel_offset > 0))
5840 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5841 output_bfd, h->root.root.string);
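      /* Note added for clarity: got_pcrel_offset is declared int32_t, so a
	 displacement that does not fit in 32 bits wraps around and its
	 sign disagrees with the relative placement of the .got and
	 .plt.got output sections; that sign mismatch is what the check
	 above detects.  */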
5842
5843 bfd_put_32 (output_bfd, got_pcrel_offset,
5844 plt->contents + plt_offset + plt_got_offset);
5845 }
5846
5847 if (!local_undefweak
5848 && !h->def_regular
5849 && (h->plt.offset != (bfd_vma) -1
5850 || eh->plt_got.offset != (bfd_vma) -1))
5851 {
5852 /* Mark the symbol as undefined, rather than as defined in
5853 the .plt section. Leave the value if there were any
5854 relocations where pointer equality matters (this is a clue
5855 for the dynamic linker, to make function pointer
5856 comparisons work between an application and shared
5857 library), otherwise set it to zero. If a function is only
5858 called from a binary, there is no need to slow down
5859 shared libraries because of that. */
5860 sym->st_shndx = SHN_UNDEF;
5861 if (!h->pointer_equality_needed)
5862 sym->st_value = 0;
5863 }
5864
5865 /* Don't generate dynamic GOT relocation against undefined weak
5866 symbol in executable. */
5867 if (h->got.offset != (bfd_vma) -1
5868 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5869 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE
5870 && !local_undefweak)
5871 {
5872 Elf_Internal_Rela rela;
5873
5874 /* This symbol has an entry in the global offset table. Set it
5875 up. */
5876 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5877 abort ();
5878
5879 rela.r_offset = (htab->elf.sgot->output_section->vma
5880 + htab->elf.sgot->output_offset
5881 + (h->got.offset &~ (bfd_vma) 1));
5882
5883 /* If this is a static link, or it is a -Bsymbolic link and the
5884 symbol is defined locally or was forced to be local because
5885 of a version file, we just want to emit a RELATIVE reloc.
5886 The entry in the global offset table will already have been
5887 initialized in the relocate_section function. */
5888 if (h->def_regular
5889 && h->type == STT_GNU_IFUNC)
5890 {
5891 if (bfd_link_pic (info))
5892 {
5893 /* Generate R_X86_64_GLOB_DAT. */
5894 goto do_glob_dat;
5895 }
5896 else
5897 {
5898 asection *plt;
5899
5900 if (!h->pointer_equality_needed)
5901 abort ();
5902
5903 /* For a non-shared object, we can't use .got.plt, which
5904 contains the real function address, if we need pointer
5905 equality.  We load the GOT entry with the PLT entry.  */
5906 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5907 bfd_put_64 (output_bfd, (plt->output_section->vma
5908 + plt->output_offset
5909 + h->plt.offset),
5910 htab->elf.sgot->contents + h->got.offset);
5911 return TRUE;
5912 }
5913 }
5914 else if (bfd_link_pic (info)
5915 && SYMBOL_REFERENCES_LOCAL (info, h))
5916 {
5917 if (!h->def_regular)
5918 return FALSE;
5919 BFD_ASSERT((h->got.offset & 1) != 0);
5920 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5921 rela.r_addend = (h->root.u.def.value
5922 + h->root.u.def.section->output_section->vma
5923 + h->root.u.def.section->output_offset);
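	  /* Note added for clarity: an R_X86_64_RELATIVE entry names no
	     symbol; at run time the dynamic linker just adds the load base
	     to r_addend, so the addend computed above must be the symbol's
	     link-time address.  */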
5924 }
5925 else
5926 {
5927 BFD_ASSERT((h->got.offset & 1) == 0);
5928 do_glob_dat:
5929 bfd_put_64 (output_bfd, (bfd_vma) 0,
5930 htab->elf.sgot->contents + h->got.offset);
5931 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5932 rela.r_addend = 0;
5933 }
5934
5935 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5936 }
5937
5938 if (h->needs_copy)
5939 {
5940 Elf_Internal_Rela rela;
5941
5942 /* This symbol needs a copy reloc. Set it up. */
5943
5944 if (h->dynindx == -1
5945 || (h->root.type != bfd_link_hash_defined
5946 && h->root.type != bfd_link_hash_defweak)
5947 || htab->srelbss == NULL)
5948 abort ();
5949
5950 rela.r_offset = (h->root.u.def.value
5951 + h->root.u.def.section->output_section->vma
5952 + h->root.u.def.section->output_offset);
5953 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5954 rela.r_addend = 0;
5955 elf_append_rela (output_bfd, htab->srelbss, &rela);
5956 }
5957
5958 return TRUE;
5959 }
5960
5961 /* Finish up local dynamic symbol handling. We set the contents of
5962 various dynamic sections here. */
5963
5964 static bfd_boolean
5965 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5966 {
5967 struct elf_link_hash_entry *h
5968 = (struct elf_link_hash_entry *) *slot;
5969 struct bfd_link_info *info
5970 = (struct bfd_link_info *) inf;
5971
5972 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5973 info, h, NULL);
5974 }
5975
5976 /* Finish up undefined weak symbol handling in PIE.  Fill its PLT entry
5977    here since an undefined weak symbol may not be dynamic, and
5978    elf_x86_64_finish_dynamic_symbol may not be called for it.  */
5979
5980 static bfd_boolean
5981 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
5982 void *inf)
5983 {
5984 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
5985 struct bfd_link_info *info = (struct bfd_link_info *) inf;
5986
5987 if (h->root.type != bfd_link_hash_undefweak
5988 || h->dynindx != -1)
5989 return TRUE;
5990
5991 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5992 info, h, NULL);
5993 }
5994
5995 /* Used to decide how to sort relocs in an optimal manner for the
5996 dynamic linker, before writing them out. */
5997
5998 static enum elf_reloc_type_class
5999 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
6000 const asection *rel_sec ATTRIBUTE_UNUSED,
6001 const Elf_Internal_Rela *rela)
6002 {
6003 bfd *abfd = info->output_bfd;
6004 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6005 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6006
6007 if (htab->elf.dynsym != NULL
6008 && htab->elf.dynsym->contents != NULL)
6009 {
6010 /* Check relocation against STT_GNU_IFUNC symbol if there are
6011 dynamic symbols. */
6012 unsigned long r_symndx = htab->r_sym (rela->r_info);
6013 if (r_symndx != STN_UNDEF)
6014 {
6015 Elf_Internal_Sym sym;
6016 if (!bed->s->swap_symbol_in (abfd,
6017 (htab->elf.dynsym->contents
6018 + r_symndx * bed->s->sizeof_sym),
6019 0, &sym))
6020 abort ();
6021
6022 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
6023 return reloc_class_ifunc;
6024 }
6025 }
6026
6027 switch ((int) ELF32_R_TYPE (rela->r_info))
6028 {
6029 case R_X86_64_IRELATIVE:
6030 return reloc_class_ifunc;
6031 case R_X86_64_RELATIVE:
6032 case R_X86_64_RELATIVE64:
6033 return reloc_class_relative;
6034 case R_X86_64_JUMP_SLOT:
6035 return reloc_class_plt;
6036 case R_X86_64_COPY:
6037 return reloc_class_copy;
6038 default:
6039 return reloc_class_normal;
6040 }
6041 }
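/* Note added for clarity: the classes returned above are consumed by the
   generic ELF linker when it sorts dynamic relocations before writing
   them out; in particular, grouping R_X86_64_RELATIVE entries together is
   what enables the DT_RELACOUNT shortcut used by the dynamic linker.  */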
6042
6043 /* Finish up the dynamic sections. */
6044
6045 static bfd_boolean
6046 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
6047 struct bfd_link_info *info)
6048 {
6049 struct elf_x86_64_link_hash_table *htab;
6050 bfd *dynobj;
6051 asection *sdyn;
6052 const struct elf_x86_64_backend_data *abed;
6053
6054 htab = elf_x86_64_hash_table (info);
6055 if (htab == NULL)
6056 return FALSE;
6057
6058   /* Use MPX backend data in case of BND relocations.  Use the .plt_bnd
6059      section only if there is a .plt section.  */
6060 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
6061 ? &elf_x86_64_bnd_arch_bed
6062 : get_elf_x86_64_backend_data (output_bfd));
6063
6064 dynobj = htab->elf.dynobj;
6065 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6066
6067 if (htab->elf.dynamic_sections_created)
6068 {
6069 bfd_byte *dyncon, *dynconend;
6070 const struct elf_backend_data *bed;
6071 bfd_size_type sizeof_dyn;
6072
6073 if (sdyn == NULL || htab->elf.sgot == NULL)
6074 abort ();
6075
6076 bed = get_elf_backend_data (dynobj);
6077 sizeof_dyn = bed->s->sizeof_dyn;
6078 dyncon = sdyn->contents;
6079 dynconend = sdyn->contents + sdyn->size;
6080 for (; dyncon < dynconend; dyncon += sizeof_dyn)
6081 {
6082 Elf_Internal_Dyn dyn;
6083 asection *s;
6084
6085 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
6086
6087 switch (dyn.d_tag)
6088 {
6089 default:
6090 continue;
6091
6092 case DT_PLTGOT:
6093 s = htab->elf.sgotplt;
6094 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6095 break;
6096
6097 case DT_JMPREL:
6098 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
6099 break;
6100
6101 case DT_PLTRELSZ:
6102 s = htab->elf.srelplt->output_section;
6103 dyn.d_un.d_val = s->size;
6104 break;
6105
6106 case DT_RELASZ:
6107 /* The procedure linkage table relocs (DT_JMPREL) should
6108 not be included in the overall relocs (DT_RELA).
6109 Therefore, we override the DT_RELASZ entry here to
6110 make it not include the JMPREL relocs. Since the
6111 linker script arranges for .rela.plt to follow all
6112 other relocation sections, we don't have to worry
6113 about changing the DT_RELA entry. */
6114 if (htab->elf.srelplt != NULL)
6115 {
6116 s = htab->elf.srelplt->output_section;
6117 dyn.d_un.d_val -= s->size;
6118 }
6119 break;
6120
6121 case DT_TLSDESC_PLT:
6122 s = htab->elf.splt;
6123 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6124 + htab->tlsdesc_plt;
6125 break;
6126
6127 case DT_TLSDESC_GOT:
6128 s = htab->elf.sgot;
6129 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6130 + htab->tlsdesc_got;
6131 break;
6132 }
6133
6134 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
6135 }
6136
6137 /* Fill in the special first entry in the procedure linkage table. */
6138 if (htab->elf.splt && htab->elf.splt->size > 0)
6139 {
6140 /* Fill in the first entry in the procedure linkage table. */
6141 memcpy (htab->elf.splt->contents,
6142 abed->plt0_entry, abed->plt_entry_size);
6143 /* Add the offset for pushq GOT+8(%rip); since the instruction
6144 is 6 bytes long, subtract this value.  */
6145 bfd_put_32 (output_bfd,
6146 (htab->elf.sgotplt->output_section->vma
6147 + htab->elf.sgotplt->output_offset
6148 + 8
6149 - htab->elf.splt->output_section->vma
6150 - htab->elf.splt->output_offset
6151 - 6),
6152 htab->elf.splt->contents + abed->plt0_got1_offset);
6153 /* Add offset for the PC-relative instruction accessing GOT+16,
6154 subtracting the offset to the end of that instruction. */
6155 bfd_put_32 (output_bfd,
6156 (htab->elf.sgotplt->output_section->vma
6157 + htab->elf.sgotplt->output_offset
6158 + 16
6159 - htab->elf.splt->output_section->vma
6160 - htab->elf.splt->output_offset
6161 - abed->plt0_got2_insn_end),
6162 htab->elf.splt->contents + abed->plt0_got2_offset);
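	  /* For reference (illustrative; assumes the standard 64-bit lazy
	     PLT0 template):
	         ff 35 xx xx xx xx	pushq  GOT+8(%rip)
	         ff 25 xx xx xx xx	jmpq   *GOT+16(%rip)
	         0f 1f 40 00		nopl   0x0(%rax)
	     The two bfd_put_32 calls above patch the xx displacement bytes
	     at plt0_got1_offset and plt0_got2_offset respectively.  */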
6163
6164 elf_section_data (htab->elf.splt->output_section)
6165 ->this_hdr.sh_entsize = abed->plt_entry_size;
6166
6167 if (htab->tlsdesc_plt)
6168 {
6169 bfd_put_64 (output_bfd, (bfd_vma) 0,
6170 htab->elf.sgot->contents + htab->tlsdesc_got);
6171
6172 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
6173 abed->plt0_entry, abed->plt_entry_size);
6174
6175 /* Add the offset for pushq GOT+8(%rip); since the
6176 instruction is 6 bytes long, subtract this value.  */
6177 bfd_put_32 (output_bfd,
6178 (htab->elf.sgotplt->output_section->vma
6179 + htab->elf.sgotplt->output_offset
6180 + 8
6181 - htab->elf.splt->output_section->vma
6182 - htab->elf.splt->output_offset
6183 - htab->tlsdesc_plt
6184 - 6),
6185 htab->elf.splt->contents
6186 + htab->tlsdesc_plt + abed->plt0_got1_offset);
6187 /* Add the offset for the PC-relative instruction accessing
6188 GOT+TDG, where TDG stands for htab->tlsdesc_got, subtracting
6189 the offset to the end of that instruction.  */
6190 bfd_put_32 (output_bfd,
6191 (htab->elf.sgot->output_section->vma
6192 + htab->elf.sgot->output_offset
6193 + htab->tlsdesc_got
6194 - htab->elf.splt->output_section->vma
6195 - htab->elf.splt->output_offset
6196 - htab->tlsdesc_plt
6197 - abed->plt0_got2_insn_end),
6198 htab->elf.splt->contents
6199 + htab->tlsdesc_plt + abed->plt0_got2_offset);
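	      /* Note added for clarity: the stub at tlsdesc_plt is a copy
		 of PLT0 whose second displacement is retargeted at the GOT
		 slot at offset tlsdesc_got; DT_TLSDESC_PLT points at it and
		 it serves as the entry point for lazy resolution of TLS
		 descriptors.  */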
6200 }
6201 }
6202 }
6203
6204 if (htab->plt_bnd != NULL)
6205 elf_section_data (htab->plt_bnd->output_section)
6206 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
6207
6208 if (htab->elf.sgotplt)
6209 {
6210 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
6211 {
6212 (*_bfd_error_handler)
6213 (_("discarded output section: `%A'"), htab->elf.sgotplt);
6214 return FALSE;
6215 }
6216
6217 /* Fill in the first three entries in the global offset table. */
6218 if (htab->elf.sgotplt->size > 0)
6219 {
6220 /* Set the first entry in the global offset table to the address of
6221 the dynamic section. */
6222 if (sdyn == NULL)
6223 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
6224 else
6225 bfd_put_64 (output_bfd,
6226 sdyn->output_section->vma + sdyn->output_offset,
6227 htab->elf.sgotplt->contents);
6228 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6229 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
6230 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
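	  /* Note added for clarity: by convention GOT[0] holds the
	     link-time address of the dynamic section (or zero if there is
	     none), while GOT[1] and GOT[2] are cleared here and filled in
	     at run time by the dynamic linker with its link-map pointer
	     and the address of its lazy-resolution trampoline.  */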
6231 }
6232
6233 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
6234 GOT_ENTRY_SIZE;
6235 }
6236
6237 /* Adjust .eh_frame for .plt section. */
6238 if (htab->plt_eh_frame != NULL
6239 && htab->plt_eh_frame->contents != NULL)
6240 {
6241 if (htab->elf.splt != NULL
6242 && htab->elf.splt->size != 0
6243 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
6244 && htab->elf.splt->output_section != NULL
6245 && htab->plt_eh_frame->output_section != NULL)
6246 {
6247 bfd_vma plt_start = htab->elf.splt->output_section->vma;
6248 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
6249 + htab->plt_eh_frame->output_offset
6250 + PLT_FDE_START_OFFSET;
6251 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
6252 htab->plt_eh_frame->contents
6253 + PLT_FDE_START_OFFSET);
6254 }
6255 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
6256 {
6257 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
6258 htab->plt_eh_frame,
6259 htab->plt_eh_frame->contents))
6260 return FALSE;
6261 }
6262 }
6263
6264 if (htab->elf.sgot && htab->elf.sgot->size > 0)
6265 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
6266 = GOT_ENTRY_SIZE;
6267
6268 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
6269 htab_traverse (htab->loc_hash_table,
6270 elf_x86_64_finish_local_dynamic_symbol,
6271 info);
6272
6273 /* Fill PLT entries for undefined weak symbols in PIE. */
6274 if (bfd_link_pie (info))
6275 bfd_hash_traverse (&info->hash->table,
6276 elf_x86_64_pie_finish_undefweak_symbol,
6277 info);
6278
6279 return TRUE;
6280 }
6281
6282 /* Return an array of PLT entry symbol values. */
6283
6284 static bfd_vma *
6285 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
6286 asection *relplt)
6287 {
6288 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
6289 arelent *p;
6290 long count, i;
6291 bfd_vma *plt_sym_val;
6292 bfd_vma plt_offset;
6293 bfd_byte *plt_contents;
6294 const struct elf_x86_64_backend_data *bed;
6295 Elf_Internal_Shdr *hdr;
6296 asection *plt_bnd;
6297
6298 /* Get the .plt section contents. PLT passed down may point to the
6299 .plt.bnd section. Make sure that PLT always points to the .plt
6300 section. */
6301 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
6302 if (plt_bnd)
6303 {
6304 if (plt != plt_bnd)
6305 abort ();
6306 plt = bfd_get_section_by_name (abfd, ".plt");
6307 if (plt == NULL)
6308 abort ();
6309 bed = &elf_x86_64_bnd_arch_bed;
6310 }
6311 else
6312 bed = get_elf_x86_64_backend_data (abfd);
6313
6314 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
6315 if (plt_contents == NULL)
6316 return NULL;
6317 if (!bfd_get_section_contents (abfd, (asection *) plt,
6318 plt_contents, 0, plt->size))
6319 {
6320 bad_return:
6321 free (plt_contents);
6322 return NULL;
6323 }
6324
6325 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
6326 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
6327 goto bad_return;
6328
6329 hdr = &elf_section_data (relplt)->this_hdr;
6330 count = relplt->size / hdr->sh_entsize;
6331
6332 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
6333 if (plt_sym_val == NULL)
6334 goto bad_return;
6335
6336 for (i = 0; i < count; i++)
6337 plt_sym_val[i] = -1;
6338
6339 plt_offset = bed->plt_entry_size;
6340 p = relplt->relocation;
6341 for (i = 0; i < count; i++, p++)
6342 {
6343 long reloc_index;
6344
6345 /* Skip unknown relocation. */
6346 if (p->howto == NULL)
6347 continue;
6348
6349 if (p->howto->type != R_X86_64_JUMP_SLOT
6350 && p->howto->type != R_X86_64_IRELATIVE)
6351 continue;
6352
6353 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
6354 + bed->plt_reloc_offset));
6355 if (reloc_index < count)
6356 {
6357 if (plt_bnd)
6358 {
6359 /* This is the index in .plt section. */
6360 long plt_index = plt_offset / bed->plt_entry_size;
6361 /* Store VMA + the offset in .plt.bnd section. */
6362 plt_sym_val[reloc_index] =
6363 (plt_bnd->vma
6364 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
6365 }
6366 else
6367 plt_sym_val[reloc_index] = plt->vma + plt_offset;
6368 }
6369 plt_offset += bed->plt_entry_size;
6370
6371 /* PR binutils/18437: Skip extra relocations in the .rela.plt
6372 section. */
6373 if (plt_offset >= plt->size)
6374 break;
6375 }
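  /* Note added for clarity: with a second (.plt.bnd) PLT the synthetic
     symbols point into .plt.bnd rather than .plt; since .plt.bnd has no
     header entry, slot N of .plt (N >= 1) maps to the 8-byte entry N - 1
     of .plt.bnd, which is the (plt_index - 1) computation above.  */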
6376
6377 free (plt_contents);
6378
6379 return plt_sym_val;
6380 }
6381
6382 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
6383 support. */
6384
6385 static long
6386 elf_x86_64_get_synthetic_symtab (bfd *abfd,
6387 long symcount,
6388 asymbol **syms,
6389 long dynsymcount,
6390 asymbol **dynsyms,
6391 asymbol **ret)
6392 {
6393 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
6394 as PLT if it exists. */
6395 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
6396 if (plt == NULL)
6397 plt = bfd_get_section_by_name (abfd, ".plt");
6398 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
6399 dynsymcount, dynsyms, ret,
6400 plt,
6401 elf_x86_64_get_plt_sym_val);
6402 }
6403
6404 /* Handle an x86-64 specific section when reading an object file. This
6405 is called when elfcode.h finds a section with an unknown type. */
6406
6407 static bfd_boolean
6408 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
6409 const char *name, int shindex)
6410 {
6411 if (hdr->sh_type != SHT_X86_64_UNWIND)
6412 return FALSE;
6413
6414 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6415 return FALSE;
6416
6417 return TRUE;
6418 }
6419
6420 /* Hook called by the linker routine which adds symbols from an object
6421 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
6422 of .bss. */
6423
6424 static bfd_boolean
6425 elf_x86_64_add_symbol_hook (bfd *abfd,
6426 struct bfd_link_info *info ATTRIBUTE_UNUSED,
6427 Elf_Internal_Sym *sym,
6428 const char **namep ATTRIBUTE_UNUSED,
6429 flagword *flagsp ATTRIBUTE_UNUSED,
6430 asection **secp,
6431 bfd_vma *valp)
6432 {
6433 asection *lcomm;
6434
6435 switch (sym->st_shndx)
6436 {
6437 case SHN_X86_64_LCOMMON:
6438 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6439 if (lcomm == NULL)
6440 {
6441 lcomm = bfd_make_section_with_flags (abfd,
6442 "LARGE_COMMON",
6443 (SEC_ALLOC
6444 | SEC_IS_COMMON
6445 | SEC_LINKER_CREATED));
6446 if (lcomm == NULL)
6447 return FALSE;
6448 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6449 }
6450 *secp = lcomm;
6451 *valp = sym->st_size;
6452 return TRUE;
6453 }
6454
6455 return TRUE;
6456 }
6457
6458
6459 /* Given a BFD section, try to locate the corresponding ELF section
6460 index. */
6461
6462 static bfd_boolean
6463 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6464 asection *sec, int *index_return)
6465 {
6466 if (sec == &_bfd_elf_large_com_section)
6467 {
6468 *index_return = SHN_X86_64_LCOMMON;
6469 return TRUE;
6470 }
6471 return FALSE;
6472 }
6473
6474 /* Process a symbol. */
6475
6476 static void
6477 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6478 asymbol *asym)
6479 {
6480 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6481
6482 switch (elfsym->internal_elf_sym.st_shndx)
6483 {
6484 case SHN_X86_64_LCOMMON:
6485 asym->section = &_bfd_elf_large_com_section;
6486 asym->value = elfsym->internal_elf_sym.st_size;
6487 /* Common symbol doesn't set BSF_GLOBAL. */
6488 asym->flags &= ~BSF_GLOBAL;
6489 break;
6490 }
6491 }
6492
6493 static bfd_boolean
6494 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6495 {
6496 return (sym->st_shndx == SHN_COMMON
6497 || sym->st_shndx == SHN_X86_64_LCOMMON);
6498 }
6499
6500 static unsigned int
6501 elf_x86_64_common_section_index (asection *sec)
6502 {
6503 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6504 return SHN_COMMON;
6505 else
6506 return SHN_X86_64_LCOMMON;
6507 }
6508
6509 static asection *
6510 elf_x86_64_common_section (asection *sec)
6511 {
6512 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6513 return bfd_com_section_ptr;
6514 else
6515 return &_bfd_elf_large_com_section;
6516 }
6517
6518 static bfd_boolean
6519 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6520 const Elf_Internal_Sym *sym,
6521 asection **psec,
6522 bfd_boolean newdef,
6523 bfd_boolean olddef,
6524 bfd *oldbfd,
6525 const asection *oldsec)
6526 {
6527 /* A normal common symbol and a large common symbol result in a
6528 normal common symbol. We turn the large common symbol into a
6529 normal one. */
6530 if (!olddef
6531 && h->root.type == bfd_link_hash_common
6532 && !newdef
6533 && bfd_is_com_section (*psec)
6534 && oldsec != *psec)
6535 {
6536 if (sym->st_shndx == SHN_COMMON
6537 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6538 {
6539 h->root.u.c.p->section
6540 = bfd_make_section_old_way (oldbfd, "COMMON");
6541 h->root.u.c.p->section->flags = SEC_ALLOC;
6542 }
6543 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6544 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6545 *psec = bfd_com_section_ptr;
6546 }
6547
6548 return TRUE;
6549 }
6550
6551 static int
6552 elf_x86_64_additional_program_headers (bfd *abfd,
6553 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6554 {
6555 asection *s;
6556 int count = 0;
6557
6558 /* Check to see if we need a large readonly segment. */
6559 s = bfd_get_section_by_name (abfd, ".lrodata");
6560 if (s && (s->flags & SEC_LOAD))
6561 count++;
6562
6563   /* Check to see if we need a large data segment.  Since the .lbss
6564      section is placed right after the .bss section, there should be no
6565      need for a large data segment just because of .lbss.  */
6566 s = bfd_get_section_by_name (abfd, ".ldata");
6567 if (s && (s->flags & SEC_LOAD))
6568 count++;
6569
6570 return count;
6571 }
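/* Note added for clarity: .lrodata and .ldata exist to support the x86-64
   medium/large code models, where large data is placed outside the low
   2 GB assumed for normal sections, so a loaded .lrodata or .ldata
   requests one extra program header here.  */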
6572
6573 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6574
6575 static bfd_boolean
6576 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6577 {
6578 if (h->plt.offset != (bfd_vma) -1
6579 && !h->def_regular
6580 && !h->pointer_equality_needed)
6581 return FALSE;
6582
6583 return _bfd_elf_hash_symbol (h);
6584 }
6585
6586 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6587
6588 static bfd_boolean
6589 elf_x86_64_relocs_compatible (const bfd_target *input,
6590 const bfd_target *output)
6591 {
6592 return ((xvec_get_elf_backend_data (input)->s->elfclass
6593 == xvec_get_elf_backend_data (output)->s->elfclass)
6594 && _bfd_elf_relocs_compatible (input, output));
6595 }
6596
6597 static const struct bfd_elf_special_section
6598 elf_x86_64_special_sections[]=
6599 {
6600 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6601 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6602 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6603 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6604 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6605 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6606 { NULL, 0, 0, 0, 0 }
6607 };
6608
6609 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6610 #define TARGET_LITTLE_NAME "elf64-x86-64"
6611 #define ELF_ARCH bfd_arch_i386
6612 #define ELF_TARGET_ID X86_64_ELF_DATA
6613 #define ELF_MACHINE_CODE EM_X86_64
6614 #define ELF_MAXPAGESIZE 0x200000
6615 #define ELF_MINPAGESIZE 0x1000
6616 #define ELF_COMMONPAGESIZE 0x1000
6617
6618 #define elf_backend_can_gc_sections 1
6619 #define elf_backend_can_refcount 1
6620 #define elf_backend_want_got_plt 1
6621 #define elf_backend_plt_readonly 1
6622 #define elf_backend_want_plt_sym 0
6623 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6624 #define elf_backend_rela_normal 1
6625 #define elf_backend_plt_alignment 4
6626 #define elf_backend_extern_protected_data 1
6627 #define elf_backend_caches_rawsize 1
6628
6629 #define elf_info_to_howto elf_x86_64_info_to_howto
6630
6631 #define bfd_elf64_bfd_link_hash_table_create \
6632 elf_x86_64_link_hash_table_create
6633 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6634 #define bfd_elf64_bfd_reloc_name_lookup \
6635 elf_x86_64_reloc_name_lookup
6636
6637 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6638 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6639 #define elf_backend_check_relocs elf_x86_64_check_relocs
6640 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6641 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6642 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6643 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6644 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6645 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6646 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6647 #ifdef CORE_HEADER
6648 #define elf_backend_write_core_note elf_x86_64_write_core_note
6649 #endif
6650 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6651 #define elf_backend_relocate_section elf_x86_64_relocate_section
6652 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6653 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6654 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6655 #define elf_backend_object_p elf64_x86_64_elf_object_p
6656 #define bfd_elf64_mkobject elf_x86_64_mkobject
6657 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6658
6659 #define elf_backend_section_from_shdr \
6660 elf_x86_64_section_from_shdr
6661
6662 #define elf_backend_section_from_bfd_section \
6663 elf_x86_64_elf_section_from_bfd_section
6664 #define elf_backend_add_symbol_hook \
6665 elf_x86_64_add_symbol_hook
6666 #define elf_backend_symbol_processing \
6667 elf_x86_64_symbol_processing
6668 #define elf_backend_common_section_index \
6669 elf_x86_64_common_section_index
6670 #define elf_backend_common_section \
6671 elf_x86_64_common_section
6672 #define elf_backend_common_definition \
6673 elf_x86_64_common_definition
6674 #define elf_backend_merge_symbol \
6675 elf_x86_64_merge_symbol
6676 #define elf_backend_special_sections \
6677 elf_x86_64_special_sections
6678 #define elf_backend_additional_program_headers \
6679 elf_x86_64_additional_program_headers
6680 #define elf_backend_hash_symbol \
6681 elf_x86_64_hash_symbol
6682 #define elf_backend_omit_section_dynsym \
6683 ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
6684 #define elf_backend_fixup_symbol \
6685 elf_x86_64_fixup_symbol
6686
6687 #include "elf64-target.h"
6688
6689 /* CloudABI support. */
6690
6691 #undef TARGET_LITTLE_SYM
6692 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6693 #undef TARGET_LITTLE_NAME
6694 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6695
6696 #undef ELF_OSABI
6697 #define ELF_OSABI ELFOSABI_CLOUDABI
6698
6699 #undef elf64_bed
6700 #define elf64_bed elf64_x86_64_cloudabi_bed
6701
6702 #include "elf64-target.h"
6703
6704 /* FreeBSD support. */
6705
6706 #undef TARGET_LITTLE_SYM
6707 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6708 #undef TARGET_LITTLE_NAME
6709 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6710
6711 #undef ELF_OSABI
6712 #define ELF_OSABI ELFOSABI_FREEBSD
6713
6714 #undef elf64_bed
6715 #define elf64_bed elf64_x86_64_fbsd_bed
6716
6717 #include "elf64-target.h"
6718
6719 /* Solaris 2 support. */
6720
6721 #undef TARGET_LITTLE_SYM
6722 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6723 #undef TARGET_LITTLE_NAME
6724 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6725
6726 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6727 objects won't be recognized. */
6728 #undef ELF_OSABI
6729
6730 #undef elf64_bed
6731 #define elf64_bed elf64_x86_64_sol2_bed
6732
6733 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6734 boundary. */
6735 #undef elf_backend_static_tls_alignment
6736 #define elf_backend_static_tls_alignment 16
6737
6738 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6739
6740 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6741 File, p.63. */
6742 #undef elf_backend_want_plt_sym
6743 #define elf_backend_want_plt_sym 1
6744
6745 #undef elf_backend_strtab_flags
6746 #define elf_backend_strtab_flags SHF_STRINGS
6747
6748 static bfd_boolean
6749 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
6750 bfd *obfd ATTRIBUTE_UNUSED,
6751 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
6752 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
6753 {
6754 /* PR 19938: FIXME: Need to add code for setting the sh_info
6755 and sh_link fields of Solaris specific section types. */
6756 return FALSE;
6757 }
6758
6759 #undef elf_backend_copy_special_section_fields
6760 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
6761
6762 #include "elf64-target.h"
6763
6764 /* Native Client support. */
6765
6766 static bfd_boolean
6767 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6768 {
6769 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6770 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6771 return TRUE;
6772 }
6773
6774 #undef TARGET_LITTLE_SYM
6775 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6776 #undef TARGET_LITTLE_NAME
6777 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6778 #undef elf64_bed
6779 #define elf64_bed elf64_x86_64_nacl_bed
6780
6781 #undef ELF_MAXPAGESIZE
6782 #undef ELF_MINPAGESIZE
6783 #undef ELF_COMMONPAGESIZE
6784 #define ELF_MAXPAGESIZE 0x10000
6785 #define ELF_MINPAGESIZE 0x10000
6786 #define ELF_COMMONPAGESIZE 0x10000
6787
6788 /* Restore defaults. */
6789 #undef ELF_OSABI
6790 #undef elf_backend_static_tls_alignment
6791 #undef elf_backend_want_plt_sym
6792 #define elf_backend_want_plt_sym 0
6793 #undef elf_backend_strtab_flags
6794 #undef elf_backend_copy_special_section_fields
6795
6796 /* NaCl uses substantially different PLT entries for the same effects. */
6797
6798 #undef elf_backend_plt_alignment
6799 #define elf_backend_plt_alignment 5
6800 #define NACL_PLT_ENTRY_SIZE 64
6801 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6802
6803 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6804 {
6805 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6806 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6807 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6808 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6809 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6810
6811 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6812 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6813
6814 /* 32 bytes of nop to pad out to the standard size. */
6815 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6816 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6817 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6818 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6819 0x66, /* excess data16 prefix */
6820 0x90 /* nop */
6821 };
6822
6823 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6824 {
6825 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6826 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6827 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6828 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6829
6830 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6831 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6832 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6833
6834 /* Lazy GOT entries point here (32-byte aligned). */
6835 0x68, /* pushq immediate */
6836 0, 0, 0, 0, /* replaced with index into relocation table. */
6837 0xe9, /* jmp relative */
6838 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6839
6840 /* 22 bytes of nop to pad out to the standard size. */
6841 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6842 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6843 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6844 };
6845
6846 /* .eh_frame covering the .plt section. */
6847
6848 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6849 {
6850 #if (PLT_CIE_LENGTH != 20 \
6851 || PLT_FDE_LENGTH != 36 \
6852 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6853 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6854 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6855 #endif
6856 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6857 0, 0, 0, 0, /* CIE ID */
6858 1, /* CIE version */
6859 'z', 'R', 0, /* Augmentation string */
6860 1, /* Code alignment factor */
6861 0x78, /* Data alignment factor */
6862 16, /* Return address column */
6863 1, /* Augmentation size */
6864 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6865 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6866 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6867 DW_CFA_nop, DW_CFA_nop,
6868
6869 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6870 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6871 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6872 0, 0, 0, 0, /* .plt size goes here */
6873 0, /* Augmentation size */
6874 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6875 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6876 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6877 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6878 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6879 13, /* Block length */
6880 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6881 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6882 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6883 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6884 DW_CFA_nop, DW_CFA_nop
6885 };
6886
6887 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6888 {
6889 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6890 elf_x86_64_nacl_plt_entry, /* plt_entry */
6891 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6892 2, /* plt0_got1_offset */
6893 9, /* plt0_got2_offset */
6894 13, /* plt0_got2_insn_end */
6895 3, /* plt_got_offset */
6896 33, /* plt_reloc_offset */
6897 38, /* plt_plt_offset */
6898 7, /* plt_got_insn_size */
6899 42, /* plt_plt_insn_end */
6900 32, /* plt_lazy_offset */
6901 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6902 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6903 };
6904
6905 #undef elf_backend_arch_data
6906 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6907
6908 #undef elf_backend_object_p
6909 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6910 #undef elf_backend_modify_segment_map
6911 #define elf_backend_modify_segment_map nacl_modify_segment_map
6912 #undef elf_backend_modify_program_headers
6913 #define elf_backend_modify_program_headers nacl_modify_program_headers
6914 #undef elf_backend_final_write_processing
6915 #define elf_backend_final_write_processing nacl_final_write_processing
6916
6917 #include "elf64-target.h"
6918
6919 /* Native Client x32 support. */
6920
6921 static bfd_boolean
6922 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6923 {
6924 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6925 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6926 return TRUE;
6927 }
6928
6929 #undef TARGET_LITTLE_SYM
6930 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6931 #undef TARGET_LITTLE_NAME
6932 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6933 #undef elf32_bed
6934 #define elf32_bed elf32_x86_64_nacl_bed
6935
6936 #define bfd_elf32_bfd_link_hash_table_create \
6937 elf_x86_64_link_hash_table_create
6938 #define bfd_elf32_bfd_reloc_type_lookup \
6939 elf_x86_64_reloc_type_lookup
6940 #define bfd_elf32_bfd_reloc_name_lookup \
6941 elf_x86_64_reloc_name_lookup
6942 #define bfd_elf32_mkobject \
6943 elf_x86_64_mkobject
6944 #define bfd_elf32_get_synthetic_symtab \
6945 elf_x86_64_get_synthetic_symtab
6946
6947 #undef elf_backend_object_p
6948 #define elf_backend_object_p \
6949 elf32_x86_64_nacl_elf_object_p
6950
6951 #undef elf_backend_bfd_from_remote_memory
6952 #define elf_backend_bfd_from_remote_memory \
6953 _bfd_elf32_bfd_from_remote_memory
6954
6955 #undef elf_backend_size_info
6956 #define elf_backend_size_info \
6957 _bfd_elf32_size_info
6958
6959 #include "elf32-target.h"
6960
6961 /* Restore defaults. */
6962 #undef elf_backend_object_p
6963 #define elf_backend_object_p elf64_x86_64_elf_object_p
6964 #undef elf_backend_bfd_from_remote_memory
6965 #undef elf_backend_size_info
6966 #undef elf_backend_modify_segment_map
6967 #undef elf_backend_modify_program_headers
6968 #undef elf_backend_final_write_processing
6969
6970 /* Intel L1OM support. */
6971
6972 static bfd_boolean
6973 elf64_l1om_elf_object_p (bfd *abfd)
6974 {
6975 /* Set the right machine number for an L1OM elf64 file. */
6976 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6977 return TRUE;
6978 }
6979
6980 #undef TARGET_LITTLE_SYM
6981 #define TARGET_LITTLE_SYM l1om_elf64_vec
6982 #undef TARGET_LITTLE_NAME
6983 #define TARGET_LITTLE_NAME "elf64-l1om"
6984 #undef ELF_ARCH
6985 #define ELF_ARCH bfd_arch_l1om
6986
6987 #undef ELF_MACHINE_CODE
6988 #define ELF_MACHINE_CODE EM_L1OM
6989
6990 #undef ELF_OSABI
6991
6992 #undef elf64_bed
6993 #define elf64_bed elf64_l1om_bed
6994
6995 #undef elf_backend_object_p
6996 #define elf_backend_object_p elf64_l1om_elf_object_p
6997
6998 /* Restore defaults. */
6999 #undef ELF_MAXPAGESIZE
7000 #undef ELF_MINPAGESIZE
7001 #undef ELF_COMMONPAGESIZE
7002 #define ELF_MAXPAGESIZE 0x200000
7003 #define ELF_MINPAGESIZE 0x1000
7004 #define ELF_COMMONPAGESIZE 0x1000
7005 #undef elf_backend_plt_alignment
7006 #define elf_backend_plt_alignment 4
7007 #undef elf_backend_arch_data
7008 #define elf_backend_arch_data &elf_x86_64_arch_bed
7009
7010 #include "elf64-target.h"
7011
7012 /* FreeBSD L1OM support. */
7013
7014 #undef TARGET_LITTLE_SYM
7015 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
7016 #undef TARGET_LITTLE_NAME
7017 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
7018
7019 #undef ELF_OSABI
7020 #define ELF_OSABI ELFOSABI_FREEBSD
7021
7022 #undef elf64_bed
7023 #define elf64_bed elf64_l1om_fbsd_bed
7024
7025 #include "elf64-target.h"
7026
7027 /* Intel K1OM support. */
7028
7029 static bfd_boolean
7030 elf64_k1om_elf_object_p (bfd *abfd)
7031 {
7032   /* Set the right machine number for a K1OM elf64 file.  */
7033 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
7034 return TRUE;
7035 }
7036
7037 #undef TARGET_LITTLE_SYM
7038 #define TARGET_LITTLE_SYM k1om_elf64_vec
7039 #undef TARGET_LITTLE_NAME
7040 #define TARGET_LITTLE_NAME "elf64-k1om"
7041 #undef ELF_ARCH
7042 #define ELF_ARCH bfd_arch_k1om
7043
7044 #undef ELF_MACHINE_CODE
7045 #define ELF_MACHINE_CODE EM_K1OM
7046
7047 #undef ELF_OSABI
7048
7049 #undef elf64_bed
7050 #define elf64_bed elf64_k1om_bed
7051
7052 #undef elf_backend_object_p
7053 #define elf_backend_object_p elf64_k1om_elf_object_p
7054
7055 #undef elf_backend_static_tls_alignment
7056
7057 #undef elf_backend_want_plt_sym
7058 #define elf_backend_want_plt_sym 0
7059
7060 #include "elf64-target.h"
7061
7062 /* FreeBSD K1OM support. */
7063
7064 #undef TARGET_LITTLE_SYM
7065 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
7066 #undef TARGET_LITTLE_NAME
7067 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
7068
7069 #undef ELF_OSABI
7070 #define ELF_OSABI ELFOSABI_FREEBSD
7071
7072 #undef elf64_bed
7073 #define elf64_bed elf64_k1om_fbsd_bed
7074
7075 #include "elf64-target.h"
7076
7077 /* 32bit x86-64 support. */
7078
7079 #undef TARGET_LITTLE_SYM
7080 #define TARGET_LITTLE_SYM x86_64_elf32_vec
7081 #undef TARGET_LITTLE_NAME
7082 #define TARGET_LITTLE_NAME "elf32-x86-64"
7083 #undef elf32_bed
7084
7085 #undef ELF_ARCH
7086 #define ELF_ARCH bfd_arch_i386
7087
7088 #undef ELF_MACHINE_CODE
7089 #define ELF_MACHINE_CODE EM_X86_64
7090
7091 #undef ELF_OSABI
7092
7093 #undef elf_backend_object_p
7094 #define elf_backend_object_p \
7095 elf32_x86_64_elf_object_p
7096
7097 #undef elf_backend_bfd_from_remote_memory
7098 #define elf_backend_bfd_from_remote_memory \
7099 _bfd_elf32_bfd_from_remote_memory
7100
7101 #undef elf_backend_size_info
7102 #define elf_backend_size_info \
7103 _bfd_elf32_size_info
7104
7105 #include "elf32-target.h"