1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "opcode/i386.h"
35 #include "elf/x86-64.h"
36
37 #ifdef CORE_HEADER
38 #include <stdarg.h>
39 #include CORE_HEADER
40 #endif
41
42 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
43 #define MINUS_ONE (~ (bfd_vma) 0)
44
45 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in the
46 same manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
47 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
48 since they are the same. */
49
50 #define ABI_64_P(abfd) \
51 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
52
53 /* The relocation "howto" table. Order of fields:
54 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
55 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
56 static reloc_howto_type x86_64_elf_howto_table[] =
57 {
58 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
60 FALSE),
61 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
63 FALSE),
64 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
65 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
66 TRUE),
67 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
68 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
69 FALSE),
70 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
71 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
72 TRUE),
73 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
74 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
75 FALSE),
76 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
77 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
78 MINUS_ONE, FALSE),
79 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
80 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
81 MINUS_ONE, FALSE),
82 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
84 MINUS_ONE, FALSE),
85 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
86 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
87 0xffffffff, TRUE),
88 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
89 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
90 FALSE),
91 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
92 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
93 FALSE),
94 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
95 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
96 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
97 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
98 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
100 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
102 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
103 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
104 MINUS_ONE, FALSE),
105 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
106 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
107 MINUS_ONE, FALSE),
108 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
109 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
110 MINUS_ONE, FALSE),
111 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
112 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
113 0xffffffff, TRUE),
114 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
115 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
116 0xffffffff, TRUE),
117 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
118 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
119 0xffffffff, FALSE),
120 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
121 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
122 0xffffffff, TRUE),
123 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
124 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
125 0xffffffff, FALSE),
126 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
127 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
128 TRUE),
129 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
130 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
131 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
132 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
133 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
134 FALSE, 0xffffffff, 0xffffffff, TRUE),
135 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
136 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
137 FALSE),
138 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
139 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
140 MINUS_ONE, TRUE),
141 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
142 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
143 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
144 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
145 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
146 MINUS_ONE, FALSE),
147 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
148 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
149 MINUS_ONE, FALSE),
150 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
151 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
152 FALSE),
153 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
154 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
155 FALSE),
156 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
157 complain_overflow_bitfield, bfd_elf_generic_reloc,
158 "R_X86_64_GOTPC32_TLSDESC",
159 FALSE, 0xffffffff, 0xffffffff, TRUE),
160 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
161 complain_overflow_dont, bfd_elf_generic_reloc,
162 "R_X86_64_TLSDESC_CALL",
163 FALSE, 0, 0, FALSE),
164 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
165 complain_overflow_bitfield, bfd_elf_generic_reloc,
166 "R_X86_64_TLSDESC",
167 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
168 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
169 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
170 MINUS_ONE, FALSE),
171 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
172 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
173 MINUS_ONE, FALSE),
174 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
175 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
176 TRUE),
177 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
178 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
179 TRUE),
180 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
181 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
182 0xffffffff, TRUE),
183 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
184 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
185 0xffffffff, TRUE),
186
187 /* We have a gap in the reloc numbers here.
188 R_X86_64_standard counts the number up to this point, and
189 R_X86_64_vt_offset is the value to subtract from a reloc type of
190 R_X86_64_GNU_VT* to form an index into this table. */
191 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
192 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
193
194 /* GNU extension to record C++ vtable hierarchy. */
195 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
196 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
197
198 /* GNU extension to record C++ vtable member usage. */
199 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
200 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
201 FALSE),
202
203 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
204 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
205 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
206 FALSE)
207 };
208
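/* Non-zero if TYPE is one of the PC-relative x86-64 relocation types. */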
209 #define IS_X86_64_PCREL_TYPE(TYPE) \
210 ( ((TYPE) == R_X86_64_PC8) \
211 || ((TYPE) == R_X86_64_PC16) \
212 || ((TYPE) == R_X86_64_PC32) \
213 || ((TYPE) == R_X86_64_PC32_BND) \
214 || ((TYPE) == R_X86_64_PC64))
215
216 /* Map BFD relocs to the x86_64 elf relocs. */
217 struct elf_reloc_map
218 {
219 bfd_reloc_code_real_type bfd_reloc_val;
220 unsigned char elf_reloc_val;
221 };
222
223 static const struct elf_reloc_map x86_64_reloc_map[] =
224 {
225 { BFD_RELOC_NONE, R_X86_64_NONE, },
226 { BFD_RELOC_64, R_X86_64_64, },
227 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
228 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
229 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
230 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
231 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
232 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
233 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
234 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
235 { BFD_RELOC_32, R_X86_64_32, },
236 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
237 { BFD_RELOC_16, R_X86_64_16, },
238 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
239 { BFD_RELOC_8, R_X86_64_8, },
240 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
241 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
242 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
243 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
244 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
245 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
246 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
247 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
248 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
249 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
250 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
251 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
252 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
253 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
254 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
255 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
256 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
257 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
258 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
259 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
260 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
261 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
262 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
263 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
264 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
265 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
266 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
267 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
268 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
269 };
270
271 static reloc_howto_type *
272 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
273 {
274 unsigned i;
275
276 if (r_type == (unsigned int) R_X86_64_32)
277 {
278 if (ABI_64_P (abfd))
279 i = r_type;
280 else
281 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
282 }
283 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
284 || r_type >= (unsigned int) R_X86_64_max)
285 {
286 if (r_type >= (unsigned int) R_X86_64_standard)
287 {
288 _bfd_error_handler (_("%B: invalid relocation type %d"),
289 abfd, (int) r_type);
290 r_type = R_X86_64_NONE;
291 }
292 i = r_type;
293 }
294 else
295 i = r_type - (unsigned int) R_X86_64_vt_offset;
296 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
297 return &x86_64_elf_howto_table[i];
298 }
299
300 /* Given a BFD reloc type, return a HOWTO structure. */
301 static reloc_howto_type *
302 elf_x86_64_reloc_type_lookup (bfd *abfd,
303 bfd_reloc_code_real_type code)
304 {
305 unsigned int i;
306
307 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 i++)
309 {
310 if (x86_64_reloc_map[i].bfd_reloc_val == code)
311 return elf_x86_64_rtype_to_howto (abfd,
312 x86_64_reloc_map[i].elf_reloc_val);
313 }
314 return NULL;
315 }
316
317 static reloc_howto_type *
318 elf_x86_64_reloc_name_lookup (bfd *abfd,
319 const char *r_name)
320 {
321 unsigned int i;
322
323 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
324 {
325 /* Get x32 R_X86_64_32. */
326 reloc_howto_type *reloc
327 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
328 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 return reloc;
330 }
331
332 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
333 if (x86_64_elf_howto_table[i].name != NULL
334 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
335 return &x86_64_elf_howto_table[i];
336
337 return NULL;
338 }
339
340 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
341
342 static void
343 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
344 Elf_Internal_Rela *dst)
345 {
346 unsigned r_type;
347
348 r_type = ELF32_R_TYPE (dst->r_info);
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350 BFD_ASSERT (r_type == cache_ptr->howto->type);
351 }
352 \f
353 /* Support for core dump NOTE sections. */
354 static bfd_boolean
355 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
356 {
357 int offset;
358 size_t size;
359
360 switch (note->descsz)
361 {
362 default:
363 return FALSE;
364
365 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
366 /* pr_cursig */
367 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
368
369 /* pr_pid */
370 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
371
372 /* pr_reg */
373 offset = 72;
374 size = 216;
375
376 break;
377
378 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
379 /* pr_cursig */
380 elf_tdata (abfd)->core->signal
381 = bfd_get_16 (abfd, note->descdata + 12);
382
383 /* pr_pid */
384 elf_tdata (abfd)->core->lwpid
385 = bfd_get_32 (abfd, note->descdata + 32);
386
387 /* pr_reg */
388 offset = 112;
389 size = 216;
390
391 break;
392 }
393
394 /* Make a ".reg/999" section. */
395 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
396 size, note->descpos + offset);
397 }
398
399 static bfd_boolean
400 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
401 {
402 switch (note->descsz)
403 {
404 default:
405 return FALSE;
406
407 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 12);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
414 break;
415
416 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
417 elf_tdata (abfd)->core->pid
418 = bfd_get_32 (abfd, note->descdata + 24);
419 elf_tdata (abfd)->core->program
420 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
421 elf_tdata (abfd)->core->command
422 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
423 }
424
425 /* Note that for some reason, a spurious space is tacked
426 onto the end of the args in some (at least one anyway)
427 implementations, so strip it off if it exists. */
428
429 {
430 char *command = elf_tdata (abfd)->core->command;
431 int n = strlen (command);
432
433 if (0 < n && command[n - 1] == ' ')
434 command[n - 1] = '\0';
435 }
436
437 return TRUE;
438 }
439
440 #ifdef CORE_HEADER
441 static char *
442 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
443 int note_type, ...)
444 {
445 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
446 va_list ap;
447 const char *fname, *psargs;
448 long pid;
449 int cursig;
450 const void *gregs;
451
452 switch (note_type)
453 {
454 default:
455 return NULL;
456
457 case NT_PRPSINFO:
458 va_start (ap, note_type);
459 fname = va_arg (ap, const char *);
460 psargs = va_arg (ap, const char *);
461 va_end (ap);
462
463 if (bed->s->elfclass == ELFCLASS32)
464 {
465 prpsinfo32_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 else
473 {
474 prpsinfo64_t data;
475 memset (&data, 0, sizeof (data));
476 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
477 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
478 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
479 &data, sizeof (data));
480 }
481 /* NOTREACHED */
482
483 case NT_PRSTATUS:
484 va_start (ap, note_type);
485 pid = va_arg (ap, long);
486 cursig = va_arg (ap, int);
487 gregs = va_arg (ap, const void *);
488 va_end (ap);
489
490 if (bed->s->elfclass == ELFCLASS32)
491 {
492 if (bed->elf_machine_code == EM_X86_64)
493 {
494 prstatusx32_t prstat;
495 memset (&prstat, 0, sizeof (prstat));
496 prstat.pr_pid = pid;
497 prstat.pr_cursig = cursig;
498 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
499 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
500 &prstat, sizeof (prstat));
501 }
502 else
503 {
504 prstatus32_t prstat;
505 memset (&prstat, 0, sizeof (prstat));
506 prstat.pr_pid = pid;
507 prstat.pr_cursig = cursig;
508 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
509 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
510 &prstat, sizeof (prstat));
511 }
512 }
513 else
514 {
515 prstatus64_t prstat;
516 memset (&prstat, 0, sizeof (prstat));
517 prstat.pr_pid = pid;
518 prstat.pr_cursig = cursig;
519 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
520 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
521 &prstat, sizeof (prstat));
522 }
523 }
524 /* NOTREACHED */
525 }
526 #endif
527 \f
528 /* Functions for the x86-64 ELF linker. */
529
530 /* The name of the dynamic interpreter. This is put in the .interp
531 section. */
532
533 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
534 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
535
536 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
537 copying dynamic variables from a shared lib into an app's dynbss
538 section, and instead use a dynamic relocation to point into the
539 shared lib. */
540 #define ELIMINATE_COPY_RELOCS 1
541
542 /* The size in bytes of an entry in the global offset table. */
543
544 #define GOT_ENTRY_SIZE 8
545
546 /* The size in bytes of an entry in the procedure linkage table. */
547
548 #define PLT_ENTRY_SIZE 16
549
550 /* The first entry in a procedure linkage table looks like this. See the
551 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
552
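/* GOT+8 and GOT+16 referenced below are filled in at run time by the
   dynamic linker with the link map pointer and the address of its lazy
   resolver, respectively. */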
553 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
556 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
557 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
558 };
559
560 /* Subsequent entries in a procedure linkage table look like this. */
561
562 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
563 {
564 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
565 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
566 0x68, /* pushq immediate */
567 0, 0, 0, 0, /* replaced with index into relocation table. */
568 0xe9, /* jmp relative */
569 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
570 };
571
572 /* The first entry in a procedure linkage table with BND relocations
573 looks like this. */
574
575 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
576 {
577 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
578 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
579 0x0f, 0x1f, 0 /* nopl (%rax) */
580 };
581
582 /* Subsequent entries for legacy branches in a procedure linkage table
583 with BND relocations look like this. */
584
585 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
586 {
587 0x68, 0, 0, 0, 0, /* pushq immediate */
588 0xe9, 0, 0, 0, 0, /* jmpq relative */
589 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
590 };
591
592 /* Subsequent entries for branches with BND prefix in a procedure linkage
593 table with BND relocations look like this. */
594
595 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
596 {
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
600 };
601
602 /* Entries for legacy branches in the second procedure linkage table
603 look like this. */
604
605 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
606 {
607 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
608 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
609 0x66, 0x90 /* xchg %ax,%ax */
610 };
611
612 /* Entries for branches with BND prefix in the second procedure linkage
613 table look like this. */
614
615 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
616 {
617 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x90 /* nop */
620 };
621
622 /* .eh_frame covering the .plt section. */
623
624 static const bfd_byte elf_x86_64_eh_frame_plt[] =
625 {
626 #define PLT_CIE_LENGTH 20
627 #define PLT_FDE_LENGTH 36
628 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
629 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
630 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
631 0, 0, 0, 0, /* CIE ID */
632 1, /* CIE version */
633 'z', 'R', 0, /* Augmentation string */
634 1, /* Code alignment factor */
635 0x78, /* Data alignment factor */
636 16, /* Return address column */
637 1, /* Augmentation size */
638 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
639 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
640 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
641 DW_CFA_nop, DW_CFA_nop,
642
643 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
644 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
645 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
646 0, 0, 0, 0, /* .plt size goes here */
647 0, /* Augmentation size */
648 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
649 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
650 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
651 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
652 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
653 11, /* Block length */
654 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
655 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
656 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
657 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
658 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
659 };
660
661 /* Architecture-specific backend data for x86-64. */
662
663 struct elf_x86_64_backend_data
664 {
665 /* Templates for the initial PLT entry and for subsequent entries. */
666 const bfd_byte *plt0_entry;
667 const bfd_byte *plt_entry;
668 unsigned int plt_entry_size; /* Size of each PLT entry. */
669
670 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
671 unsigned int plt0_got1_offset;
672 unsigned int plt0_got2_offset;
673
674 /* Offset of the end of the PC-relative instruction containing
675 plt0_got2_offset. */
676 unsigned int plt0_got2_insn_end;
677
678 /* Offsets into plt_entry that are to be replaced with... */
679 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
680 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
681 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
682
683 /* Length of the PC-relative instruction containing plt_got_offset. */
684 unsigned int plt_got_insn_size;
685
686 /* Offset of the end of the PC-relative jump to plt0_entry. */
687 unsigned int plt_plt_insn_end;
688
689 /* Offset into plt_entry where the initial value of the GOT entry points. */
690 unsigned int plt_lazy_offset;
691
692 /* .eh_frame covering the .plt section. */
693 const bfd_byte *eh_frame_plt;
694 unsigned int eh_frame_plt_size;
695 };
696
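/* Accessors for the x86-64 specific data hung off the generic ELF
   backend data, and for the PLT entry size recorded there. */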
697 #define get_elf_x86_64_arch_data(bed) \
698 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
699
700 #define get_elf_x86_64_backend_data(abfd) \
701 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
702
703 #define GET_PLT_ENTRY_SIZE(abfd) \
704 get_elf_x86_64_backend_data (abfd)->plt_entry_size
705
706 /* These are the standard parameters. */
707 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
708 {
709 elf_x86_64_plt0_entry, /* plt0_entry */
710 elf_x86_64_plt_entry, /* plt_entry */
711 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
712 2, /* plt0_got1_offset */
713 8, /* plt0_got2_offset */
714 12, /* plt0_got2_insn_end */
715 2, /* plt_got_offset */
716 7, /* plt_reloc_offset */
717 12, /* plt_plt_offset */
718 6, /* plt_got_insn_size */
719 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
720 6, /* plt_lazy_offset */
721 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
722 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
723 };
724
725 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
726 {
727 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
728 elf_x86_64_bnd_plt_entry, /* plt_entry */
729 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
730 2, /* plt0_got1_offset */
731 1+8, /* plt0_got2_offset */
732 1+12, /* plt0_got2_insn_end */
733 1+2, /* plt_got_offset */
734 1, /* plt_reloc_offset */
735 7, /* plt_plt_offset */
736 1+6, /* plt_got_insn_size */
737 11, /* plt_plt_insn_end */
738 0, /* plt_lazy_offset */
739 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
740 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
741 };
742
743 #define elf_backend_arch_data &elf_x86_64_arch_bed
744
745 /* TRUE if this is an undefined weak symbol which is resolved to 0.
746 A reference to an undefined weak symbol is resolved to 0 when
747 building an executable if the symbol isn't dynamic and
748 1. it has non-GOT/non-PLT relocations in the text section, or
749 2. it has no GOT/PLT relocation.
750 */
751 #define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
752 ((EH)->elf.root.type == bfd_link_hash_undefweak \
753 && bfd_link_executable (INFO) \
754 && (elf_x86_64_hash_table (INFO)->interp == NULL \
755 || !(GOT_RELOC) \
756 || (EH)->has_non_got_reloc \
757 || !(INFO)->dynamic_undefined_weak))
758
759 /* x86-64 ELF linker hash entry. */
760
761 struct elf_x86_64_link_hash_entry
762 {
763 struct elf_link_hash_entry elf;
764
765 /* Track dynamic relocs copied for this symbol. */
766 struct elf_dyn_relocs *dyn_relocs;
767
768 #define GOT_UNKNOWN 0
769 #define GOT_NORMAL 1
770 #define GOT_TLS_GD 2
771 #define GOT_TLS_IE 3
772 #define GOT_TLS_GDESC 4
773 #define GOT_TLS_GD_BOTH_P(type) \
774 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
775 #define GOT_TLS_GD_P(type) \
776 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
777 #define GOT_TLS_GDESC_P(type) \
778 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
779 #define GOT_TLS_GD_ANY_P(type) \
780 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
781 unsigned char tls_type;
782
783 /* TRUE if a weak symbol with a real definition needs a copy reloc.
784 When there is a weak symbol with a real definition, the processor
785 independent code will have arranged for us to see the real
786 definition first. We need to copy the needs_copy bit from the
787 real definition and check it when allowing copy reloc in PIE. */
788 unsigned int needs_copy : 1;
789
790 /* TRUE if symbol has at least one BND relocation. */
791 unsigned int has_bnd_reloc : 1;
792
793 /* TRUE if symbol has GOT or PLT relocations. */
794 unsigned int has_got_reloc : 1;
795
796 /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */
797 unsigned int has_non_got_reloc : 1;
798
799 /* 0: symbol isn't __tls_get_addr.
800 1: symbol is __tls_get_addr.
801 2: symbol is unknown. */
802 unsigned int tls_get_addr : 2;
803
804 /* Reference count of C/C++ function pointer relocations in read-write
805 section which can be resolved at run-time. */
806 bfd_signed_vma func_pointer_refcount;
807
808 /* Information about the GOT PLT entry. Filled when there are both
809 GOT and PLT relocations against the same function. */
810 union gotplt_union plt_got;
811
812 /* Information about the second PLT entry. Filled when has_bnd_reloc is
813 set. */
814 union gotplt_union plt_bnd;
815
816 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
817 starting at the end of the jump table. */
818 bfd_vma tlsdesc_got;
819 };
820
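/* Cast a generic ELF linker hash entry to an x86-64 specific one. */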
821 #define elf_x86_64_hash_entry(ent) \
822 ((struct elf_x86_64_link_hash_entry *)(ent))
823
824 struct elf_x86_64_obj_tdata
825 {
826 struct elf_obj_tdata root;
827
828 /* tls_type for each local got entry. */
829 char *local_got_tls_type;
830
831 /* GOTPLT entries for TLS descriptors. */
832 bfd_vma *local_tlsdesc_gotent;
833 };
834
835 #define elf_x86_64_tdata(abfd) \
836 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
837
838 #define elf_x86_64_local_got_tls_type(abfd) \
839 (elf_x86_64_tdata (abfd)->local_got_tls_type)
840
841 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
842 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
843
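/* Non-zero if ABFD is an ELF object handled by this x86-64 backend. */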
844 #define is_x86_64_elf(bfd) \
845 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
846 && elf_tdata (bfd) != NULL \
847 && elf_object_id (bfd) == X86_64_ELF_DATA)
848
849 static bfd_boolean
850 elf_x86_64_mkobject (bfd *abfd)
851 {
852 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
853 X86_64_ELF_DATA);
854 }
855
856 /* x86-64 ELF linker hash table. */
857
858 struct elf_x86_64_link_hash_table
859 {
860 struct elf_link_hash_table elf;
861
862 /* Short-cuts to get to dynamic linker sections. */
863 asection *interp;
864 asection *sdynbss;
865 asection *srelbss;
866 asection *plt_eh_frame;
867 asection *plt_bnd;
868 asection *plt_got;
869
870 union
871 {
872 bfd_signed_vma refcount;
873 bfd_vma offset;
874 } tls_ld_got;
875
876 /* The amount of space used by the jump slots in the GOT. */
877 bfd_vma sgotplt_jump_table_size;
878
879 /* Small local sym cache. */
880 struct sym_cache sym_cache;
881
882 bfd_vma (*r_info) (bfd_vma, bfd_vma);
883 bfd_vma (*r_sym) (bfd_vma);
884 unsigned int pointer_r_type;
885 const char *dynamic_interpreter;
886 int dynamic_interpreter_size;
887
888 /* _TLS_MODULE_BASE_ symbol. */
889 struct bfd_link_hash_entry *tls_module_base;
890
891 /* Used by local STT_GNU_IFUNC symbols. */
892 htab_t loc_hash_table;
893 void * loc_hash_memory;
894
895 /* The offset into splt of the PLT entry for the TLS descriptor
896 resolver. Special values are 0, if not necessary (or not found
897 to be necessary yet), and -1 if needed but not determined
898 yet. */
899 bfd_vma tlsdesc_plt;
900 /* The offset into sgot of the GOT entry used by the PLT entry
901 above. */
902 bfd_vma tlsdesc_got;
903
904 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
905 bfd_vma next_jump_slot_index;
906 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
907 bfd_vma next_irelative_index;
908
909 /* TRUE if there are dynamic relocs against IFUNC symbols that apply
910 to read-only sections. */
911 bfd_boolean readonly_dynrelocs_against_ifunc;
912 };
913
914 /* Get the x86-64 ELF linker hash table from a link_info structure. */
915
916 #define elf_x86_64_hash_table(p) \
917 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
918 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
919
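/* The amount of space used by the PLT jump slots in the GOT: one
   GOT_ENTRY_SIZE slot per relocation in .rela.plt. */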
920 #define elf_x86_64_compute_jump_table_size(htab) \
921 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
922
923 /* Create an entry in an x86-64 ELF linker hash table. */
924
925 static struct bfd_hash_entry *
926 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
927 struct bfd_hash_table *table,
928 const char *string)
929 {
930 /* Allocate the structure if it has not already been allocated by a
931 subclass. */
932 if (entry == NULL)
933 {
934 entry = (struct bfd_hash_entry *)
935 bfd_hash_allocate (table,
936 sizeof (struct elf_x86_64_link_hash_entry));
937 if (entry == NULL)
938 return entry;
939 }
940
941 /* Call the allocation method of the superclass. */
942 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
943 if (entry != NULL)
944 {
945 struct elf_x86_64_link_hash_entry *eh;
946
947 eh = (struct elf_x86_64_link_hash_entry *) entry;
948 eh->dyn_relocs = NULL;
949 eh->tls_type = GOT_UNKNOWN;
950 eh->needs_copy = 0;
951 eh->has_bnd_reloc = 0;
952 eh->has_got_reloc = 0;
953 eh->has_non_got_reloc = 0;
954 eh->tls_get_addr = 2;
955 eh->func_pointer_refcount = 0;
956 eh->plt_bnd.offset = (bfd_vma) -1;
957 eh->plt_got.offset = (bfd_vma) -1;
958 eh->tlsdesc_got = (bfd_vma) -1;
959 }
960
961 return entry;
962 }
963
964 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
965 for local symbols so that we can handle local STT_GNU_IFUNC symbols
966 as global symbols. We reuse indx and dynstr_index for the local
967 symbol hash since they aren't used by global symbols in this backend. */
968
969 static hashval_t
970 elf_x86_64_local_htab_hash (const void *ptr)
971 {
972 struct elf_link_hash_entry *h
973 = (struct elf_link_hash_entry *) ptr;
974 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
975 }
976
977 /* Compare local hash entries. */
978
979 static int
980 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
981 {
982 struct elf_link_hash_entry *h1
983 = (struct elf_link_hash_entry *) ptr1;
984 struct elf_link_hash_entry *h2
985 = (struct elf_link_hash_entry *) ptr2;
986
987 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
988 }
989
990 /* Find and/or create a hash entry for a local symbol. */
991
992 static struct elf_link_hash_entry *
993 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
994 bfd *abfd, const Elf_Internal_Rela *rel,
995 bfd_boolean create)
996 {
997 struct elf_x86_64_link_hash_entry e, *ret;
998 asection *sec = abfd->sections;
999 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
1000 htab->r_sym (rel->r_info));
1001 void **slot;
1002
1003 e.elf.indx = sec->id;
1004 e.elf.dynstr_index = htab->r_sym (rel->r_info);
1005 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
1006 create ? INSERT : NO_INSERT);
1007
1008 if (!slot)
1009 return NULL;
1010
1011 if (*slot)
1012 {
1013 ret = (struct elf_x86_64_link_hash_entry *) *slot;
1014 return &ret->elf;
1015 }
1016
1017 ret = (struct elf_x86_64_link_hash_entry *)
1018 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
1019 sizeof (struct elf_x86_64_link_hash_entry));
1020 if (ret)
1021 {
1022 memset (ret, 0, sizeof (*ret));
1023 ret->elf.indx = sec->id;
1024 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
1025 ret->elf.dynindx = -1;
1026 ret->func_pointer_refcount = 0;
1027 ret->plt_got.offset = (bfd_vma) -1;
1028 *slot = ret;
1029 }
1030 return &ret->elf;
1031 }
1032
1033 /* Destroy an X86-64 ELF linker hash table. */
1034
1035 static void
1036 elf_x86_64_link_hash_table_free (bfd *obfd)
1037 {
1038 struct elf_x86_64_link_hash_table *htab
1039 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
1040
1041 if (htab->loc_hash_table)
1042 htab_delete (htab->loc_hash_table);
1043 if (htab->loc_hash_memory)
1044 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1045 _bfd_elf_link_hash_table_free (obfd);
1046 }
1047
1048 /* Create an X86-64 ELF linker hash table. */
1049
1050 static struct bfd_link_hash_table *
1051 elf_x86_64_link_hash_table_create (bfd *abfd)
1052 {
1053 struct elf_x86_64_link_hash_table *ret;
1054 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1055
1056 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1057 if (ret == NULL)
1058 return NULL;
1059
1060 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1061 elf_x86_64_link_hash_newfunc,
1062 sizeof (struct elf_x86_64_link_hash_entry),
1063 X86_64_ELF_DATA))
1064 {
1065 free (ret);
1066 return NULL;
1067 }
1068
1069 if (ABI_64_P (abfd))
1070 {
1071 ret->r_info = elf64_r_info;
1072 ret->r_sym = elf64_r_sym;
1073 ret->pointer_r_type = R_X86_64_64;
1074 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1075 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1076 }
1077 else
1078 {
1079 ret->r_info = elf32_r_info;
1080 ret->r_sym = elf32_r_sym;
1081 ret->pointer_r_type = R_X86_64_32;
1082 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1083 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1084 }
1085
1086 ret->loc_hash_table = htab_try_create (1024,
1087 elf_x86_64_local_htab_hash,
1088 elf_x86_64_local_htab_eq,
1089 NULL);
1090 ret->loc_hash_memory = objalloc_create ();
1091 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1092 {
1093 elf_x86_64_link_hash_table_free (abfd);
1094 return NULL;
1095 }
1096 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1097
1098 return &ret->elf.root;
1099 }
1100
1101 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1102 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1103 hash table. */
1104
1105 static bfd_boolean
1106 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1107 struct bfd_link_info *info)
1108 {
1109 struct elf_x86_64_link_hash_table *htab;
1110
1111 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1112 return FALSE;
1113
1114 htab = elf_x86_64_hash_table (info);
1115 if (htab == NULL)
1116 return FALSE;
1117
1118 /* Set the contents of the .interp section to the interpreter. */
1119 if (bfd_link_executable (info) && !info->nointerp)
1120 {
1121 asection *s = bfd_get_linker_section (dynobj, ".interp");
1122 if (s == NULL)
1123 abort ();
1124 s->size = htab->dynamic_interpreter_size;
1125 s->contents = (unsigned char *) htab->dynamic_interpreter;
1126 htab->interp = s;
1127 }
1128
1129 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1130 if (!htab->sdynbss)
1131 abort ();
1132
1133 if (bfd_link_executable (info))
1134 {
1135 /* Always allow copy relocs for building executables. */
1136 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1137 if (s == NULL)
1138 {
1139 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1140 s = bfd_make_section_anyway_with_flags (dynobj,
1141 ".rela.bss",
1142 (bed->dynamic_sec_flags
1143 | SEC_READONLY));
1144 if (s == NULL
1145 || ! bfd_set_section_alignment (dynobj, s,
1146 bed->s->log_file_align))
1147 return FALSE;
1148 }
1149 htab->srelbss = s;
1150 }
1151
1152 if (!info->no_ld_generated_unwind_info
1153 && htab->plt_eh_frame == NULL
1154 && htab->elf.splt != NULL)
1155 {
1156 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1157 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1158 | SEC_LINKER_CREATED);
1159 htab->plt_eh_frame
1160 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1161 if (htab->plt_eh_frame == NULL
1162 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1163 return FALSE;
1164 }
1165
1166 /* Align .got section to its entry size. */
1167 if (htab->elf.sgot != NULL
1168 && !bfd_set_section_alignment (dynobj, htab->elf.sgot, 3))
1169 return FALSE;
1170
1171 /* Align .got.plt section to its entry size. */
1172 if (htab->elf.sgotplt != NULL
1173 && !bfd_set_section_alignment (dynobj, htab->elf.sgotplt, 3))
1174 return FALSE;
1175
1176 return TRUE;
1177 }
1178
1179 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1180
1181 static void
1182 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1183 struct elf_link_hash_entry *dir,
1184 struct elf_link_hash_entry *ind)
1185 {
1186 struct elf_x86_64_link_hash_entry *edir, *eind;
1187
1188 edir = (struct elf_x86_64_link_hash_entry *) dir;
1189 eind = (struct elf_x86_64_link_hash_entry *) ind;
1190
1191 if (!edir->has_bnd_reloc)
1192 edir->has_bnd_reloc = eind->has_bnd_reloc;
1193
1194 if (!edir->has_got_reloc)
1195 edir->has_got_reloc = eind->has_got_reloc;
1196
1197 if (!edir->has_non_got_reloc)
1198 edir->has_non_got_reloc = eind->has_non_got_reloc;
1199
1200 if (eind->dyn_relocs != NULL)
1201 {
1202 if (edir->dyn_relocs != NULL)
1203 {
1204 struct elf_dyn_relocs **pp;
1205 struct elf_dyn_relocs *p;
1206
1207 /* Add reloc counts against the indirect sym to the direct sym
1208 list. Merge any entries against the same section. */
1209 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1210 {
1211 struct elf_dyn_relocs *q;
1212
1213 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1214 if (q->sec == p->sec)
1215 {
1216 q->pc_count += p->pc_count;
1217 q->count += p->count;
1218 *pp = p->next;
1219 break;
1220 }
1221 if (q == NULL)
1222 pp = &p->next;
1223 }
1224 *pp = edir->dyn_relocs;
1225 }
1226
1227 edir->dyn_relocs = eind->dyn_relocs;
1228 eind->dyn_relocs = NULL;
1229 }
1230
1231 if (ind->root.type == bfd_link_hash_indirect
1232 && dir->got.refcount <= 0)
1233 {
1234 edir->tls_type = eind->tls_type;
1235 eind->tls_type = GOT_UNKNOWN;
1236 }
1237
1238 if (ELIMINATE_COPY_RELOCS
1239 && ind->root.type != bfd_link_hash_indirect
1240 && dir->dynamic_adjusted)
1241 {
1242 /* If called to transfer flags for a weakdef during processing
1243 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1244 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1245 dir->ref_dynamic |= ind->ref_dynamic;
1246 dir->ref_regular |= ind->ref_regular;
1247 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1248 dir->needs_plt |= ind->needs_plt;
1249 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1250 }
1251 else
1252 {
1253 if (eind->func_pointer_refcount > 0)
1254 {
1255 edir->func_pointer_refcount += eind->func_pointer_refcount;
1256 eind->func_pointer_refcount = 0;
1257 }
1258
1259 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1260 }
1261 }
1262
1263 static bfd_boolean
1264 elf64_x86_64_elf_object_p (bfd *abfd)
1265 {
1266 /* Set the right machine number for an x86-64 elf64 file. */
1267 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1268 return TRUE;
1269 }
1270
1271 static bfd_boolean
1272 elf32_x86_64_elf_object_p (bfd *abfd)
1273 {
1274 /* Set the right machine number for an x86-64 elf32 file. */
1275 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1276 return TRUE;
1277 }
1278
1279 /* Return TRUE if the TLS access code sequence supports transition
1280 from R_TYPE. */
1281
1282 static bfd_boolean
1283 elf_x86_64_check_tls_transition (bfd *abfd,
1284 struct bfd_link_info *info,
1285 asection *sec,
1286 bfd_byte *contents,
1287 Elf_Internal_Shdr *symtab_hdr,
1288 struct elf_link_hash_entry **sym_hashes,
1289 unsigned int r_type,
1290 const Elf_Internal_Rela *rel,
1291 const Elf_Internal_Rela *relend)
1292 {
1293 unsigned int val;
1294 unsigned long r_symndx;
1295 bfd_boolean largepic = FALSE;
1296 struct elf_link_hash_entry *h;
1297 bfd_vma offset;
1298 struct elf_x86_64_link_hash_table *htab;
1299 bfd_byte *call;
1300 bfd_boolean indirect_call, tls_get_addr;
1301
1302 htab = elf_x86_64_hash_table (info);
1303 offset = rel->r_offset;
1304 switch (r_type)
1305 {
1306 case R_X86_64_TLSGD:
1307 case R_X86_64_TLSLD:
1308 if ((rel + 1) >= relend)
1309 return FALSE;
1310
1311 if (r_type == R_X86_64_TLSGD)
1312 {
1313 /* Check transition from GD access model. For 64bit, only
1314 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1315 .word 0x6666; rex64; call __tls_get_addr@PLT
1316 or
1317 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1318 .byte 0x66; rex64
1319 call *__tls_get_addr@GOTPCREL(%rip)
1320 which may be converted to
1321 addr32 call __tls_get_addr
1322 can transit to different access model. For 32bit, only
1323 leaq foo@tlsgd(%rip), %rdi
1324 .word 0x6666; rex64; call __tls_get_addr@PLT
1325 or
1326 leaq foo@tlsgd(%rip), %rdi
1327 .byte 0x66; rex64
1328 call *__tls_get_addr@GOTPCREL(%rip)
1329 which may be converted to
1330 addr32 call __tls_get_addr
1331 can transit to different access model. For largepic,
1332 we also support:
1333 leaq foo@tlsgd(%rip), %rdi
1334 movabsq $__tls_get_addr@pltoff, %rax
1335 addq $r15, %rax
1336 call *%rax
1337 or
1338 leaq foo@tlsgd(%rip), %rdi
1339 movabsq $__tls_get_addr@pltoff, %rax
1340 addq $rbx, %rax
1341 call *%rax */
1342
1343 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1344
1345 if ((offset + 12) > sec->size)
1346 return FALSE;
1347
1348 call = contents + offset + 4;
1349 if (call[0] != 0x66
1350 || !((call[1] == 0x48
1351 && call[2] == 0xff
1352 && call[3] == 0x15)
1353 || (call[1] == 0x48
1354 && call[2] == 0x67
1355 && call[3] == 0xe8)
1356 || (call[1] == 0x66
1357 && call[2] == 0x48
1358 && call[3] == 0xe8)))
1359 {
1360 if (!ABI_64_P (abfd)
1361 || (offset + 19) > sec->size
1362 || offset < 3
1363 || memcmp (call - 7, leaq + 1, 3) != 0
1364 || memcmp (call, "\x48\xb8", 2) != 0
1365 || call[11] != 0x01
1366 || call[13] != 0xff
1367 || call[14] != 0xd0
1368 || !((call[10] == 0x48 && call[12] == 0xd8)
1369 || (call[10] == 0x4c && call[12] == 0xf8)))
1370 return FALSE;
1371 largepic = TRUE;
1372 }
1373 else if (ABI_64_P (abfd))
1374 {
1375 if (offset < 4
1376 || memcmp (contents + offset - 4, leaq, 4) != 0)
1377 return FALSE;
1378 }
1379 else
1380 {
1381 if (offset < 3
1382 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1383 return FALSE;
1384 }
1385 indirect_call = call[2] == 0xff;
1386 }
1387 else
1388 {
1389 /* Check transition from LD access model. Only
1390 leaq foo@tlsld(%rip), %rdi;
1391 call __tls_get_addr@PLT
1392 or
1393 leaq foo@tlsld(%rip), %rdi;
1394 call *__tls_get_addr@GOTPCREL(%rip)
1395 which may be converted to
1396 addr32 call __tls_get_addr
1397 can transit to different access model. For largepic
1398 we also support:
1399 leaq foo@tlsld(%rip), %rdi
1400 movabsq $__tls_get_addr@pltoff, %rax
1401 addq $r15, %rax
1402 call *%rax
1403 or
1404 leaq foo@tlsld(%rip), %rdi
1405 movabsq $__tls_get_addr@pltoff, %rax
1406 addq $rbx, %rax
1407 call *%rax */
1408
1409 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1410
1411 if (offset < 3 || (offset + 9) > sec->size)
1412 return FALSE;
1413
1414 if (memcmp (contents + offset - 3, lea, 3) != 0)
1415 return FALSE;
1416
1417 call = contents + offset + 4;
1418 if (!(call[0] == 0xe8
1419 || (call[0] == 0xff && call[1] == 0x15)
1420 || (call[0] == 0x67 && call[1] == 0xe8)))
1421 {
1422 if (!ABI_64_P (abfd)
1423 || (offset + 19) > sec->size
1424 || memcmp (call, "\x48\xb8", 2) != 0
1425 || call[11] != 0x01
1426 || call[13] != 0xff
1427 || call[14] != 0xd0
1428 || !((call[10] == 0x48 && call[12] == 0xd8)
1429 || (call[10] == 0x4c && call[12] == 0xf8)))
1430 return FALSE;
1431 largepic = TRUE;
1432 }
1433 indirect_call = call[0] == 0xff;
1434 }
1435
1436 r_symndx = htab->r_sym (rel[1].r_info);
1437 if (r_symndx < symtab_hdr->sh_info)
1438 return FALSE;
1439
1440 tls_get_addr = FALSE;
1441 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1442 if (h != NULL && h->root.root.string != NULL)
1443 {
1444 struct elf_x86_64_link_hash_entry *eh
1445 = (struct elf_x86_64_link_hash_entry *) h;
1446 tls_get_addr = eh->tls_get_addr == 1;
1447 if (eh->tls_get_addr > 1)
1448 {
1449 /* Use strncmp to check __tls_get_addr since
1450 __tls_get_addr may be versioned. */
1451 if (strncmp (h->root.root.string, "__tls_get_addr", 14)
1452 == 0)
1453 {
1454 eh->tls_get_addr = 1;
1455 tls_get_addr = TRUE;
1456 }
1457 else
1458 eh->tls_get_addr = 0;
1459 }
1460 }
1461
1462 if (!tls_get_addr)
1463 return FALSE;
1464 else if (largepic)
1465 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
1466 else if (indirect_call)
1467 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX;
1468 else
1469 return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1470 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
1471
1472 case R_X86_64_GOTTPOFF:
1473 /* Check transition from IE access model:
1474 mov foo@gottpoff(%rip), %reg
1475 add foo@gottpoff(%rip), %reg
1476 */
1477
1478 /* Check REX prefix first. */
1479 if (offset >= 3 && (offset + 4) <= sec->size)
1480 {
1481 val = bfd_get_8 (abfd, contents + offset - 3);
1482 if (val != 0x48 && val != 0x4c)
1483 {
1484 /* X32 may have 0x44 REX prefix or no REX prefix. */
1485 if (ABI_64_P (abfd))
1486 return FALSE;
1487 }
1488 }
1489 else
1490 {
1491 /* X32 may not have any REX prefix. */
1492 if (ABI_64_P (abfd))
1493 return FALSE;
1494 if (offset < 2 || (offset + 3) > sec->size)
1495 return FALSE;
1496 }
1497
1498 val = bfd_get_8 (abfd, contents + offset - 2);
1499 if (val != 0x8b && val != 0x03)
1500 return FALSE;
1501
1502 val = bfd_get_8 (abfd, contents + offset - 1);
1503 return (val & 0xc7) == 5;
1504
1505 case R_X86_64_GOTPC32_TLSDESC:
1506 /* Check transition from GDesc access model:
1507 leaq x@tlsdesc(%rip), %rax
1508
1509 Make sure it's a leaq adding rip to a 32-bit offset
1510 into any register, although it's probably almost always
1511 going to be rax. */
1512
1513 if (offset < 3 || (offset + 4) > sec->size)
1514 return FALSE;
1515
1516 val = bfd_get_8 (abfd, contents + offset - 3);
1517 if ((val & 0xfb) != 0x48)
1518 return FALSE;
1519
1520 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1521 return FALSE;
1522
1523 val = bfd_get_8 (abfd, contents + offset - 1);
1524 return (val & 0xc7) == 0x05;
1525
1526 case R_X86_64_TLSDESC_CALL:
1527 /* Check transition from GDesc access model:
1528 call *x@tlsdesc(%rax)
1529 */
1530 if (offset + 2 <= sec->size)
1531 {
1532 /* Make sure that it's a call *x@tlsdesc(%rax). */
1533 call = contents + offset;
1534 return call[0] == 0xff && call[1] == 0x10;
1535 }
1536
1537 return FALSE;
1538
1539 default:
1540 abort ();
1541 }
1542 }
1543
1544 /* Return TRUE if the TLS access transition is OK or no transition
1545 will be performed. Update R_TYPE if there is a transition. */
1546
1547 static bfd_boolean
1548 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1549 asection *sec, bfd_byte *contents,
1550 Elf_Internal_Shdr *symtab_hdr,
1551 struct elf_link_hash_entry **sym_hashes,
1552 unsigned int *r_type, int tls_type,
1553 const Elf_Internal_Rela *rel,
1554 const Elf_Internal_Rela *relend,
1555 struct elf_link_hash_entry *h,
1556 unsigned long r_symndx,
1557 bfd_boolean from_relocate_section)
1558 {
1559 unsigned int from_type = *r_type;
1560 unsigned int to_type = from_type;
1561 bfd_boolean check = TRUE;
1562
1563 /* Skip TLS transition for functions. */
1564 if (h != NULL
1565 && (h->type == STT_FUNC
1566 || h->type == STT_GNU_IFUNC))
1567 return TRUE;
1568
1569 switch (from_type)
1570 {
1571 case R_X86_64_TLSGD:
1572 case R_X86_64_GOTPC32_TLSDESC:
1573 case R_X86_64_TLSDESC_CALL:
1574 case R_X86_64_GOTTPOFF:
1575 if (bfd_link_executable (info))
1576 {
1577 if (h == NULL)
1578 to_type = R_X86_64_TPOFF32;
1579 else
1580 to_type = R_X86_64_GOTTPOFF;
1581 }
1582
1583 /* When we are called from elf_x86_64_relocate_section, there may
1584 be additional transitions based on TLS_TYPE. */
1585 if (from_relocate_section)
1586 {
1587 unsigned int new_to_type = to_type;
1588
1589 if (bfd_link_executable (info)
1590 && h != NULL
1591 && h->dynindx == -1
1592 && tls_type == GOT_TLS_IE)
1593 new_to_type = R_X86_64_TPOFF32;
1594
1595 if (to_type == R_X86_64_TLSGD
1596 || to_type == R_X86_64_GOTPC32_TLSDESC
1597 || to_type == R_X86_64_TLSDESC_CALL)
1598 {
1599 if (tls_type == GOT_TLS_IE)
1600 new_to_type = R_X86_64_GOTTPOFF;
1601 }
1602
1603 /* We checked the transition before when we were called from
1604 elf_x86_64_check_relocs. We only want to check the new
1605 transition which hasn't been checked before. */
1606 check = new_to_type != to_type && from_type == to_type;
1607 to_type = new_to_type;
1608 }
1609
1610 break;
1611
1612 case R_X86_64_TLSLD:
1613 if (bfd_link_executable (info))
1614 to_type = R_X86_64_TPOFF32;
1615 break;
1616
1617 default:
1618 return TRUE;
1619 }
1620
1621 /* Return TRUE if there is no transition. */
1622 if (from_type == to_type)
1623 return TRUE;
1624
1625 /* Check if the transition can be performed. */
1626 if (check
1627 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1628 symtab_hdr, sym_hashes,
1629 from_type, rel, relend))
1630 {
1631 reloc_howto_type *from, *to;
1632 const char *name;
1633
1634 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1635 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1636
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 {
1641 struct elf_x86_64_link_hash_table *htab;
1642
1643 htab = elf_x86_64_hash_table (info);
1644 if (htab == NULL)
1645 name = "*unknown*";
1646 else
1647 {
1648 Elf_Internal_Sym *isym;
1649
1650 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1651 abfd, r_symndx);
1652 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1653 }
1654 }
1655
1656 _bfd_error_handler
1657 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1658 "in section `%A' failed"),
1659 abfd, sec, from->name, to->name, name,
1660 (unsigned long) rel->r_offset);
1661 bfd_set_error (bfd_error_bad_value);
1662 return FALSE;
1663 }
1664
1665 *r_type = to_type;
1666 return TRUE;
1667 }
1668
1669 /* Rename some of the generic section flags to better document how they
1670 are used here. */
1671 #define need_convert_load sec_flg0
1672 #define check_relocs_failed sec_flg1
1673
1674 static bfd_boolean
1675 elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
1676 struct elf_link_hash_entry *h,
1677 Elf_Internal_Shdr *symtab_hdr,
1678 Elf_Internal_Sym *isym,
1679 reloc_howto_type *howto)
1680 {
1681 const char *v = "";
1682 const char *und = "";
1683 const char *pic = "";
1684
1685 const char *name;
1686 if (h)
1687 {
1688 name = h->root.root.string;
1689 switch (ELF_ST_VISIBILITY (h->other))
1690 {
1691 case STV_HIDDEN:
1692 v = _("hidden symbol ");
1693 break;
1694 case STV_INTERNAL:
1695 v = _("internal symbol ");
1696 break;
1697 case STV_PROTECTED:
1698 v = _("protected symbol ");
1699 break;
1700 default:
1701 v = _("symbol ");
1702 pic = _("; recompile with -fPIC");
1703 break;
1704 }
1705
1706 if (!h->def_regular && !h->def_dynamic)
1707 und = _("undefined ");
1708 }
1709 else
1710 {
1711 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1712 pic = _("; recompile with -fPIC");
1713 }
1714
1715 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1716 "not be used when making a shared object%s"),
1717 input_bfd, howto->name, und, v, name, pic);
1718 bfd_set_error (bfd_error_bad_value);
1719 sec->check_relocs_failed = 1;
1720 return FALSE;
1721 }
1722
1723 /* With the local symbol, foo, we convert
1724 mov foo@GOTPCREL(%rip), %reg
1725 to
1726 lea foo(%rip), %reg
1727 and convert
1728 call/jmp *foo@GOTPCREL(%rip)
1729 to
1730 nop call foo/jmp foo nop
1731 When PIC is false, convert
1732 test %reg, foo@GOTPCREL(%rip)
1733 to
1734 test $foo, %reg
1735 and convert
1736 binop foo@GOTPCREL(%rip), %reg
1737 to
1738 binop $foo, %reg
1739 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1740 instructions. */
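/* As a concrete example, with %rax as the destination register,
   "mov foo@GOTPCREL(%rip), %rax" is encoded as 48 8b 05 <disp32>;
   rewriting the opcode byte 0x8b to 0x8d yields "lea foo(%rip), %rax",
   and the displacement is then relocated with R_X86_64_PC32 against
   foo itself rather than against its GOT slot. */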
1741
1742 static bfd_boolean
1743 elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec,
1744 bfd_byte *contents,
1745 Elf_Internal_Rela *irel,
1746 struct elf_link_hash_entry *h,
1747 bfd_boolean *converted,
1748 struct bfd_link_info *link_info)
1749 {
1750 struct elf_x86_64_link_hash_table *htab;
1751 bfd_boolean is_pic;
1752 bfd_boolean require_reloc_pc32;
1753 bfd_boolean relocx;
1754 bfd_boolean to_reloc_pc32;
1755 asection *tsec;
1756 char symtype;
1757 bfd_signed_vma raddend;
1758 unsigned int opcode;
1759 unsigned int modrm;
1760 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
1761 unsigned int r_symndx;
1762 bfd_vma toff;
1763 bfd_vma roff = irel->r_offset;
1764
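/* The relocation covers a 4-byte displacement, so at least an opcode
   byte and a ModRM byte (plus a REX byte for R_X86_64_REX_GOTPCRELX)
   must precede it within this section.  */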
1765 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1766 return TRUE;
1767
1768 raddend = irel->r_addend;
1769 /* The addend for a 32-bit PC-relative relocation must be -4: the disp32 field must be the last 4 bytes of the instruction, and the displacement is relative to the next instruction. */
1770 if (raddend != -4)
1771 return TRUE;
1772
1773 htab = elf_x86_64_hash_table (link_info);
1774 is_pic = bfd_link_pic (link_info);
1775
1776 relocx = (r_type == R_X86_64_GOTPCRELX
1777 || r_type == R_X86_64_REX_GOTPCRELX);
1778
1779 /* TRUE if we can convert only to R_X86_64_PC32. Enable it for
1780 --no-relax. */
1781 require_reloc_pc32
1782 = link_info->disable_target_specific_optimizations > 1;
1783
1784 r_symndx = htab->r_sym (irel->r_info);
1785
1786 opcode = bfd_get_8 (abfd, contents + roff - 2);
1787
1788 /* The mov load can be converted to lea even for plain R_X86_64_GOTPCREL, since that conversion has been done for a while. */
1789 if (opcode != 0x8b)
1790 {
1791 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1792 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1793 test, xor instructions. */
1794 if (!relocx)
1795 return TRUE;
1796 }
1797
1798 /* We convert only to R_X86_64_PC32 when any of the following holds:
1799 1. The instruction is a branch.
1800 2. The relocation is R_X86_64_GOTPCREL, since we can't modify the REX byte.
1801 3. require_reloc_pc32 is true.
1802 4. We are generating PIC.
1803 */
1804 to_reloc_pc32 = (opcode == 0xff
1805 || !relocx
1806 || require_reloc_pc32
1807 || is_pic);
1808
1809 /* Get the symbol referred to by the reloc. */
1810 if (h == NULL)
1811 {
1812 Elf_Internal_Sym *isym
1813 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1814
1815 /* Skip relocation against undefined symbols. */
1816 if (isym->st_shndx == SHN_UNDEF)
1817 return TRUE;
1818
1819 symtype = ELF_ST_TYPE (isym->st_info);
1820
1821 if (isym->st_shndx == SHN_ABS)
1822 tsec = bfd_abs_section_ptr;
1823 else if (isym->st_shndx == SHN_COMMON)
1824 tsec = bfd_com_section_ptr;
1825 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1826 tsec = &_bfd_elf_large_com_section;
1827 else
1828 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1829
1830 toff = isym->st_value;
1831 }
1832 else
1833 {
1834 /* An undefined weak symbol is only bound locally in an executable
1835 and its reference is resolved as 0 without relocation
1836 overflow. We can only perform this optimization for
1837 GOTPCRELX relocations since we need to modify the REX byte.
1838 It is OK to convert mov with R_X86_64_GOTPCREL to
1839 R_X86_64_PC32. */
1840 if ((relocx || opcode == 0x8b)
1841 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
1842 TRUE,
1843 elf_x86_64_hash_entry (h)))
1844 {
1845 if (opcode == 0xff)
1846 {
1847 /* Skip for branch instructions since R_X86_64_PC32
1848 may overflow. */
1849 if (require_reloc_pc32)
1850 return TRUE;
1851 }
1852 else if (relocx)
1853 {
1854 /* For non-branch instructions, we can convert to
1855 R_X86_64_32/R_X86_64_32S since we know if there
1856 is a REX byte. */
1857 to_reloc_pc32 = FALSE;
1858 }
1859
1860 /* Since we don't know the current PC when PIC is true,
1861 we can't convert to R_X86_64_PC32. */
1862 if (to_reloc_pc32 && is_pic)
1863 return TRUE;
1864
1865 goto convert;
1866 }
1867 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1868 ld.so may use its link-time address. */
1869 else if ((h->def_regular
1870 || h->root.type == bfd_link_hash_defined
1871 || h->root.type == bfd_link_hash_defweak)
1872 && h != htab->elf.hdynamic
1873 && SYMBOL_REFERENCES_LOCAL (link_info, h))
1874 {
1875 /* bfd_link_hash_new or bfd_link_hash_undefined is
1876 set by an assignment in a linker script in
1877 bfd_elf_record_link_assignment. */
1878 if (h->def_regular
1879 && (h->root.type == bfd_link_hash_new
1880 || h->root.type == bfd_link_hash_undefined))
1881 {
1882 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1883 if (require_reloc_pc32)
1884 return TRUE;
1885 goto convert;
1886 }
1887 tsec = h->root.u.def.section;
1888 toff = h->root.u.def.value;
1889 symtype = h->type;
1890 }
1891 else
1892 return TRUE;
1893 }
1894
1895 /* Don't convert GOTPCREL relocation against large section. */
1896 if (elf_section_data (tsec) != NULL
1897 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1898 return TRUE;
1899
1900 /* We can only estimate relocation overflow for R_X86_64_PC32. */
1901 if (!to_reloc_pc32)
1902 goto convert;
1903
1904 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
1905 {
1906 /* At this stage in linking, no SEC_MERGE symbol has been
1907 adjusted, so all references to such symbols need to be
1908 passed through _bfd_merged_section_offset. (Later, in
1909 relocate_section, all SEC_MERGE symbols *except* for
1910 section symbols have been adjusted.)
1911
1912 gas may reduce relocations against symbols in SEC_MERGE
1913 sections to a relocation against the section symbol when
1914 the original addend was zero. When the reloc is against
1915 a section symbol we should include the addend in the
1916 offset passed to _bfd_merged_section_offset, since the
1917 location of interest is the original symbol. On the
1918 other hand, an access to "sym+addend" where "sym" is not
1919 a section symbol should not include the addend; such an
1920 access is presumed to be an offset from "sym"; the
1921 location of interest is just "sym". */
1922 if (symtype == STT_SECTION)
1923 toff += raddend;
1924
1925 toff = _bfd_merged_section_offset (abfd, &tsec,
1926 elf_section_data (tsec)->sec_info,
1927 toff);
1928
1929 if (symtype != STT_SECTION)
1930 toff += raddend;
1931 }
1932 else
1933 toff += raddend;
1934
1935 /* Don't convert if R_X86_64_PC32 relocation overflows. */
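/* The "x + 0x80000000 > 0xffffffff" tests below check that the
   estimated displacement does not fit in a signed 32-bit field
   (all arithmetic is done in unsigned bfd_vma).  */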
1936 if (tsec->output_section == sec->output_section)
1937 {
1938 if ((toff - roff + 0x80000000) > 0xffffffff)
1939 return TRUE;
1940 }
1941 else
1942 {
1943 bfd_signed_vma distance;
1944
1945 /* At this point, we don't know the load address of either the
1946 TSEC or the SEC section. We estimate the distance between
1947 SEC and TSEC. We store the estimated distances in the
1948 compressed_size field of the output section, which is only
1949 used to decompress the compressed input section. */
1950 if (sec->output_section->compressed_size == 0)
1951 {
1952 asection *asect;
1953 bfd_size_type size = 0;
1954 for (asect = link_info->output_bfd->sections;
1955 asect != NULL;
1956 asect = asect->next)
1957 /* Skip debug sections, since their compressed_size field is
1958 already used for section compression. */
1959 if ((asect->flags & SEC_DEBUGGING) == 0)
1960 {
1961 asection *i;
1962 for (i = asect->map_head.s;
1963 i != NULL;
1964 i = i->map_head.s)
1965 {
1966 size = align_power (size, i->alignment_power);
1967 size += i->size;
1968 }
1969 asect->compressed_size = size;
1970 }
1971 }
1972
1973 /* Don't convert GOTPCREL relocations if TSEC isn't placed
1974 after SEC. */
1975 distance = (tsec->output_section->compressed_size
1976 - sec->output_section->compressed_size);
1977 if (distance < 0)
1978 return TRUE;
1979
1980 /* Take PT_GNU_RELRO segment into account by adding
1981 maxpagesize. */
1982 if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize
1983 - roff + 0x80000000) > 0xffffffff)
1984 return TRUE;
1985 }
1986
1987 convert:
1988 if (opcode == 0xff)
1989 {
1990 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1991 unsigned int nop;
1992 unsigned int disp;
1993 bfd_vma nop_offset;
1994
1995 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1996 R_X86_64_PC32. */
1997 modrm = bfd_get_8 (abfd, contents + roff - 1);
1998 if (modrm == 0x25)
1999 {
2000 /* Convert to "jmp foo nop". */
2001 modrm = 0xe9;
2002 nop = NOP_OPCODE;
2003 nop_offset = irel->r_offset + 3;
2004 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2005 irel->r_offset -= 1;
2006 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2007 }
2008 else
2009 {
2010 struct elf_x86_64_link_hash_entry *eh
2011 = (struct elf_x86_64_link_hash_entry *) h;
2012
2013 /* Convert to "nop call foo". The addr32 prefix
2014 (ADDR_PREFIX_OPCODE) serves as a one-byte nop prefix. */
2015 modrm = 0xe8;
2016 /* To support TLS optimization, always use addr32 prefix for
2017 "call *__tls_get_addr@GOTPCREL(%rip)". */
2018 if (eh && eh->tls_get_addr == 1)
2019 {
2020 nop = 0x67;
2021 nop_offset = irel->r_offset - 2;
2022 }
2023 else
2024 {
2025 nop = link_info->call_nop_byte;
2026 if (link_info->call_nop_as_suffix)
2027 {
2028 nop_offset = irel->r_offset + 3;
2029 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2030 irel->r_offset -= 1;
2031 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2032 }
2033 else
2034 nop_offset = irel->r_offset - 2;
2035 }
2036 }
2037 bfd_put_8 (abfd, nop, contents + nop_offset);
2038 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
2039 r_type = R_X86_64_PC32;
2040 }
2041 else
2042 {
2043 unsigned int rex;
2044 unsigned int rex_mask = REX_R;
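/* The REX prefix is 0100WRXB: W selects a 64-bit operand; R, X and B
   extend the ModRM reg, SIB index and ModRM rm fields respectively.
   rex_mask collects the REX bits to clear during the rewrite: REX_R
   always (the register moves out of the ModRM reg field into the rm
   field, so R moves to B), plus REX_W when converting to R_X86_64_32.  */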
2045
2046 if (r_type == R_X86_64_REX_GOTPCRELX)
2047 rex = bfd_get_8 (abfd, contents + roff - 3);
2048 else
2049 rex = 0;
2050
2051 if (opcode == 0x8b)
2052 {
2053 if (to_reloc_pc32)
2054 {
2055 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2056 "lea foo(%rip), %reg". */
2057 opcode = 0x8d;
2058 r_type = R_X86_64_PC32;
2059 }
2060 else
2061 {
2062 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2063 "mov $foo, %reg". */
2064 opcode = 0xc7;
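/* C7 /0 is "mov $imm32, r/m".  The ModRM byte below changes from
   mod=00, reg=<dst>, rm=101 (RIP-relative) to mod=11, reg=000,
   rm=<dst>, making the destination register the direct operand.  */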
2065 modrm = bfd_get_8 (abfd, contents + roff - 1);
2066 modrm = 0xc0 | (modrm & 0x38) >> 3;
2067 if ((rex & REX_W) != 0
2068 && ABI_64_P (link_info->output_bfd))
2069 {
2070 /* Keep the REX_W bit in REX byte for LP64. */
2071 r_type = R_X86_64_32S;
2072 goto rewrite_modrm_rex;
2073 }
2074 else
2075 {
2076 /* If the REX_W bit in the REX byte isn't needed,
2077 use R_X86_64_32 and clear the W bit to avoid
2078 sign-extending imm32 to imm64. */
2079 r_type = R_X86_64_32;
2080 /* Clear the W bit in REX byte. */
2081 rex_mask |= REX_W;
2082 goto rewrite_modrm_rex;
2083 }
2084 }
2085 }
2086 else
2087 {
2088 /* Conversion to R_X86_64_PC32 isn't supported for these instructions. */
2089 if (to_reloc_pc32)
2090 return TRUE;
2091
2092 modrm = bfd_get_8 (abfd, contents + roff - 1);
2093 if (opcode == 0x85)
2094 {
2095 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
2096 "test $foo, %reg". */
2097 modrm = 0xc0 | (modrm & 0x38) >> 3;
2098 opcode = 0xf7;
2099 }
2100 else
2101 {
2102 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
2103 "binop $foo, %reg". */
2104 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
2105 opcode = 0x81;
2106 }
2107
2108 /* With a 64-bit operand (REX.W) the imm32 is sign-extended, so use
2109 R_X86_64_32S; otherwise use R_X86_64_32, since no sign extension occurs. */
2110 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
2111
2112 rewrite_modrm_rex:
2113 bfd_put_8 (abfd, modrm, contents + roff - 1);
2114
2115 if (rex)
2116 {
2117 /* Move the R bit to the B bit in the REX byte, since the register has moved from the ModRM reg field to the rm field. */
2118 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
2119 bfd_put_8 (abfd, rex, contents + roff - 3);
2120 }
2121
2122 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
2123 irel->r_addend = 0;
2124 }
2125
2126 bfd_put_8 (abfd, opcode, contents + roff - 2);
2127 }
2128
2129 irel->r_info = htab->r_info (r_symndx, r_type);
2130
2131 *converted = TRUE;
2132
2133 return TRUE;
2134 }
2135
2136 /* Look through the relocs for a section during the first phase, and
2137 calculate needed space in the global offset table, procedure
2138 linkage table, and dynamic reloc sections. */
2139
2140 static bfd_boolean
2141 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
2142 asection *sec,
2143 const Elf_Internal_Rela *relocs)
2144 {
2145 struct elf_x86_64_link_hash_table *htab;
2146 Elf_Internal_Shdr *symtab_hdr;
2147 struct elf_link_hash_entry **sym_hashes;
2148 const Elf_Internal_Rela *rel;
2149 const Elf_Internal_Rela *rel_end;
2150 asection *sreloc;
2151 bfd_byte *contents;
2152 bfd_boolean use_plt_got;
2153
2154 if (bfd_link_relocatable (info))
2155 return TRUE;
2156
2157 /* Don't do anything special with non-loaded, non-alloced sections.
2158 In particular, any relocs in such sections should not affect GOT
2159 and PLT reference counting (ie. we don't allow them to create GOT
2160 or PLT entries), there's no possibility or desire to optimize TLS
2161 relocs, and there's not much point in propagating relocs to shared
2162 libs that the dynamic linker won't relocate. */
2163 if ((sec->flags & SEC_ALLOC) == 0)
2164 return TRUE;
2165
2166 BFD_ASSERT (is_x86_64_elf (abfd));
2167
2168 htab = elf_x86_64_hash_table (info);
2169 if (htab == NULL)
2170 {
2171 sec->check_relocs_failed = 1;
2172 return FALSE;
2173 }
2174
2175 /* Get the section contents. */
2176 if (elf_section_data (sec)->this_hdr.contents != NULL)
2177 contents = elf_section_data (sec)->this_hdr.contents;
2178 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2179 {
2180 sec->check_relocs_failed = 1;
2181 return FALSE;
2182 }
2183
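/* The .plt.got optimization below is only enabled with the default
   x86-64 backend data; other layouts (e.g. NaCl) keep the regular
   PLT scheme.  */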
2184 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
2185
2186 symtab_hdr = &elf_symtab_hdr (abfd);
2187 sym_hashes = elf_sym_hashes (abfd);
2188
2189 sreloc = NULL;
2190
2191 rel_end = relocs + sec->reloc_count;
2192 for (rel = relocs; rel < rel_end; rel++)
2193 {
2194 unsigned int r_type;
2195 unsigned long r_symndx;
2196 struct elf_link_hash_entry *h;
2197 struct elf_x86_64_link_hash_entry *eh;
2198 Elf_Internal_Sym *isym;
2199 const char *name;
2200 bfd_boolean size_reloc;
2201
2202 r_symndx = htab->r_sym (rel->r_info);
2203 r_type = ELF32_R_TYPE (rel->r_info);
2204
2205 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
2206 {
2207 _bfd_error_handler (_("%B: bad symbol index: %d"),
2208 abfd, r_symndx);
2209 goto error_return;
2210 }
2211
2212 if (r_symndx < symtab_hdr->sh_info)
2213 {
2214 /* A local symbol. */
2215 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2216 abfd, r_symndx);
2217 if (isym == NULL)
2218 goto error_return;
2219
2220 /* Check relocation against local STT_GNU_IFUNC symbol. */
2221 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2222 {
2223 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
2224 TRUE);
2225 if (h == NULL)
2226 goto error_return;
2227
2228 /* Fake a STT_GNU_IFUNC symbol. */
2229 h->type = STT_GNU_IFUNC;
2230 h->def_regular = 1;
2231 h->ref_regular = 1;
2232 h->forced_local = 1;
2233 h->root.type = bfd_link_hash_defined;
2234 }
2235 else
2236 h = NULL;
2237 }
2238 else
2239 {
2240 isym = NULL;
2241 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2242 while (h->root.type == bfd_link_hash_indirect
2243 || h->root.type == bfd_link_hash_warning)
2244 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2245 }
2246
2247 /* Check invalid x32 relocations. */
2248 if (!ABI_64_P (abfd))
2249 switch (r_type)
2250 {
2251 default:
2252 break;
2253
2254 case R_X86_64_DTPOFF64:
2255 case R_X86_64_TPOFF64:
2256 case R_X86_64_PC64:
2257 case R_X86_64_GOTOFF64:
2258 case R_X86_64_GOT64:
2259 case R_X86_64_GOTPCREL64:
2260 case R_X86_64_GOTPC64:
2261 case R_X86_64_GOTPLT64:
2262 case R_X86_64_PLTOFF64:
2263 {
2264 if (h)
2265 name = h->root.root.string;
2266 else
2267 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2268 NULL);
2269 _bfd_error_handler
2270 (_("%B: relocation %s against symbol `%s' isn't "
2271 "supported in x32 mode"), abfd,
2272 x86_64_elf_howto_table[r_type].name, name);
2273 bfd_set_error (bfd_error_bad_value);
2274 goto error_return;
2275 }
2276 break;
2277 }
2278
2279 if (h != NULL)
2280 {
2281 switch (r_type)
2282 {
2283 default:
2284 break;
2285
2286 case R_X86_64_PC32_BND:
2287 case R_X86_64_PLT32_BND:
2288 case R_X86_64_PC32:
2289 case R_X86_64_PLT32:
2290 case R_X86_64_32:
2291 case R_X86_64_64:
2292 /* MPX PLT is supported only if elf_x86_64_arch_bed
2293 is used in 64-bit mode. */
2294 if (ABI_64_P (abfd)
2295 && info->bndplt
2296 && (get_elf_x86_64_backend_data (abfd)
2297 == &elf_x86_64_arch_bed))
2298 {
2299 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
2300
2301 /* Create the second PLT for Intel MPX support. */
2302 if (htab->plt_bnd == NULL)
2303 {
2304 unsigned int plt_bnd_align;
2305 const struct elf_backend_data *bed;
2306
2307 bed = get_elf_backend_data (info->output_bfd);
2308 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
2309 && (sizeof (elf_x86_64_bnd_plt2_entry)
2310 == sizeof (elf_x86_64_legacy_plt2_entry)));
2311 plt_bnd_align = 3;
2312
2313 if (htab->elf.dynobj == NULL)
2314 htab->elf.dynobj = abfd;
2315 htab->plt_bnd
2316 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2317 ".plt.bnd",
2318 (bed->dynamic_sec_flags
2319 | SEC_ALLOC
2320 | SEC_CODE
2321 | SEC_LOAD
2322 | SEC_READONLY));
2323 if (htab->plt_bnd == NULL
2324 || !bfd_set_section_alignment (htab->elf.dynobj,
2325 htab->plt_bnd,
2326 plt_bnd_align))
2327 goto error_return;
2328 }
2329 }
2330 /* Fall through. */
2331
2332 case R_X86_64_32S:
2333 case R_X86_64_PC64:
2334 case R_X86_64_GOTPCREL:
2335 case R_X86_64_GOTPCRELX:
2336 case R_X86_64_REX_GOTPCRELX:
2337 case R_X86_64_GOTPCREL64:
2338 if (htab->elf.dynobj == NULL)
2339 htab->elf.dynobj = abfd;
2340 /* Create the ifunc sections for static executables. */
2341 if (h->type == STT_GNU_IFUNC
2342 && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj,
2343 info))
2344 goto error_return;
2345 break;
2346 }
2347
2348 /* It is referenced by a non-shared object. */
2349 h->ref_regular = 1;
2350 h->root.non_ir_ref = 1;
2351
2352 if (h->type == STT_GNU_IFUNC)
2353 elf_tdata (info->output_bfd)->has_gnu_symbols
2354 |= elf_gnu_symbol_ifunc;
2355 }
2356
2357 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2358 symtab_hdr, sym_hashes,
2359 &r_type, GOT_UNKNOWN,
2360 rel, rel_end, h, r_symndx, FALSE))
2361 goto error_return;
2362
2363 eh = (struct elf_x86_64_link_hash_entry *) h;
2364 switch (r_type)
2365 {
2366 case R_X86_64_TLSLD:
2367 htab->tls_ld_got.refcount += 1;
2368 goto create_got;
2369
2370 case R_X86_64_TPOFF32:
2371 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2372 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2373 &x86_64_elf_howto_table[r_type]);
2374 if (eh != NULL)
2375 eh->has_got_reloc = 1;
2376 break;
2377
2378 case R_X86_64_GOTTPOFF:
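/* The initial-exec (IE) TLS model accesses a static TLS offset, so a
   shared object using it must be marked with DF_STATIC_TLS for the
   dynamic linker.  */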
2379 if (!bfd_link_executable (info))
2380 info->flags |= DF_STATIC_TLS;
2381 /* Fall through. */
2382
2383 case R_X86_64_GOT32:
2384 case R_X86_64_GOTPCREL:
2385 case R_X86_64_GOTPCRELX:
2386 case R_X86_64_REX_GOTPCRELX:
2387 case R_X86_64_TLSGD:
2388 case R_X86_64_GOT64:
2389 case R_X86_64_GOTPCREL64:
2390 case R_X86_64_GOTPLT64:
2391 case R_X86_64_GOTPC32_TLSDESC:
2392 case R_X86_64_TLSDESC_CALL:
2393 /* This symbol requires a global offset table entry. */
2394 {
2395 int tls_type, old_tls_type;
2396
2397 switch (r_type)
2398 {
2399 default: tls_type = GOT_NORMAL; break;
2400 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2401 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2402 case R_X86_64_GOTPC32_TLSDESC:
2403 case R_X86_64_TLSDESC_CALL:
2404 tls_type = GOT_TLS_GDESC; break;
2405 }
2406
2407 if (h != NULL)
2408 {
2409 h->got.refcount += 1;
2410 old_tls_type = eh->tls_type;
2411 }
2412 else
2413 {
2414 bfd_signed_vma *local_got_refcounts;
2415
2416 /* This is a global offset table entry for a local symbol. */
2417 local_got_refcounts = elf_local_got_refcounts (abfd);
2418 if (local_got_refcounts == NULL)
2419 {
2420 bfd_size_type size;
2421
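/* A single allocation holds three arrays indexed by local symbol:
   GOT reference counts (bfd_signed_vma), TLSDESC GOT offsets
   (bfd_vma) and GOT TLS types (char), laid out one after another.  */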
2422 size = symtab_hdr->sh_info;
2423 size *= sizeof (bfd_signed_vma)
2424 + sizeof (bfd_vma) + sizeof (char);
2425 local_got_refcounts = ((bfd_signed_vma *)
2426 bfd_zalloc (abfd, size));
2427 if (local_got_refcounts == NULL)
2428 goto error_return;
2429 elf_local_got_refcounts (abfd) = local_got_refcounts;
2430 elf_x86_64_local_tlsdesc_gotent (abfd)
2431 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2432 elf_x86_64_local_got_tls_type (abfd)
2433 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2434 }
2435 local_got_refcounts[r_symndx] += 1;
2436 old_tls_type
2437 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
2438 }
2439
2440 /* If a TLS symbol is accessed using IE at least once,
2441 there is no point in using a dynamic model for it. */
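/* The merge policy below: an IE access wins over GD/GDESC; GD and
   GDESC accesses can be combined; mixing TLS and non-TLS accesses to
   the same symbol is an error.  */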
2442 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2443 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2444 || tls_type != GOT_TLS_IE))
2445 {
2446 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2447 tls_type = old_tls_type;
2448 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2449 && GOT_TLS_GD_ANY_P (tls_type))
2450 tls_type |= old_tls_type;
2451 else
2452 {
2453 if (h)
2454 name = h->root.root.string;
2455 else
2456 name = bfd_elf_sym_name (abfd, symtab_hdr,
2457 isym, NULL);
2458 _bfd_error_handler
2459 (_("%B: '%s' accessed both as normal and thread local symbol"),
2460 abfd, name);
2461 bfd_set_error (bfd_error_bad_value);
2462 goto error_return;
2463 }
2464 }
2465
2466 if (old_tls_type != tls_type)
2467 {
2468 if (eh != NULL)
2469 eh->tls_type = tls_type;
2470 else
2471 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
2472 }
2473 }
2474 /* Fall through. */
2475
2476 case R_X86_64_GOTOFF64:
2477 case R_X86_64_GOTPC32:
2478 case R_X86_64_GOTPC64:
2479 create_got:
2480 if (eh != NULL)
2481 eh->has_got_reloc = 1;
2482 if (htab->elf.sgot == NULL)
2483 {
2484 if (htab->elf.dynobj == NULL)
2485 htab->elf.dynobj = abfd;
2486 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
2487 info))
2488 goto error_return;
2489 }
2490 break;
2491
2492 case R_X86_64_PLT32:
2493 case R_X86_64_PLT32_BND:
2494 /* This symbol requires a procedure linkage table entry. We
2495 actually build the entry in adjust_dynamic_symbol,
2496 because this might be a case of linking PIC code which is
2497 never referenced by a dynamic object, in which case we
2498 don't need to generate a procedure linkage table entry
2499 after all. */
2500
2501 /* If this is a local symbol, we resolve it directly without
2502 creating a procedure linkage table entry. */
2503 if (h == NULL)
2504 continue;
2505
2506 eh->has_got_reloc = 1;
2507 h->needs_plt = 1;
2508 h->plt.refcount += 1;
2509 break;
2510
2511 case R_X86_64_PLTOFF64:
2512 /* This tries to form the 'address' of a function relative
2513 to the GOT. For global symbols we need a PLT entry. */
2514 if (h != NULL)
2515 {
2516 h->needs_plt = 1;
2517 h->plt.refcount += 1;
2518 }
2519 goto create_got;
2520
2521 case R_X86_64_SIZE32:
2522 case R_X86_64_SIZE64:
2523 size_reloc = TRUE;
2524 goto do_size;
2525
2526 case R_X86_64_32:
2527 if (!ABI_64_P (abfd))
2528 goto pointer;
2529 /* Fall through. */
2530 case R_X86_64_8:
2531 case R_X86_64_16:
2532 case R_X86_64_32S:
2533 /* Check relocation overflow as these relocs may lead to
2534 run-time relocation overflow. Don't error out for
2535 sections we don't care about, such as debug sections or
2536 when relocation overflow check is disabled. */
2537 if (!info->no_reloc_overflow_check
2538 && (bfd_link_pic (info)
2539 || (bfd_link_executable (info)
2540 && h != NULL
2541 && !h->def_regular
2542 && h->def_dynamic
2543 && (sec->flags & SEC_READONLY) == 0)))
2544 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2545 &x86_64_elf_howto_table[r_type]);
2546 /* Fall through. */
2547
2548 case R_X86_64_PC8:
2549 case R_X86_64_PC16:
2550 case R_X86_64_PC32:
2551 case R_X86_64_PC32_BND:
2552 case R_X86_64_PC64:
2553 case R_X86_64_64:
2554 pointer:
2555 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2556 eh->has_non_got_reloc = 1;
2557 /* We are called after all symbols have been resolved. Only
2558 relocations against STT_GNU_IFUNC symbols must go through
2559 the PLT. */
2560 if (h != NULL
2561 && (bfd_link_executable (info)
2562 || h->type == STT_GNU_IFUNC))
2563 {
2564 /* If this reloc is in a read-only section, we might
2565 need a copy reloc. We can't check reliably at this
2566 stage whether the section is read-only, as input
2567 sections have not yet been mapped to output sections.
2568 Tentatively set the flag for now, and correct in
2569 adjust_dynamic_symbol. */
2570 h->non_got_ref = 1;
2571
2572 /* We may need a .plt entry if the symbol is a function
2573 defined in a shared lib or is a STT_GNU_IFUNC function
2574 referenced from the code or read-only section. */
2575 if (!h->def_regular
2576 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2577 h->plt.refcount += 1;
2578
2579 if (r_type == R_X86_64_PC32)
2580 {
2581 /* Since something like ".long foo - ." may be used
2582 as a pointer, make sure that the PLT is used if foo is
2583 a function defined in a shared library. */
2584 if ((sec->flags & SEC_CODE) == 0)
2585 h->pointer_equality_needed = 1;
2586 }
2587 else if (r_type != R_X86_64_PC32_BND
2588 && r_type != R_X86_64_PC64)
2589 {
2590 h->pointer_equality_needed = 1;
2591 /* At run-time, R_X86_64_64 can be resolved for both
2592 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2593 can only be resolved for x32. */
2594 if ((sec->flags & SEC_READONLY) == 0
2595 && (r_type == R_X86_64_64
2596 || (!ABI_64_P (abfd)
2597 && (r_type == R_X86_64_32
2598 || r_type == R_X86_64_32S))))
2599 eh->func_pointer_refcount += 1;
2600 }
2601 }
2602
2603 size_reloc = FALSE;
2604 do_size:
2605 /* If we are creating a shared library, and this is a reloc
2606 against a global symbol, or a non PC relative reloc
2607 against a local symbol, then we need to copy the reloc
2608 into the shared library. However, if we are linking with
2609 -Bsymbolic, we do not need to copy a reloc against a
2610 global symbol which is defined in an object we are
2611 including in the link (i.e., DEF_REGULAR is set). At
2612 this point we have not seen all the input files, so it is
2613 possible that DEF_REGULAR is not set now but will be set
2614 later (it is never cleared). In case of a weak definition,
2615 DEF_REGULAR may be cleared later by a strong definition in
2616 a shared library. We account for that possibility below by
2617 storing information in the dyn_relocs field of the hash
2618 table entry. A similar situation occurs when creating
2619 shared libraries and symbol visibility changes render the
2620 symbol local.
2621
2622 If on the other hand, we are creating an executable, we
2623 may need to keep relocations for symbols satisfied by a
2624 dynamic library if we manage to avoid copy relocs for the
2625 symbol.
2626
2627 Generate dynamic pointer relocation against STT_GNU_IFUNC
2628 symbol in the non-code section. */
2629 if ((bfd_link_pic (info)
2630 && (! IS_X86_64_PCREL_TYPE (r_type)
2631 || (h != NULL
2632 && (! (bfd_link_pie (info)
2633 || SYMBOLIC_BIND (info, h))
2634 || h->root.type == bfd_link_hash_defweak
2635 || !h->def_regular))))
2636 || (h != NULL
2637 && h->type == STT_GNU_IFUNC
2638 && r_type == htab->pointer_r_type
2639 && (sec->flags & SEC_CODE) == 0)
2640 || (ELIMINATE_COPY_RELOCS
2641 && !bfd_link_pic (info)
2642 && h != NULL
2643 && (h->root.type == bfd_link_hash_defweak
2644 || !h->def_regular)))
2645 {
2646 struct elf_dyn_relocs *p;
2647 struct elf_dyn_relocs **head;
2648
2649 /* We must copy these reloc types into the output file.
2650 Create a reloc section in dynobj and make room for
2651 this reloc. */
2652 if (sreloc == NULL)
2653 {
2654 if (htab->elf.dynobj == NULL)
2655 htab->elf.dynobj = abfd;
2656
2657 sreloc = _bfd_elf_make_dynamic_reloc_section
2658 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2659 abfd, /*rela?*/ TRUE);
2660
2661 if (sreloc == NULL)
2662 goto error_return;
2663 }
2664
2665 /* If this is a global symbol, we count the number of
2666 relocations we need for this symbol. */
2667 if (h != NULL)
2668 head = &eh->dyn_relocs;
2669 else
2670 {
2671 /* Track dynamic relocs needed for local syms too.
2672 We really need local syms available to do this
2673 easily. Oh well. */
2674 asection *s;
2675 void **vpp;
2676
2677 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2678 abfd, r_symndx);
2679 if (isym == NULL)
2680 goto error_return;
2681
2682 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2683 if (s == NULL)
2684 s = sec;
2685
2686 /* Beware of type punned pointers vs strict aliasing
2687 rules. */
2688 vpp = &(elf_section_data (s)->local_dynrel);
2689 head = (struct elf_dyn_relocs **)vpp;
2690 }
2691
2692 p = *head;
2693 if (p == NULL || p->sec != sec)
2694 {
2695 bfd_size_type amt = sizeof *p;
2696
2697 p = ((struct elf_dyn_relocs *)
2698 bfd_alloc (htab->elf.dynobj, amt));
2699 if (p == NULL)
2700 goto error_return;
2701 p->next = *head;
2702 *head = p;
2703 p->sec = sec;
2704 p->count = 0;
2705 p->pc_count = 0;
2706 }
2707
2708 p->count += 1;
2709 /* Count size relocation as PC-relative relocation. */
2710 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2711 p->pc_count += 1;
2712 }
2713 break;
2714
2715 /* This relocation describes the C++ object vtable hierarchy.
2716 Reconstruct it for later use during GC. */
2717 case R_X86_64_GNU_VTINHERIT:
2718 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2719 goto error_return;
2720 break;
2721
2722 /* This relocation describes which C++ vtable entries are actually
2723 used. Record for later use during GC. */
2724 case R_X86_64_GNU_VTENTRY:
2725 BFD_ASSERT (h != NULL);
2726 if (h != NULL
2727 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2728 goto error_return;
2729 break;
2730
2731 default:
2732 break;
2733 }
2734
2735 if (use_plt_got
2736 && h != NULL
2737 && h->plt.refcount > 0
2738 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2739 || h->got.refcount > 0)
2740 && htab->plt_got == NULL)
2741 {
2742 /* Create the GOT procedure linkage table. */
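/* A .plt.got entry branches indirectly through the symbol's regular
   GOT slot (relocated with GLOB_DAT), so no .got.plt slot or
   JUMP_SLOT relocation is needed; it is usable when the symbol
   already has a GOT entry or when lazy binding is disabled
   (DF_BIND_NOW).  */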
2743 unsigned int plt_got_align;
2744 const struct elf_backend_data *bed;
2745
2746 bed = get_elf_backend_data (info->output_bfd);
2747 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2748 && (sizeof (elf_x86_64_bnd_plt2_entry)
2749 == sizeof (elf_x86_64_legacy_plt2_entry)));
2750 plt_got_align = 3;
2751
2752 if (htab->elf.dynobj == NULL)
2753 htab->elf.dynobj = abfd;
2754 htab->plt_got
2755 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2756 ".plt.got",
2757 (bed->dynamic_sec_flags
2758 | SEC_ALLOC
2759 | SEC_CODE
2760 | SEC_LOAD
2761 | SEC_READONLY));
2762 if (htab->plt_got == NULL
2763 || !bfd_set_section_alignment (htab->elf.dynobj,
2764 htab->plt_got,
2765 plt_got_align))
2766 goto error_return;
2767 }
2768
2769 if ((r_type == R_X86_64_GOTPCREL
2770 || r_type == R_X86_64_GOTPCRELX
2771 || r_type == R_X86_64_REX_GOTPCRELX)
2772 && (h == NULL || h->type != STT_GNU_IFUNC))
2773 sec->need_convert_load = 1;
2774 }
2775
2776 if (elf_section_data (sec)->this_hdr.contents != contents)
2777 {
2778 if (!info->keep_memory)
2779 free (contents);
2780 else
2781 {
2782 /* Cache the section contents for elf_link_input_bfd. */
2783 elf_section_data (sec)->this_hdr.contents = contents;
2784 }
2785 }
2786
2787 return TRUE;
2788
2789 error_return:
2790 if (elf_section_data (sec)->this_hdr.contents != contents)
2791 free (contents);
2792 sec->check_relocs_failed = 1;
2793 return FALSE;
2794 }
2795
2796 /* Return the section that should be marked against GC for a given
2797 relocation. */
2798
2799 static asection *
2800 elf_x86_64_gc_mark_hook (asection *sec,
2801 struct bfd_link_info *info,
2802 Elf_Internal_Rela *rel,
2803 struct elf_link_hash_entry *h,
2804 Elf_Internal_Sym *sym)
2805 {
2806 if (h != NULL)
2807 switch (ELF32_R_TYPE (rel->r_info))
2808 {
2809 case R_X86_64_GNU_VTINHERIT:
2810 case R_X86_64_GNU_VTENTRY:
2811 return NULL;
2812 }
2813
2814 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2815 }
2816
2817 /* Remove undefined weak symbol from the dynamic symbol table if it
2818 is resolved to 0. */
2819
2820 static bfd_boolean
2821 elf_x86_64_fixup_symbol (struct bfd_link_info *info,
2822 struct elf_link_hash_entry *h)
2823 {
2824 if (h->dynindx != -1
2825 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2826 elf_x86_64_hash_entry (h)->has_got_reloc,
2827 elf_x86_64_hash_entry (h)))
2828 {
2829 h->dynindx = -1;
2830 _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
2831 h->dynstr_index);
2832 }
2833 return TRUE;
2834 }
2835
2836 /* Adjust a symbol defined by a dynamic object and referenced by a
2837 regular object. The current definition is in some section of the
2838 dynamic object, but we're not including those sections. We have to
2839 change the definition to something the rest of the link can
2840 understand. */
2841
2842 static bfd_boolean
2843 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2844 struct elf_link_hash_entry *h)
2845 {
2846 struct elf_x86_64_link_hash_table *htab;
2847 asection *s;
2848 struct elf_x86_64_link_hash_entry *eh;
2849 struct elf_dyn_relocs *p;
2850
2851 /* STT_GNU_IFUNC symbol must go through PLT. */
2852 if (h->type == STT_GNU_IFUNC)
2853 {
2854 /* All local STT_GNU_IFUNC references must be treated as local
2855 calls via the local PLT. */
2856 if (h->ref_regular
2857 && SYMBOL_CALLS_LOCAL (info, h))
2858 {
2859 bfd_size_type pc_count = 0, count = 0;
2860 struct elf_dyn_relocs **pp;
2861
2862 eh = (struct elf_x86_64_link_hash_entry *) h;
2863 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2864 {
2865 pc_count += p->pc_count;
2866 p->count -= p->pc_count;
2867 p->pc_count = 0;
2868 count += p->count;
2869 if (p->count == 0)
2870 *pp = p->next;
2871 else
2872 pp = &p->next;
2873 }
2874
2875 if (pc_count || count)
2876 {
2877 h->non_got_ref = 1;
2878 if (pc_count)
2879 {
2880 /* Increment PLT reference count only for PC-relative
2881 references. */
2882 h->needs_plt = 1;
2883 if (h->plt.refcount <= 0)
2884 h->plt.refcount = 1;
2885 else
2886 h->plt.refcount += 1;
2887 }
2888 }
2889 }
2890
2891 if (h->plt.refcount <= 0)
2892 {
2893 h->plt.offset = (bfd_vma) -1;
2894 h->needs_plt = 0;
2895 }
2896 return TRUE;
2897 }
2898
2899 /* If this is a function, put it in the procedure linkage table. We
2900 will fill in the contents of the procedure linkage table later,
2901 when we know the address of the .got section. */
2902 if (h->type == STT_FUNC
2903 || h->needs_plt)
2904 {
2905 if (h->plt.refcount <= 0
2906 || SYMBOL_CALLS_LOCAL (info, h)
2907 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2908 && h->root.type == bfd_link_hash_undefweak))
2909 {
2910 /* This case can occur if we saw a PLT32 reloc in an input
2911 file, but the symbol was never referred to by a dynamic
2912 object, or if all references were garbage collected. In
2913 such a case, we don't actually need to build a procedure
2914 linkage table, and we can just do a PC32 reloc instead. */
2915 h->plt.offset = (bfd_vma) -1;
2916 h->needs_plt = 0;
2917 }
2918
2919 return TRUE;
2920 }
2921 else
2922 /* It's possible that we incorrectly decided a .plt reloc was
2923 needed for an R_X86_64_PC32 reloc to a non-function sym in
2924 check_relocs. We can't decide accurately between function and
2925 non-function syms in check_relocs; objects loaded later in
2926 the link may change h->type. So fix it now. */
2927 h->plt.offset = (bfd_vma) -1;
2928
2929 /* If this is a weak symbol, and there is a real definition, the
2930 processor independent code will have arranged for us to see the
2931 real definition first, and we can just use the same value. */
2932 if (h->u.weakdef != NULL)
2933 {
2934 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2935 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2936 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2937 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2938 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2939 {
2940 eh = (struct elf_x86_64_link_hash_entry *) h;
2941 h->non_got_ref = h->u.weakdef->non_got_ref;
2942 eh->needs_copy = h->u.weakdef->needs_copy;
2943 }
2944 return TRUE;
2945 }
2946
2947 /* This is a reference to a symbol defined by a dynamic object which
2948 is not a function. */
2949
2950 /* If we are creating a shared library, we must presume that the
2951 only references to the symbol are via the global offset table.
2952 For such cases we need not do anything here; the relocations will
2953 be handled correctly by relocate_section. */
2954 if (!bfd_link_executable (info))
2955 return TRUE;
2956
2957 /* If there are no references to this symbol that do not use the
2958 GOT, we don't need to generate a copy reloc. */
2959 if (!h->non_got_ref)
2960 return TRUE;
2961
2962 /* If -z nocopyreloc was given, we won't generate them either. */
2963 if (info->nocopyreloc)
2964 {
2965 h->non_got_ref = 0;
2966 return TRUE;
2967 }
2968
2969 if (ELIMINATE_COPY_RELOCS)
2970 {
2971 eh = (struct elf_x86_64_link_hash_entry *) h;
2972 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2973 {
2974 s = p->sec->output_section;
2975 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2976 break;
2977 }
2978
2979 /* If we didn't find any dynamic relocs in read-only sections, then
2980 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2981 if (p == NULL)
2982 {
2983 h->non_got_ref = 0;
2984 return TRUE;
2985 }
2986 }
2987
2988 /* We must allocate the symbol in our .dynbss section, which will
2989 become part of the .bss section of the executable. There will be
2990 an entry for this symbol in the .dynsym section. The dynamic
2991 object will contain position independent code, so all references
2992 from the dynamic object to this symbol will go through the global
2993 offset table. The dynamic linker will use the .dynsym entry to
2994 determine the address it must put in the global offset table, so
2995 both the dynamic object and the regular object will refer to the
2996 same memory location for the variable. */
2997
2998 htab = elf_x86_64_hash_table (info);
2999 if (htab == NULL)
3000 return FALSE;
3001
3002 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
3003 to copy the initial value out of the dynamic object and into the
3004 runtime process image. */
3005 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3006 {
3007 const struct elf_backend_data *bed;
3008 bed = get_elf_backend_data (info->output_bfd);
3009 htab->srelbss->size += bed->s->sizeof_rela;
3010 h->needs_copy = 1;
3011 }
3012
3013 s = htab->sdynbss;
3014
3015 return _bfd_elf_adjust_dynamic_copy (info, h, s);
3016 }
3017
3018 /* Allocate space in .plt, .got and associated reloc sections for
3019 dynamic relocs. */
3020
3021 static bfd_boolean
3022 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
3023 {
3024 struct bfd_link_info *info;
3025 struct elf_x86_64_link_hash_table *htab;
3026 struct elf_x86_64_link_hash_entry *eh;
3027 struct elf_dyn_relocs *p;
3028 const struct elf_backend_data *bed;
3029 unsigned int plt_entry_size;
3030 bfd_boolean resolved_to_zero;
3031
3032 if (h->root.type == bfd_link_hash_indirect)
3033 return TRUE;
3034
3035 eh = (struct elf_x86_64_link_hash_entry *) h;
3036
3037 info = (struct bfd_link_info *) inf;
3038 htab = elf_x86_64_hash_table (info);
3039 if (htab == NULL)
3040 return FALSE;
3041 bed = get_elf_backend_data (info->output_bfd);
3042 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3043
3044 resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3045 eh->has_got_reloc,
3046 eh);
3047
3048 /* We can't use the GOT PLT if pointer equality is needed since
3049 finish_dynamic_symbol won't clear symbol value and the dynamic
3050 linker won't update the GOT slot. We will get into an infinite
3051 loop at run-time. */
3052 if (htab->plt_got != NULL
3053 && h->type != STT_GNU_IFUNC
3054 && !h->pointer_equality_needed
3055 && h->plt.refcount > 0
3056 && h->got.refcount > 0)
3057 {
3058 /* Don't use the regular PLT if there are both GOT and GOTPLT
3059 relocations. */
3060 h->plt.offset = (bfd_vma) -1;
3061
3062 /* Use the GOT PLT. */
3063 eh->plt_got.refcount = 1;
3064 }
3065
3066 /* Clear the reference count of function pointer relocations if
3067 symbol isn't a normal function. */
3068 if (h->type != STT_FUNC)
3069 eh->func_pointer_refcount = 0;
3070
3071 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
3072 here if it is defined and referenced in a non-shared object. */
3073 if (h->type == STT_GNU_IFUNC
3074 && h->def_regular)
3075 {
3076 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
3077 &eh->dyn_relocs,
3078 &htab->readonly_dynrelocs_against_ifunc,
3079 plt_entry_size,
3080 plt_entry_size,
3081 GOT_ENTRY_SIZE, TRUE))
3082 {
3083 asection *s = htab->plt_bnd;
3084 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
3085 {
3086 /* Use the .plt.bnd section if it is created. */
3087 eh->plt_bnd.offset = s->size;
3088
3089 /* Make room for this entry in the .plt.bnd section. */
3090 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3091 }
3092
3093 return TRUE;
3094 }
3095 else
3096 return FALSE;
3097 }
3098 /* Don't create the PLT entry if there are only function pointer
3099 relocations which can be resolved at run-time. */
3100 else if (htab->elf.dynamic_sections_created
3101 && (h->plt.refcount > eh->func_pointer_refcount
3102 || eh->plt_got.refcount > 0))
3103 {
3104 bfd_boolean use_plt_got;
3105
3106 /* Clear the reference count of function pointer relocations
3107 if PLT is used. */
3108 eh->func_pointer_refcount = 0;
3109
3110 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
3111 {
3112 /* Don't use the regular PLT for DF_BIND_NOW. */
3113 h->plt.offset = (bfd_vma) -1;
3114
3115 /* Use the GOT PLT. */
3116 h->got.refcount = 1;
3117 eh->plt_got.refcount = 1;
3118 }
3119
3120 use_plt_got = eh->plt_got.refcount > 0;
3121
3122 /* Make sure this symbol is output as a dynamic symbol.
3123 Undefined weak syms won't yet be marked as dynamic. */
3124 if (h->dynindx == -1
3125 && !h->forced_local
3126 && !resolved_to_zero)
3127 {
3128 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3129 return FALSE;
3130 }
3131
3132 if (bfd_link_pic (info)
3133 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3134 {
3135 asection *s = htab->elf.splt;
3136 asection *bnd_s = htab->plt_bnd;
3137 asection *got_s = htab->plt_got;
3138
3139 /* If this is the first .plt entry, make room for the special
3140 first entry. The .plt section is used by prelink to undo
3141 prelinking for dynamic relocations. */
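/* On x86-64 this reserved first entry pushes the link-map pointer
   from the GOT and jumps to the dynamic linker's resolver, so it
   must exist even though no symbol maps to it.  */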
3142 if (s->size == 0)
3143 s->size = plt_entry_size;
3144
3145 if (use_plt_got)
3146 eh->plt_got.offset = got_s->size;
3147 else
3148 {
3149 h->plt.offset = s->size;
3150 if (bnd_s)
3151 eh->plt_bnd.offset = bnd_s->size;
3152 }
3153
3154 /* If this symbol is not defined in a regular file, and we are
3155 not generating a shared library, then set the symbol to this
3156 location in the .plt. This is required to make function
3157 pointers compare as equal between the normal executable and
3158 the shared library. */
3159 if (! bfd_link_pic (info)
3160 && !h->def_regular)
3161 {
3162 if (use_plt_got)
3163 {
3164 /* We need to make a call to the entry of the GOT PLT
3165 instead of regular PLT entry. */
3166 h->root.u.def.section = got_s;
3167 h->root.u.def.value = eh->plt_got.offset;
3168 }
3169 else
3170 {
3171 if (bnd_s)
3172 {
3173 /* We need to make a call to the entry of the second
3174 PLT instead of regular PLT entry. */
3175 h->root.u.def.section = bnd_s;
3176 h->root.u.def.value = eh->plt_bnd.offset;
3177 }
3178 else
3179 {
3180 h->root.u.def.section = s;
3181 h->root.u.def.value = h->plt.offset;
3182 }
3183 }
3184 }
3185
3186 /* Make room for this entry. */
3187 if (use_plt_got)
3188 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3189 else
3190 {
3191 s->size += plt_entry_size;
3192 if (bnd_s)
3193 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3194
3195 /* We also need to make an entry in the .got.plt section,
3196 which will be placed in the .got section by the linker
3197 script. */
3198 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
3199
3200 /* There should be no PLT relocation against a resolved
3201 undefined weak symbol in an executable. */
3202 if (!resolved_to_zero)
3203 {
3204 /* We also need to make an entry in the .rela.plt
3205 section. */
3206 htab->elf.srelplt->size += bed->s->sizeof_rela;
3207 htab->elf.srelplt->reloc_count++;
3208 }
3209 }
3210 }
3211 else
3212 {
3213 eh->plt_got.offset = (bfd_vma) -1;
3214 h->plt.offset = (bfd_vma) -1;
3215 h->needs_plt = 0;
3216 }
3217 }
3218 else
3219 {
3220 eh->plt_got.offset = (bfd_vma) -1;
3221 h->plt.offset = (bfd_vma) -1;
3222 h->needs_plt = 0;
3223 }
3224
3225 eh->tlsdesc_got = (bfd_vma) -1;
3226
3227 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
3228 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
3229 if (h->got.refcount > 0
3230 && bfd_link_executable (info)
3231 && h->dynindx == -1
3232 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
3233 {
3234 h->got.offset = (bfd_vma) -1;
3235 }
3236 else if (h->got.refcount > 0)
3237 {
3238 asection *s;
3239 bfd_boolean dyn;
3240 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
3241
3242 /* Make sure this symbol is output as a dynamic symbol.
3243 Undefined weak syms won't yet be marked as dynamic. */
3244 if (h->dynindx == -1
3245 && !h->forced_local
3246 && !resolved_to_zero)
3247 {
3248 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3249 return FALSE;
3250 }
3251
3252 if (GOT_TLS_GDESC_P (tls_type))
3253 {
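/* A TLS descriptor needs two consecutive GOT slots in .got.plt:
   one for the resolver function pointer filled in by ld.so and one
   for its argument.  */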
3254 eh->tlsdesc_got = htab->elf.sgotplt->size
3255 - elf_x86_64_compute_jump_table_size (htab);
3256 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3257 h->got.offset = (bfd_vma) -2;
3258 }
3259 if (! GOT_TLS_GDESC_P (tls_type)
3260 || GOT_TLS_GD_P (tls_type))
3261 {
3262 s = htab->elf.sgot;
3263 h->got.offset = s->size;
3264 s->size += GOT_ENTRY_SIZE;
3265 if (GOT_TLS_GD_P (tls_type))
3266 s->size += GOT_ENTRY_SIZE;
3267 }
3268 dyn = htab->elf.dynamic_sections_created;
3269 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
3270 and two if global. R_X86_64_GOTTPOFF needs one dynamic
3271 relocation. No dynamic relocation against resolved undefined
3272 weak symbol in executable. */
3273 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
3274 || tls_type == GOT_TLS_IE)
3275 htab->elf.srelgot->size += bed->s->sizeof_rela;
3276 else if (GOT_TLS_GD_P (tls_type))
3277 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
3278 else if (! GOT_TLS_GDESC_P (tls_type)
3279 && ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3280 && !resolved_to_zero)
3281 || h->root.type != bfd_link_hash_undefweak)
3282 && (bfd_link_pic (info)
3283 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3284 htab->elf.srelgot->size += bed->s->sizeof_rela;
3285 if (GOT_TLS_GDESC_P (tls_type))
3286 {
3287 htab->elf.srelplt->size += bed->s->sizeof_rela;
3288 htab->tlsdesc_plt = (bfd_vma) -1;
3289 }
3290 }
3291 else
3292 h->got.offset = (bfd_vma) -1;
3293
3294 if (eh->dyn_relocs == NULL)
3295 return TRUE;
3296
3297 /* In the shared -Bsymbolic case, discard space allocated for
3298 dynamic pc-relative relocs against symbols which turn out to be
3299 defined in regular objects. For the normal shared case, discard
3300 space for pc-relative relocs that have become local due to symbol
3301 visibility changes. */
3302
3303 if (bfd_link_pic (info))
3304 {
3305 /* Relocs that use pc_count are those that appear on a call
3306 insn, or certain REL relocs that can be generated via assembly.
3307 We want calls to protected symbols to resolve directly to the
3308 function rather than going via the plt. If people want
3309 function pointer comparisons to work as expected then they
3310 should avoid writing weird assembly. */
3311 if (SYMBOL_CALLS_LOCAL (info, h))
3312 {
3313 struct elf_dyn_relocs **pp;
3314
3315 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3316 {
3317 p->count -= p->pc_count;
3318 p->pc_count = 0;
3319 if (p->count == 0)
3320 *pp = p->next;
3321 else
3322 pp = &p->next;
3323 }
3324 }
3325
3326 /* Also discard relocs on undefined weak syms with non-default
3327 visibility or in PIE. */
3328 if (eh->dyn_relocs != NULL)
3329 {
3330 if (h->root.type == bfd_link_hash_undefweak)
3331 {
3332 /* Undefined weak symbol is never bound locally in shared
3333 library. */
3334 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3335 || resolved_to_zero)
3336 eh->dyn_relocs = NULL;
3337 else if (h->dynindx == -1
3338 && ! h->forced_local
3339 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3340 return FALSE;
3341 }
3342 /* For PIE, discard space for pc-relative relocs against
3343 symbols which turn out to need copy relocs. */
3344 else if (bfd_link_executable (info)
3345 && (h->needs_copy || eh->needs_copy)
3346 && h->def_dynamic
3347 && !h->def_regular)
3348 {
3349 struct elf_dyn_relocs **pp;
3350
3351 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3352 {
3353 if (p->pc_count != 0)
3354 *pp = p->next;
3355 else
3356 pp = &p->next;
3357 }
3358 }
3359 }
3360 }
3361 else if (ELIMINATE_COPY_RELOCS)
3362 {
3363 /* For the non-shared case, discard space for relocs against
3364 symbols which turn out to need copy relocs or are not
3365 dynamic. Keep dynamic relocations for run-time function
3366 pointer initialization. */
3367
3368 if ((!h->non_got_ref
3369 || eh->func_pointer_refcount > 0
3370 || (h->root.type == bfd_link_hash_undefweak
3371 && !resolved_to_zero))
3372 && ((h->def_dynamic
3373 && !h->def_regular)
3374 || (htab->elf.dynamic_sections_created
3375 && (h->root.type == bfd_link_hash_undefweak
3376 || h->root.type == bfd_link_hash_undefined))))
3377 {
3378 /* Make sure this symbol is output as a dynamic symbol.
3379 Undefined weak syms won't yet be marked as dynamic. */
3380 if (h->dynindx == -1
3381 && ! h->forced_local
3382 && ! resolved_to_zero
3383 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3384 return FALSE;
3385
3386 /* If that succeeded, we know we'll be keeping all the
3387 relocs. */
3388 if (h->dynindx != -1)
3389 goto keep;
3390 }
3391
3392 eh->dyn_relocs = NULL;
3393 eh->func_pointer_refcount = 0;
3394
3395 keep: ;
3396 }
3397
3398 /* Finally, allocate space. */
3399 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3400 {
3401 asection * sreloc;
3402
3403 sreloc = elf_section_data (p->sec)->sreloc;
3404
3405 BFD_ASSERT (sreloc != NULL);
3406
3407 sreloc->size += p->count * bed->s->sizeof_rela;
3408 }
3409
3410 return TRUE;
3411 }
3412
3413 /* Allocate space in .plt, .got and associated reloc sections for
3414 local dynamic relocs. */
3415
3416 static bfd_boolean
3417 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
3418 {
3419 struct elf_link_hash_entry *h
3420 = (struct elf_link_hash_entry *) *slot;
3421
3422 if (h->type != STT_GNU_IFUNC
3423 || !h->def_regular
3424 || !h->ref_regular
3425 || !h->forced_local
3426 || h->root.type != bfd_link_hash_defined)
3427 abort ();
3428
3429 return elf_x86_64_allocate_dynrelocs (h, inf);
3430 }
3431
3432 /* Find any dynamic relocs that apply to read-only sections. */
3433
3434 static bfd_boolean
3435 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
3436 void * inf)
3437 {
3438 struct elf_x86_64_link_hash_entry *eh;
3439 struct elf_dyn_relocs *p;
3440
3441 /* Skip local IFUNC symbols. */
3442 if (h->forced_local && h->type == STT_GNU_IFUNC)
3443 return TRUE;
3444
3445 eh = (struct elf_x86_64_link_hash_entry *) h;
3446 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3447 {
3448 asection *s = p->sec->output_section;
3449
3450 if (s != NULL && (s->flags & SEC_READONLY) != 0)
3451 {
3452 struct bfd_link_info *info = (struct bfd_link_info *) inf;
3453
3454 info->flags |= DF_TEXTREL;
3455
3456 if ((info->warn_shared_textrel && bfd_link_pic (info))
3457 || info->error_textrel)
3458 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
3459 p->sec->owner, h->root.root.string,
3460 p->sec);
3461
3462 /* Not an error, just cut short the traversal. */
3463 return FALSE;
3464 }
3465 }
3466 return TRUE;
3467 }
3468
3469 /* Convert loads via the GOT slot (GOTPCREL relocations) to direct references where possible. */
3470
3471 static bfd_boolean
3472 elf_x86_64_convert_load (bfd *abfd, asection *sec,
3473 struct bfd_link_info *link_info)
3474 {
3475 Elf_Internal_Shdr *symtab_hdr;
3476 Elf_Internal_Rela *internal_relocs;
3477 Elf_Internal_Rela *irel, *irelend;
3478 bfd_byte *contents;
3479 struct elf_x86_64_link_hash_table *htab;
3480 bfd_boolean changed;
3481 bfd_signed_vma *local_got_refcounts;
3482
3483 /* Don't even try to convert non-ELF outputs. */
3484 if (!is_elf_hash_table (link_info->hash))
3485 return FALSE;
3486
3487 /* Nothing to do if there is no need or no output. */
3488 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
3489 || sec->need_convert_load == 0
3490 || bfd_is_abs_section (sec->output_section))
3491 return TRUE;
3492
3493 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3494
3495 /* Load the relocations for this section. */
3496 internal_relocs = (_bfd_elf_link_read_relocs
3497 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
3498 link_info->keep_memory));
3499 if (internal_relocs == NULL)
3500 return FALSE;
3501
3502 changed = FALSE;
3503 htab = elf_x86_64_hash_table (link_info);
3504 local_got_refcounts = elf_local_got_refcounts (abfd);
3505
3506 /* Get the section contents. */
3507 if (elf_section_data (sec)->this_hdr.contents != NULL)
3508 contents = elf_section_data (sec)->this_hdr.contents;
3509 else
3510 {
3511 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3512 goto error_return;
3513 }
3514
3515 irelend = internal_relocs + sec->reloc_count;
3516 for (irel = internal_relocs; irel < irelend; irel++)
3517 {
3518 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3519 unsigned int r_symndx;
3520 struct elf_link_hash_entry *h;
3521 bfd_boolean converted;
3522
3523 if (r_type != R_X86_64_GOTPCRELX
3524 && r_type != R_X86_64_REX_GOTPCRELX
3525 && r_type != R_X86_64_GOTPCREL)
3526 continue;
3527
3528 r_symndx = htab->r_sym (irel->r_info);
3529 if (r_symndx < symtab_hdr->sh_info)
3530 h = elf_x86_64_get_local_sym_hash (htab, sec->owner,
3531 (const Elf_Internal_Rela *) irel,
3532 FALSE);
3533 else
3534 {
3535 h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info];
3536 while (h->root.type == bfd_link_hash_indirect
3537 || h->root.type == bfd_link_hash_warning)
3538 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3539 }
3540
3541 /* STT_GNU_IFUNC must keep GOTPCREL relocations. */
3542 if (h != NULL && h->type == STT_GNU_IFUNC)
3543 continue;
3544
3545 converted = FALSE;
3546 if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h,
3547 &converted, link_info))
3548 goto error_return;
3549
3550 if (converted)
3551 {
3552 changed = converted;
3553 if (h)
3554 {
3555 if (h->got.refcount > 0)
3556 h->got.refcount -= 1;
3557 }
3558 else
3559 {
3560 if (local_got_refcounts != NULL
3561 && local_got_refcounts[r_symndx] > 0)
3562 local_got_refcounts[r_symndx] -= 1;
3563 }
3564 }
3565 }
3566
3567 if (contents != NULL
3568 && elf_section_data (sec)->this_hdr.contents != contents)
3569 {
3570 if (!changed && !link_info->keep_memory)
3571 free (contents);
3572 else
3573 {
3574 /* Cache the section contents for elf_link_input_bfd. */
3575 elf_section_data (sec)->this_hdr.contents = contents;
3576 }
3577 }
3578
3579 if (elf_section_data (sec)->relocs != internal_relocs)
3580 {
3581 if (!changed)
3582 free (internal_relocs);
3583 else
3584 elf_section_data (sec)->relocs = internal_relocs;
3585 }
3586
3587 return TRUE;
3588
3589 error_return:
3590 if (contents != NULL
3591 && elf_section_data (sec)->this_hdr.contents != contents)
3592 free (contents);
3593 if (internal_relocs != NULL
3594 && elf_section_data (sec)->relocs != internal_relocs)
3595 free (internal_relocs);
3596 return FALSE;
3597 }
3598
3599 /* Set the sizes of the dynamic sections. */
3600
3601 static bfd_boolean
3602 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3603 struct bfd_link_info *info)
3604 {
3605 struct elf_x86_64_link_hash_table *htab;
3606 bfd *dynobj;
3607 asection *s;
3608 bfd_boolean relocs;
3609 bfd *ibfd;
3610 const struct elf_backend_data *bed;
3611
3612 htab = elf_x86_64_hash_table (info);
3613 if (htab == NULL)
3614 return FALSE;
3615 bed = get_elf_backend_data (output_bfd);
3616
3617 dynobj = htab->elf.dynobj;
3618 if (dynobj == NULL)
3619 abort ();
3620
3621 /* Set up .got offsets for local syms, and space for local dynamic
3622 relocs. */
3623 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3624 {
3625 bfd_signed_vma *local_got;
3626 bfd_signed_vma *end_local_got;
3627 char *local_tls_type;
3628 bfd_vma *local_tlsdesc_gotent;
3629 bfd_size_type locsymcount;
3630 Elf_Internal_Shdr *symtab_hdr;
3631 asection *srel;
3632
3633 if (! is_x86_64_elf (ibfd))
3634 continue;
3635
3636 for (s = ibfd->sections; s != NULL; s = s->next)
3637 {
3638 struct elf_dyn_relocs *p;
3639
3640 if (!elf_x86_64_convert_load (ibfd, s, info))
3641 return FALSE;
3642
3643 for (p = (struct elf_dyn_relocs *)
3644 (elf_section_data (s)->local_dynrel);
3645 p != NULL;
3646 p = p->next)
3647 {
3648 if (!bfd_is_abs_section (p->sec)
3649 && bfd_is_abs_section (p->sec->output_section))
3650 {
3651 /* Input section has been discarded, either because
3652 it is a copy of a linkonce section or due to
3653 linker script /DISCARD/, so we'll be discarding
3654 the relocs too. */
3655 }
3656 else if (p->count != 0)
3657 {
3658 srel = elf_section_data (p->sec)->sreloc;
3659 srel->size += p->count * bed->s->sizeof_rela;
3660 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3661 && (info->flags & DF_TEXTREL) == 0)
3662 {
3663 info->flags |= DF_TEXTREL;
3664 if ((info->warn_shared_textrel && bfd_link_pic (info))
3665 || info->error_textrel)
3666 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3667 p->sec->owner, p->sec);
3668 }
3669 }
3670 }
3671 }
3672
3673 local_got = elf_local_got_refcounts (ibfd);
3674 if (!local_got)
3675 continue;
3676
3677 symtab_hdr = &elf_symtab_hdr (ibfd);
3678 locsymcount = symtab_hdr->sh_info;
3679 end_local_got = local_got + locsymcount;
3680 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3681 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3682 s = htab->elf.sgot;
3683 srel = htab->elf.srelgot;
3684 for (; local_got < end_local_got;
3685 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3686 {
3687 *local_tlsdesc_gotent = (bfd_vma) -1;
3688 if (*local_got > 0)
3689 {
3690 if (GOT_TLS_GDESC_P (*local_tls_type))
3691 {
3692 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3693 - elf_x86_64_compute_jump_table_size (htab);
3694 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
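/* -2 flags a symbol that only needs the TLS descriptor slots
   allocated above in .got.plt; when a regular GOT entry is
   also needed (the GD case) the offset is overwritten below.  */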
3695 *local_got = (bfd_vma) -2;
3696 }
3697 if (! GOT_TLS_GDESC_P (*local_tls_type)
3698 || GOT_TLS_GD_P (*local_tls_type))
3699 {
3700 *local_got = s->size;
3701 s->size += GOT_ENTRY_SIZE;
3702 if (GOT_TLS_GD_P (*local_tls_type))
3703 s->size += GOT_ENTRY_SIZE;
3704 }
3705 if (bfd_link_pic (info)
3706 || GOT_TLS_GD_ANY_P (*local_tls_type)
3707 || *local_tls_type == GOT_TLS_IE)
3708 {
3709 if (GOT_TLS_GDESC_P (*local_tls_type))
3710 {
3711 htab->elf.srelplt->size
3712 += bed->s->sizeof_rela;
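/* -1 only records that a lazy TLSDESC PLT entry will be
   needed; the real offset is assigned later, once the PLT
   size is known (see the htab->tlsdesc_plt handling below).  */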
3713 htab->tlsdesc_plt = (bfd_vma) -1;
3714 }
3715 if (! GOT_TLS_GDESC_P (*local_tls_type)
3716 || GOT_TLS_GD_P (*local_tls_type))
3717 srel->size += bed->s->sizeof_rela;
3718 }
3719 }
3720 else
3721 *local_got = (bfd_vma) -1;
3722 }
3723 }
3724
3725 if (htab->tls_ld_got.refcount > 0)
3726 {
3727 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3728 relocs. */
3729 htab->tls_ld_got.offset = htab->elf.sgot->size;
3730 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3731 htab->elf.srelgot->size += bed->s->sizeof_rela;
3732 }
3733 else
3734 htab->tls_ld_got.offset = -1;
3735
3736 /* Allocate global sym .plt and .got entries, and space for global
3737 sym dynamic relocs. */
3738 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3739 info);
3740
3741 /* Allocate .plt and .got entries, and space for local symbols. */
3742 htab_traverse (htab->loc_hash_table,
3743 elf_x86_64_allocate_local_dynrelocs,
3744 info);
3745
3746 /* For every jump slot reserved in the sgotplt, reloc_count is
3747 incremented. However, when we reserve space for TLS descriptors,
3748 it's not incremented, so in order to compute the space reserved
3749 for them, it suffices to multiply the reloc count by the jump
3750 slot size.
3751
3752 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3753 so that R_X86_64_IRELATIVE entries come last. */
3754 if (htab->elf.srelplt)
3755 {
3756 htab->sgotplt_jump_table_size
3757 = elf_x86_64_compute_jump_table_size (htab);
3758 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3759 }
3760 else if (htab->elf.irelplt)
3761 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3762
3763 if (htab->tlsdesc_plt)
3764 {
3765 /* If we're not using lazy TLS relocations, don't generate the
3766 PLT and GOT entries they require. */
3767 if ((info->flags & DF_BIND_NOW))
3768 htab->tlsdesc_plt = 0;
3769 else
3770 {
3771 htab->tlsdesc_got = htab->elf.sgot->size;
3772 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3773 /* Reserve room for the initial entry.
3774 FIXME: we could probably do away with it in this case. */
3775 if (htab->elf.splt->size == 0)
3776 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3777 htab->tlsdesc_plt = htab->elf.splt->size;
3778 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3779 }
3780 }
3781
3782 if (htab->elf.sgotplt)
3783 {
3784 /* Don't allocate the .got.plt section if there are no GOT or PLT
3785 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3786 if ((htab->elf.hgot == NULL
3787 || !htab->elf.hgot->ref_regular_nonweak)
3788 && (htab->elf.sgotplt->size
3789 == get_elf_backend_data (output_bfd)->got_header_size)
3790 && (htab->elf.splt == NULL
3791 || htab->elf.splt->size == 0)
3792 && (htab->elf.sgot == NULL
3793 || htab->elf.sgot->size == 0)
3794 && (htab->elf.iplt == NULL
3795 || htab->elf.iplt->size == 0)
3796 && (htab->elf.igotplt == NULL
3797 || htab->elf.igotplt->size == 0))
3798 htab->elf.sgotplt->size = 0;
3799 }
3800
3801 if (htab->plt_eh_frame != NULL
3802 && htab->elf.splt != NULL
3803 && htab->elf.splt->size != 0
3804 && !bfd_is_abs_section (htab->elf.splt->output_section)
3805 && _bfd_elf_eh_frame_present (info))
3806 {
3807 const struct elf_x86_64_backend_data *arch_data
3808 = get_elf_x86_64_arch_data (bed);
3809 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3810 }
3811
3812 /* We now have determined the sizes of the various dynamic sections.
3813 Allocate memory for them. */
3814 relocs = FALSE;
3815 for (s = dynobj->sections; s != NULL; s = s->next)
3816 {
3817 if ((s->flags & SEC_LINKER_CREATED) == 0)
3818 continue;
3819
3820 if (s == htab->elf.splt
3821 || s == htab->elf.sgot
3822 || s == htab->elf.sgotplt
3823 || s == htab->elf.iplt
3824 || s == htab->elf.igotplt
3825 || s == htab->plt_bnd
3826 || s == htab->plt_got
3827 || s == htab->plt_eh_frame
3828 || s == htab->sdynbss)
3829 {
3830 /* Strip this section if we don't need it; see the
3831 comment below. */
3832 }
3833 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3834 {
3835 if (s->size != 0 && s != htab->elf.srelplt)
3836 relocs = TRUE;
3837
3838 /* We use the reloc_count field as a counter if we need
3839 to copy relocs into the output file. */
3840 if (s != htab->elf.srelplt)
3841 s->reloc_count = 0;
3842 }
3843 else
3844 {
3845 /* It's not one of our sections, so don't allocate space. */
3846 continue;
3847 }
3848
3849 if (s->size == 0)
3850 {
3851 /* If we don't need this section, strip it from the
3852 output file. This is mostly to handle .rela.bss and
3853 .rela.plt. We must create both sections in
3854 create_dynamic_sections, because they must be created
3855 before the linker maps input sections to output
3856 sections. The linker does that before
3857 adjust_dynamic_symbol is called, and it is that
3858 function which decides whether anything needs to go
3859 into these sections. */
3860
3861 s->flags |= SEC_EXCLUDE;
3862 continue;
3863 }
3864
3865 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3866 continue;
3867
3868 /* Allocate memory for the section contents. We use bfd_zalloc
3869 here in case unused entries are not reclaimed before the
3870 section's contents are written out. This should not happen,
3871 but this way if it does, we get a R_X86_64_NONE reloc instead
3872 of garbage. */
3873 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3874 if (s->contents == NULL)
3875 return FALSE;
3876 }
3877
3878 if (htab->plt_eh_frame != NULL
3879 && htab->plt_eh_frame->contents != NULL)
3880 {
3881 const struct elf_x86_64_backend_data *arch_data
3882 = get_elf_x86_64_arch_data (bed);
3883
3884 memcpy (htab->plt_eh_frame->contents,
3885 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3886 bfd_put_32 (dynobj, htab->elf.splt->size,
3887 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3888 }
3889
3890 if (htab->elf.dynamic_sections_created)
3891 {
3892 /* Add some entries to the .dynamic section. We fill in the
3893 values later, in elf_x86_64_finish_dynamic_sections, but we
3894 must add the entries now so that we get the correct size for
3895 the .dynamic section. The DT_DEBUG entry is filled in by the
3896 dynamic linker and used by the debugger. */
3897 #define add_dynamic_entry(TAG, VAL) \
3898 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3899
3900 if (bfd_link_executable (info))
3901 {
3902 if (!add_dynamic_entry (DT_DEBUG, 0))
3903 return FALSE;
3904 }
3905
3906 if (htab->elf.splt->size != 0)
3907 {
3908 /* DT_PLTGOT is used by prelink even if there is no PLT
3909 relocation. */
3910 if (!add_dynamic_entry (DT_PLTGOT, 0))
3911 return FALSE;
3912
3913 if (htab->elf.srelplt->size != 0)
3914 {
3915 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3916 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3917 || !add_dynamic_entry (DT_JMPREL, 0))
3918 return FALSE;
3919 }
3920
3921 if (htab->tlsdesc_plt
3922 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3923 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3924 return FALSE;
3925 }
3926
3927 if (relocs)
3928 {
3929 if (!add_dynamic_entry (DT_RELA, 0)
3930 || !add_dynamic_entry (DT_RELASZ, 0)
3931 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3932 return FALSE;
3933
3934 /* If any dynamic relocs apply to a read-only section,
3935 then we need a DT_TEXTREL entry. */
3936 if ((info->flags & DF_TEXTREL) == 0)
3937 elf_link_hash_traverse (&htab->elf,
3938 elf_x86_64_readonly_dynrelocs,
3939 info);
3940
3941 if ((info->flags & DF_TEXTREL) != 0)
3942 {
3943 if (htab->readonly_dynrelocs_against_ifunc)
3944 {
3945 info->callbacks->einfo
3946 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3947 bfd_set_error (bfd_error_bad_value);
3948 return FALSE;
3949 }
3950
3951 if (!add_dynamic_entry (DT_TEXTREL, 0))
3952 return FALSE;
3953 }
3954 }
3955 }
3956 #undef add_dynamic_entry
3957
3958 return TRUE;
3959 }
3960
3961 static bfd_boolean
3962 elf_x86_64_always_size_sections (bfd *output_bfd,
3963 struct bfd_link_info *info)
3964 {
3965 asection *tls_sec = elf_hash_table (info)->tls_sec;
3966
3967 if (tls_sec)
3968 {
3969 struct elf_link_hash_entry *tlsbase;
3970
3971 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3972 "_TLS_MODULE_BASE_",
3973 FALSE, FALSE, FALSE);
3974
3975 if (tlsbase && tlsbase->type == STT_TLS)
3976 {
3977 struct elf_x86_64_link_hash_table *htab;
3978 struct bfd_link_hash_entry *bh = NULL;
3979 const struct elf_backend_data *bed
3980 = get_elf_backend_data (output_bfd);
3981
3982 htab = elf_x86_64_hash_table (info);
3983 if (htab == NULL)
3984 return FALSE;
3985
3986 if (!(_bfd_generic_link_add_one_symbol
3987 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3988 tls_sec, 0, NULL, FALSE,
3989 bed->collect, &bh)))
3990 return FALSE;
3991
3992 htab->tls_module_base = bh;
3993
3994 tlsbase = (struct elf_link_hash_entry *)bh;
3995 tlsbase->def_regular = 1;
3996 tlsbase->other = STV_HIDDEN;
3997 tlsbase->root.linker_def = 1;
3998 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3999 }
4000 }
4001
4002 return TRUE;
4003 }
4004
4005 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
4006 executables. Rather than setting it to the beginning of the TLS
4007 section, we have to set it to the end. This function may be called
4008 multiple times; it is idempotent. */
4009
4010 static void
4011 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
4012 {
4013 struct elf_x86_64_link_hash_table *htab;
4014 struct bfd_link_hash_entry *base;
4015
4016 if (!bfd_link_executable (info))
4017 return;
4018
4019 htab = elf_x86_64_hash_table (info);
4020 if (htab == NULL)
4021 return;
4022
4023 base = htab->tls_module_base;
4024 if (base == NULL)
4025 return;
4026
4027 base->u.def.value = htab->elf.tls_size;
4028 }
4029
4030 /* Return the base VMA address which should be subtracted from real addresses
4031 when resolving @dtpoff relocation.
4032 This is PT_TLS segment p_vaddr. */
4033
4034 static bfd_vma
4035 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
4036 {
4037 /* If tls_sec is NULL, we should have signalled an error already. */
4038 if (elf_hash_table (info)->tls_sec == NULL)
4039 return 0;
4040 return elf_hash_table (info)->tls_sec->vma;
4041 }
4042
4043 /* Return the relocation value for @tpoff relocation
4044 if STT_TLS virtual address is ADDRESS. */
4045
4046 static bfd_vma
4047 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
4048 {
4049 struct elf_link_hash_table *htab = elf_hash_table (info);
4050 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
4051 bfd_vma static_tls_size;
4052
4053 /* If tls_segment is NULL, we should have signalled an error already. */
4054 if (htab->tls_sec == NULL)
4055 return 0;
4056
4057 /* Consider special static TLS alignment requirements. */
4058 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
4059 return address - static_tls_size - htab->tls_sec->vma;
4060 }
4061
4062 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
4063 branch? */
4064
4065 static bfd_boolean
4066 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
4067 {
4068 /* Opcode Instruction
4069 0xe8 call
4070 0xe9 jump
4071 0x0f 0x8x conditional jump */
4072 return ((offset > 0
4073 && (contents [offset - 1] == 0xe8
4074 || contents [offset - 1] == 0xe9))
4075 || (offset > 1
4076 && contents [offset - 2] == 0x0f
4077 && (contents [offset - 1] & 0xf0) == 0x80));
4078 }
4079
4080 /* Relocate an x86_64 ELF section. */
4081
4082 static bfd_boolean
4083 elf_x86_64_relocate_section (bfd *output_bfd,
4084 struct bfd_link_info *info,
4085 bfd *input_bfd,
4086 asection *input_section,
4087 bfd_byte *contents,
4088 Elf_Internal_Rela *relocs,
4089 Elf_Internal_Sym *local_syms,
4090 asection **local_sections)
4091 {
4092 struct elf_x86_64_link_hash_table *htab;
4093 Elf_Internal_Shdr *symtab_hdr;
4094 struct elf_link_hash_entry **sym_hashes;
4095 bfd_vma *local_got_offsets;
4096 bfd_vma *local_tlsdesc_gotents;
4097 Elf_Internal_Rela *rel;
4098 Elf_Internal_Rela *wrel;
4099 Elf_Internal_Rela *relend;
4100 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
4101
4102 BFD_ASSERT (is_x86_64_elf (input_bfd));
4103
4104 /* Skip if check_relocs failed. */
4105 if (input_section->check_relocs_failed)
4106 return FALSE;
4107
4108 htab = elf_x86_64_hash_table (info);
4109 if (htab == NULL)
4110 return FALSE;
4111 symtab_hdr = &elf_symtab_hdr (input_bfd);
4112 sym_hashes = elf_sym_hashes (input_bfd);
4113 local_got_offsets = elf_local_got_offsets (input_bfd);
4114 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
4115
4116 elf_x86_64_set_tls_module_base (info);
4117
4118 rel = wrel = relocs;
4119 relend = relocs + input_section->reloc_count;
4120 for (; rel < relend; wrel++, rel++)
4121 {
4122 unsigned int r_type;
4123 reloc_howto_type *howto;
4124 unsigned long r_symndx;
4125 struct elf_link_hash_entry *h;
4126 struct elf_x86_64_link_hash_entry *eh;
4127 Elf_Internal_Sym *sym;
4128 asection *sec;
4129 bfd_vma off, offplt, plt_offset;
4130 bfd_vma relocation;
4131 bfd_boolean unresolved_reloc;
4132 bfd_reloc_status_type r;
4133 int tls_type;
4134 asection *base_got, *resolved_plt;
4135 bfd_vma st_size;
4136 bfd_boolean resolved_to_zero;
4137
4138 r_type = ELF32_R_TYPE (rel->r_info);
4139 if (r_type == (int) R_X86_64_GNU_VTINHERIT
4140 || r_type == (int) R_X86_64_GNU_VTENTRY)
4141 {
4142 if (wrel != rel)
4143 *wrel = *rel;
4144 continue;
4145 }
4146
4147 if (r_type >= (int) R_X86_64_standard)
4148 {
4149 _bfd_error_handler
4150 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4151 input_bfd, input_section, r_type);
4152 bfd_set_error (bfd_error_bad_value);
4153 return FALSE;
4154 }
4155
4156 if (r_type != (int) R_X86_64_32
4157 || ABI_64_P (output_bfd))
4158 howto = x86_64_elf_howto_table + r_type;
4159 else
4160 howto = (x86_64_elf_howto_table
4161 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
4162 r_symndx = htab->r_sym (rel->r_info);
4163 h = NULL;
4164 sym = NULL;
4165 sec = NULL;
4166 unresolved_reloc = FALSE;
4167 if (r_symndx < symtab_hdr->sh_info)
4168 {
4169 sym = local_syms + r_symndx;
4170 sec = local_sections[r_symndx];
4171
4172 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
4173 &sec, rel);
4174 st_size = sym->st_size;
4175
4176 /* Relocate against local STT_GNU_IFUNC symbol. */
4177 if (!bfd_link_relocatable (info)
4178 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4179 {
4180 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
4181 rel, FALSE);
4182 if (h == NULL)
4183 abort ();
4184
4185 /* Set STT_GNU_IFUNC symbol value. */
4186 h->root.u.def.value = sym->st_value;
4187 h->root.u.def.section = sec;
4188 }
4189 }
4190 else
4191 {
4192 bfd_boolean warned ATTRIBUTE_UNUSED;
4193 bfd_boolean ignored ATTRIBUTE_UNUSED;
4194
4195 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4196 r_symndx, symtab_hdr, sym_hashes,
4197 h, sec, relocation,
4198 unresolved_reloc, warned, ignored);
4199 st_size = h->size;
4200 }
4201
4202 if (sec != NULL && discarded_section (sec))
4203 {
4204 _bfd_clear_contents (howto, input_bfd, input_section,
4205 contents + rel->r_offset);
4206 wrel->r_offset = rel->r_offset;
4207 wrel->r_info = 0;
4208 wrel->r_addend = 0;
4209
4210 /* For ld -r, remove relocations in debug sections against
4211 sections defined in discarded sections. Not done for
4212 eh_frame, as its editing code expects the relocs to be present. */
4213 if (bfd_link_relocatable (info)
4214 && (input_section->flags & SEC_DEBUGGING))
4215 wrel--;
4216
4217 continue;
4218 }
4219
4220 if (bfd_link_relocatable (info))
4221 {
4222 if (wrel != rel)
4223 *wrel = *rel;
4224 continue;
4225 }
4226
4227 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
4228 {
4229 if (r_type == R_X86_64_64)
4230 {
4231 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
4232 zero-extend it to 64bit if addend is zero. */
4233 r_type = R_X86_64_32;
4234 memset (contents + rel->r_offset + 4, 0, 4);
4235 }
4236 else if (r_type == R_X86_64_SIZE64)
4237 {
4238 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
4239 zero-extend it to 64bit if addend is zero. */
4240 r_type = R_X86_64_SIZE32;
4241 memset (contents + rel->r_offset + 4, 0, 4);
4242 }
4243 }
4244
4245 eh = (struct elf_x86_64_link_hash_entry *) h;
4246
4247 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4248 it here if it is defined in a non-shared object. */
4249 if (h != NULL
4250 && h->type == STT_GNU_IFUNC
4251 && h->def_regular)
4252 {
4253 bfd_vma plt_index;
4254 const char *name;
4255
4256 if ((input_section->flags & SEC_ALLOC) == 0)
4257 {
4258 /* Dynamic relocs are not propagated for SEC_DEBUGGING
4259 sections because such sections are not SEC_ALLOC and
4260 thus ld.so will not process them. */
4261 if ((input_section->flags & SEC_DEBUGGING) != 0)
4262 continue;
4263 abort ();
4264 }
4265
4266 switch (r_type)
4267 {
4268 default:
4269 break;
4270
4271 case R_X86_64_GOTPCREL:
4272 case R_X86_64_GOTPCRELX:
4273 case R_X86_64_REX_GOTPCRELX:
4274 case R_X86_64_GOTPCREL64:
4275 base_got = htab->elf.sgot;
4276 off = h->got.offset;
4277
4278 if (base_got == NULL)
4279 abort ();
4280
4281 if (off == (bfd_vma) -1)
4282 {
4283 /* We can't use h->got.offset here to save state, or
4284 even just remember the offset, as finish_dynamic_symbol
4285 would use that as offset into .got. */
4286
4287 if (h->plt.offset == (bfd_vma) -1)
4288 abort ();
4289
4290 if (htab->elf.splt != NULL)
4291 {
4292 plt_index = h->plt.offset / plt_entry_size - 1;
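/* Skip the three .got.plt entries reserved for the dynamic
   linker when computing this symbol's GOT PLT slot.  */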
4293 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4294 base_got = htab->elf.sgotplt;
4295 }
4296 else
4297 {
4298 plt_index = h->plt.offset / plt_entry_size;
4299 off = plt_index * GOT_ENTRY_SIZE;
4300 base_got = htab->elf.igotplt;
4301 }
4302
4303 if (h->dynindx == -1
4304 || h->forced_local
4305 || info->symbolic)
4306 {
4307 /* This references the local definition. We must
4308 initialize this entry in the global offset table.
4309 Since the offset must always be a multiple of 8,
4310 we use the least significant bit to record
4311 whether we have initialized it already.
4312
4313 When doing a dynamic link, we create a .rela.got
4314 relocation entry to initialize the value. This
4315 is done in the finish_dynamic_symbol routine. */
4316 if ((off & 1) != 0)
4317 off &= ~1;
4318 else
4319 {
4320 bfd_put_64 (output_bfd, relocation,
4321 base_got->contents + off);
4322 /* Note that this is harmless for the GOTPLT64
4323 case, as -1 | 1 still is -1. */
4324 h->got.offset |= 1;
4325 }
4326 }
4327 }
4328
4329 relocation = (base_got->output_section->vma
4330 + base_got->output_offset + off);
4331
4332 goto do_relocation;
4333 }
4334
4335 if (h->plt.offset == (bfd_vma) -1)
4336 {
4337 /* Handle static pointers of STT_GNU_IFUNC symbols. */
4338 if (r_type == htab->pointer_r_type
4339 && (input_section->flags & SEC_CODE) == 0)
4340 goto do_ifunc_pointer;
4341 goto bad_ifunc_reloc;
4342 }
4343
4344 /* STT_GNU_IFUNC symbol must go through PLT. */
4345 if (htab->elf.splt != NULL)
4346 {
4347 if (htab->plt_bnd != NULL)
4348 {
4349 resolved_plt = htab->plt_bnd;
4350 plt_offset = eh->plt_bnd.offset;
4351 }
4352 else
4353 {
4354 resolved_plt = htab->elf.splt;
4355 plt_offset = h->plt.offset;
4356 }
4357 }
4358 else
4359 {
4360 resolved_plt = htab->elf.iplt;
4361 plt_offset = h->plt.offset;
4362 }
4363
4364 relocation = (resolved_plt->output_section->vma
4365 + resolved_plt->output_offset + plt_offset);
4366
4367 switch (r_type)
4368 {
4369 default:
4370 bad_ifunc_reloc:
4371 if (h->root.root.string)
4372 name = h->root.root.string;
4373 else
4374 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4375 NULL);
4376 _bfd_error_handler
4377 (_("%B: relocation %s against STT_GNU_IFUNC "
4378 "symbol `%s' isn't supported"), input_bfd,
4379 howto->name, name);
4380 bfd_set_error (bfd_error_bad_value);
4381 return FALSE;
4382
4383 case R_X86_64_32S:
4384 if (bfd_link_pic (info))
4385 abort ();
4386 goto do_relocation;
4387
4388 case R_X86_64_32:
4389 if (ABI_64_P (output_bfd))
4390 goto do_relocation;
4391 /* FALLTHROUGH */
4392 case R_X86_64_64:
4393 do_ifunc_pointer:
4394 if (rel->r_addend != 0)
4395 {
4396 if (h->root.root.string)
4397 name = h->root.root.string;
4398 else
4399 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4400 sym, NULL);
4401 _bfd_error_handler
4402 (_("%B: relocation %s against STT_GNU_IFUNC "
4403 "symbol `%s' has non-zero addend: %d"),
4404 input_bfd, howto->name, name, rel->r_addend);
4405 bfd_set_error (bfd_error_bad_value);
4406 return FALSE;
4407 }
4408
4409 /* Generate a dynamic relocation only when there is a
4410 non-GOT reference in a shared object or there is no
4411 PLT. */
4412 if ((bfd_link_pic (info) && h->non_got_ref)
4413 || h->plt.offset == (bfd_vma) -1)
4414 {
4415 Elf_Internal_Rela outrel;
4416 asection *sreloc;
4417
4418 /* Need a dynamic relocation to get the real function
4419 address. */
4420 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4421 info,
4422 input_section,
4423 rel->r_offset);
4424 if (outrel.r_offset == (bfd_vma) -1
4425 || outrel.r_offset == (bfd_vma) -2)
4426 abort ();
4427
4428 outrel.r_offset += (input_section->output_section->vma
4429 + input_section->output_offset);
4430
4431 if (h->dynindx == -1
4432 || h->forced_local
4433 || bfd_link_executable (info))
4434 {
4435 /* This symbol is resolved locally. */
4436 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4437 outrel.r_addend = (h->root.u.def.value
4438 + h->root.u.def.section->output_section->vma
4439 + h->root.u.def.section->output_offset);
4440 }
4441 else
4442 {
4443 outrel.r_info = htab->r_info (h->dynindx, r_type);
4444 outrel.r_addend = 0;
4445 }
4446
4447 /* Dynamic relocations are stored in
4448 1. .rela.ifunc section in PIC object.
4449 2. .rela.got section in dynamic executable.
4450 3. .rela.iplt section in static executable. */
4451 if (bfd_link_pic (info))
4452 sreloc = htab->elf.irelifunc;
4453 else if (htab->elf.splt != NULL)
4454 sreloc = htab->elf.srelgot;
4455 else
4456 sreloc = htab->elf.irelplt;
4457 elf_append_rela (output_bfd, sreloc, &outrel);
4458
4459 /* If this reloc is against an external symbol, we
4460 do not want to fiddle with the addend. Otherwise,
4461 we need to include the symbol value so that it
4462 becomes an addend for the dynamic reloc. For an
4463 internal symbol, we have already updated the addend. */
4464 continue;
4465 }
4466 /* FALLTHROUGH */
4467 case R_X86_64_PC32:
4468 case R_X86_64_PC32_BND:
4469 case R_X86_64_PC64:
4470 case R_X86_64_PLT32:
4471 case R_X86_64_PLT32_BND:
4472 goto do_relocation;
4473 }
4474 }
4475
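/* An undefined weak symbol that is resolved to zero at run
   time needs no dynamic relocations; see the uses of
   resolved_to_zero below.  */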
4476 resolved_to_zero = (eh != NULL
4477 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
4478 eh->has_got_reloc,
4479 eh));
4480
4481 /* When generating a shared object, the relocations handled here are
4482 copied into the output file to be resolved at run time. */
4483 switch (r_type)
4484 {
4485 case R_X86_64_GOT32:
4486 case R_X86_64_GOT64:
4487 /* Relocation is to the entry for this symbol in the global
4488 offset table. */
4489 case R_X86_64_GOTPCREL:
4490 case R_X86_64_GOTPCRELX:
4491 case R_X86_64_REX_GOTPCRELX:
4492 case R_X86_64_GOTPCREL64:
4493 /* Use global offset table entry as symbol value. */
4494 case R_X86_64_GOTPLT64:
4495 /* This is obsolete and treated the same as GOT64. */
4496 base_got = htab->elf.sgot;
4497
4498 if (htab->elf.sgot == NULL)
4499 abort ();
4500
4501 if (h != NULL)
4502 {
4503 bfd_boolean dyn;
4504
4505 off = h->got.offset;
4506 if (h->needs_plt
4507 && h->plt.offset != (bfd_vma)-1
4508 && off == (bfd_vma)-1)
4509 {
4510 /* We can't use h->got.offset here to save
4511 state, or even just remember the offset, as
4512 finish_dynamic_symbol would use that as offset into
4513 .got. */
4514 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4515 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4516 base_got = htab->elf.sgotplt;
4517 }
4518
4519 dyn = htab->elf.dynamic_sections_created;
4520
4521 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4522 || (bfd_link_pic (info)
4523 && SYMBOL_REFERENCES_LOCAL (info, h))
4524 || (ELF_ST_VISIBILITY (h->other)
4525 && h->root.type == bfd_link_hash_undefweak))
4526 {
4527 /* This is actually a static link, or it is a -Bsymbolic
4528 link and the symbol is defined locally, or the symbol
4529 was forced to be local because of a version file. We
4530 must initialize this entry in the global offset table.
4531 Since the offset must always be a multiple of 8, we
4532 use the least significant bit to record whether we
4533 have initialized it already.
4534
4535 When doing a dynamic link, we create a .rela.got
4536 relocation entry to initialize the value. This is
4537 done in the finish_dynamic_symbol routine. */
4538 if ((off & 1) != 0)
4539 off &= ~1;
4540 else
4541 {
4542 bfd_put_64 (output_bfd, relocation,
4543 base_got->contents + off);
4544 /* Note that this is harmless for the GOTPLT64 case,
4545 as -1 | 1 still is -1. */
4546 h->got.offset |= 1;
4547 }
4548 }
4549 else
4550 unresolved_reloc = FALSE;
4551 }
4552 else
4553 {
4554 if (local_got_offsets == NULL)
4555 abort ();
4556
4557 off = local_got_offsets[r_symndx];
4558
4559 /* The offset must always be a multiple of 8. We use
4560 the least significant bit to record whether we have
4561 already generated the necessary reloc. */
4562 if ((off & 1) != 0)
4563 off &= ~1;
4564 else
4565 {
4566 bfd_put_64 (output_bfd, relocation,
4567 base_got->contents + off);
4568
4569 if (bfd_link_pic (info))
4570 {
4571 asection *s;
4572 Elf_Internal_Rela outrel;
4573
4574 /* We need to generate a R_X86_64_RELATIVE reloc
4575 for the dynamic linker. */
4576 s = htab->elf.srelgot;
4577 if (s == NULL)
4578 abort ();
4579
4580 outrel.r_offset = (base_got->output_section->vma
4581 + base_got->output_offset
4582 + off);
4583 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4584 outrel.r_addend = relocation;
4585 elf_append_rela (output_bfd, s, &outrel);
4586 }
4587
4588 local_got_offsets[r_symndx] |= 1;
4589 }
4590 }
4591
4592 if (off >= (bfd_vma) -2)
4593 abort ();
4594
4595 relocation = base_got->output_section->vma
4596 + base_got->output_offset + off;
4597 if (r_type != R_X86_64_GOTPCREL
4598 && r_type != R_X86_64_GOTPCRELX
4599 && r_type != R_X86_64_REX_GOTPCRELX
4600 && r_type != R_X86_64_GOTPCREL64)
4601 relocation -= htab->elf.sgotplt->output_section->vma
4602 - htab->elf.sgotplt->output_offset;
4603
4604 break;
4605
4606 case R_X86_64_GOTOFF64:
4607 /* Relocation is relative to the start of the global offset
4608 table. */
4609
4610 /* Check to make sure it isn't a protected function or data
4611 symbol for a shared library, since it may not be local when
4612 used as a function address or with a copy relocation. We also
4613 need to make sure that the symbol is referenced locally. */
4614 if (bfd_link_pic (info) && h)
4615 {
4616 if (!h->def_regular)
4617 {
4618 const char *v;
4619
4620 switch (ELF_ST_VISIBILITY (h->other))
4621 {
4622 case STV_HIDDEN:
4623 v = _("hidden symbol");
4624 break;
4625 case STV_INTERNAL:
4626 v = _("internal symbol");
4627 break;
4628 case STV_PROTECTED:
4629 v = _("protected symbol");
4630 break;
4631 default:
4632 v = _("symbol");
4633 break;
4634 }
4635
4636 _bfd_error_handler
4637 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4638 input_bfd, v, h->root.root.string);
4639 bfd_set_error (bfd_error_bad_value);
4640 return FALSE;
4641 }
4642 else if (!bfd_link_executable (info)
4643 && !SYMBOL_REFERENCES_LOCAL (info, h)
4644 && (h->type == STT_FUNC
4645 || h->type == STT_OBJECT)
4646 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4647 {
4648 _bfd_error_handler
4649 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4650 input_bfd,
4651 h->type == STT_FUNC ? "function" : "data",
4652 h->root.root.string);
4653 bfd_set_error (bfd_error_bad_value);
4654 return FALSE;
4655 }
4656 }
4657
4658 /* Note that sgot is not involved in this
4659 calculation. We always want the start of .got.plt. If we
4660 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4661 permitted by the ABI, we might have to change this
4662 calculation. */
4663 relocation -= htab->elf.sgotplt->output_section->vma
4664 + htab->elf.sgotplt->output_offset;
4665 break;
4666
4667 case R_X86_64_GOTPC32:
4668 case R_X86_64_GOTPC64:
4669 /* Use global offset table as symbol value. */
4670 relocation = htab->elf.sgotplt->output_section->vma
4671 + htab->elf.sgotplt->output_offset;
4672 unresolved_reloc = FALSE;
4673 break;
4674
4675 case R_X86_64_PLTOFF64:
4676 /* Relocation is PLT entry relative to GOT. For local
4677 symbols it's the symbol itself relative to GOT. */
4678 if (h != NULL
4679 /* See PLT32 handling. */
4680 && h->plt.offset != (bfd_vma) -1
4681 && htab->elf.splt != NULL)
4682 {
4683 if (htab->plt_bnd != NULL)
4684 {
4685 resolved_plt = htab->plt_bnd;
4686 plt_offset = eh->plt_bnd.offset;
4687 }
4688 else
4689 {
4690 resolved_plt = htab->elf.splt;
4691 plt_offset = h->plt.offset;
4692 }
4693
4694 relocation = (resolved_plt->output_section->vma
4695 + resolved_plt->output_offset
4696 + plt_offset);
4697 unresolved_reloc = FALSE;
4698 }
4699
4700 relocation -= htab->elf.sgotplt->output_section->vma
4701 + htab->elf.sgotplt->output_offset;
4702 break;
4703
4704 case R_X86_64_PLT32:
4705 case R_X86_64_PLT32_BND:
4706 /* Relocation is to the entry for this symbol in the
4707 procedure linkage table. */
4708
4709 /* Resolve a PLT32 reloc against a local symbol directly,
4710 without using the procedure linkage table. */
4711 if (h == NULL)
4712 break;
4713
4714 if ((h->plt.offset == (bfd_vma) -1
4715 && eh->plt_got.offset == (bfd_vma) -1)
4716 || htab->elf.splt == NULL)
4717 {
4718 /* We didn't make a PLT entry for this symbol. This
4719 happens when statically linking PIC code, or when
4720 using -Bsymbolic. */
4721 break;
4722 }
4723
4724 if (h->plt.offset != (bfd_vma) -1)
4725 {
4726 if (htab->plt_bnd != NULL)
4727 {
4728 resolved_plt = htab->plt_bnd;
4729 plt_offset = eh->plt_bnd.offset;
4730 }
4731 else
4732 {
4733 resolved_plt = htab->elf.splt;
4734 plt_offset = h->plt.offset;
4735 }
4736 }
4737 else
4738 {
4739 /* Use the GOT PLT. */
4740 resolved_plt = htab->plt_got;
4741 plt_offset = eh->plt_got.offset;
4742 }
4743
4744 relocation = (resolved_plt->output_section->vma
4745 + resolved_plt->output_offset
4746 + plt_offset);
4747 unresolved_reloc = FALSE;
4748 break;
4749
4750 case R_X86_64_SIZE32:
4751 case R_X86_64_SIZE64:
4752 /* Set to symbol size. */
4753 relocation = st_size;
4754 goto direct;
4755
4756 case R_X86_64_PC8:
4757 case R_X86_64_PC16:
4758 case R_X86_64_PC32:
4759 case R_X86_64_PC32_BND:
4760 /* Don't complain about -fPIC if the symbol is undefined when
4761 building an executable unless it is an unresolved weak symbol. */
4762 if ((input_section->flags & SEC_ALLOC) != 0
4763 && (input_section->flags & SEC_READONLY) != 0
4764 && h != NULL
4765 && ((bfd_link_executable (info)
4766 && h->root.type == bfd_link_hash_undefweak
4767 && !resolved_to_zero)
4768 || (bfd_link_pic (info)
4769 && !(bfd_link_pie (info)
4770 && h->root.type == bfd_link_hash_undefined))))
4771 {
4772 bfd_boolean fail = FALSE;
4773 bfd_boolean branch
4774 = ((r_type == R_X86_64_PC32
4775 || r_type == R_X86_64_PC32_BND)
4776 && is_32bit_relative_branch (contents, rel->r_offset));
4777
4778 if (SYMBOL_REFERENCES_LOCAL (info, h))
4779 {
4780 /* The symbol is referenced locally. It must either be
4781 defined locally, or the reference must be a branch. */
4782 fail = !h->def_regular && !branch;
4783 }
4784 else if (!(bfd_link_pie (info)
4785 && (h->needs_copy || eh->needs_copy)))
4786 {
4787 /* The symbol doesn't need a copy reloc and isn't referenced
4788 locally. We only allow a branch to a symbol with
4789 non-default visibility. */
4790 fail = (!branch
4791 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4792 }
4793
4794 if (fail)
4795 return elf_x86_64_need_pic (input_bfd, input_section,
4796 h, NULL, NULL, howto);
4797 }
4798 /* Fall through. */
4799
4800 case R_X86_64_8:
4801 case R_X86_64_16:
4802 case R_X86_64_32:
4803 case R_X86_64_PC64:
4804 case R_X86_64_64:
4805 /* FIXME: The ABI says the linker should make sure the value is
4806 the same when it's zero-extended to 64 bits. */
4807
4808 direct:
4809 if ((input_section->flags & SEC_ALLOC) == 0)
4810 break;
4811
4812 /* Don't copy a pc-relative relocation into the output file
4813 if the symbol needs a copy reloc or the symbol is undefined
4814 when building an executable. Copy dynamic function pointer
4815 relocations. Don't generate dynamic relocations against
4816 resolved undefined weak symbols in PIE. */
4817 if ((bfd_link_pic (info)
4818 && !(bfd_link_pie (info)
4819 && h != NULL
4820 && (h->needs_copy
4821 || eh->needs_copy
4822 || h->root.type == bfd_link_hash_undefined)
4823 && (IS_X86_64_PCREL_TYPE (r_type)
4824 || r_type == R_X86_64_SIZE32
4825 || r_type == R_X86_64_SIZE64))
4826 && (h == NULL
4827 || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4828 && !resolved_to_zero)
4829 || h->root.type != bfd_link_hash_undefweak))
4830 && ((! IS_X86_64_PCREL_TYPE (r_type)
4831 && r_type != R_X86_64_SIZE32
4832 && r_type != R_X86_64_SIZE64)
4833 || ! SYMBOL_CALLS_LOCAL (info, h)))
4834 || (ELIMINATE_COPY_RELOCS
4835 && !bfd_link_pic (info)
4836 && h != NULL
4837 && h->dynindx != -1
4838 && (!h->non_got_ref
4839 || eh->func_pointer_refcount > 0
4840 || (h->root.type == bfd_link_hash_undefweak
4841 && !resolved_to_zero))
4842 && ((h->def_dynamic && !h->def_regular)
4843 /* Undefined weak symbol is bound locally when
4844 PIC is false. */
4845 || h->root.type == bfd_link_hash_undefined)))
4846 {
4847 Elf_Internal_Rela outrel;
4848 bfd_boolean skip, relocate;
4849 asection *sreloc;
4850
4851 /* When generating a shared object, these relocations
4852 are copied into the output file to be resolved at run
4853 time. */
4854 skip = FALSE;
4855 relocate = FALSE;
4856
4857 outrel.r_offset =
4858 _bfd_elf_section_offset (output_bfd, info, input_section,
4859 rel->r_offset);
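/* An offset of -1 means the target of this reloc has been
   discarded, so no usable dynamic reloc can be emitted; -2
   means the same, except that the link-time relocation below
   is still applied to the section contents.  */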
4860 if (outrel.r_offset == (bfd_vma) -1)
4861 skip = TRUE;
4862 else if (outrel.r_offset == (bfd_vma) -2)
4863 skip = TRUE, relocate = TRUE;
4864
4865 outrel.r_offset += (input_section->output_section->vma
4866 + input_section->output_offset);
4867
4868 if (skip)
4869 memset (&outrel, 0, sizeof outrel);
4870
4871 /* h->dynindx may be -1 if this symbol was marked to
4872 become local. */
4873 else if (h != NULL
4874 && h->dynindx != -1
4875 && (IS_X86_64_PCREL_TYPE (r_type)
4876 || !(bfd_link_executable (info)
4877 || SYMBOLIC_BIND (info, h))
4878 || ! h->def_regular))
4879 {
4880 outrel.r_info = htab->r_info (h->dynindx, r_type);
4881 outrel.r_addend = rel->r_addend;
4882 }
4883 else
4884 {
4885 /* This symbol is local, or marked to become local.
4886 When the relocation overflow check is disabled, we
4887 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
4888 if (r_type == htab->pointer_r_type
4889 || (r_type == R_X86_64_32
4890 && info->no_reloc_overflow_check))
4891 {
4892 relocate = TRUE;
4893 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4894 outrel.r_addend = relocation + rel->r_addend;
4895 }
4896 else if (r_type == R_X86_64_64
4897 && !ABI_64_P (output_bfd))
4898 {
4899 relocate = TRUE;
4900 outrel.r_info = htab->r_info (0,
4901 R_X86_64_RELATIVE64);
4902 outrel.r_addend = relocation + rel->r_addend;
4903 /* Check addend overflow. */
4904 if ((outrel.r_addend & 0x80000000)
4905 != (rel->r_addend & 0x80000000))
4906 {
4907 const char *name;
4908 int addend = rel->r_addend;
4909 if (h && h->root.root.string)
4910 name = h->root.root.string;
4911 else
4912 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4913 sym, NULL);
4914 if (addend < 0)
4915 _bfd_error_handler
4916 (_("%B: addend -0x%x in relocation %s against "
4917 "symbol `%s' at 0x%lx in section `%A' is "
4918 "out of range"),
4919 input_bfd, input_section, addend,
4920 howto->name, name,
4921 (unsigned long) rel->r_offset);
4922 else
4923 _bfd_error_handler
4924 (_("%B: addend 0x%x in relocation %s against "
4925 "symbol `%s' at 0x%lx in section `%A' is "
4926 "out of range"),
4927 input_bfd, input_section, addend,
4928 howto->name, name,
4929 (unsigned long) rel->r_offset);
4930 bfd_set_error (bfd_error_bad_value);
4931 return FALSE;
4932 }
4933 }
4934 else
4935 {
4936 long sindx;
4937
4938 if (bfd_is_abs_section (sec))
4939 sindx = 0;
4940 else if (sec == NULL || sec->owner == NULL)
4941 {
4942 bfd_set_error (bfd_error_bad_value);
4943 return FALSE;
4944 }
4945 else
4946 {
4947 asection *osec;
4948
4949 /* We are turning this relocation into one
4950 against a section symbol. It would be
4951 proper to subtract the symbol's value,
4952 osec->vma, from the emitted reloc addend,
4953 but ld.so expects buggy relocs. */
4954 osec = sec->output_section;
4955 sindx = elf_section_data (osec)->dynindx;
4956 if (sindx == 0)
4957 {
4958 asection *oi = htab->elf.text_index_section;
4959 sindx = elf_section_data (oi)->dynindx;
4960 }
4961 BFD_ASSERT (sindx != 0);
4962 }
4963
4964 outrel.r_info = htab->r_info (sindx, r_type);
4965 outrel.r_addend = relocation + rel->r_addend;
4966 }
4967 }
4968
4969 sreloc = elf_section_data (input_section)->sreloc;
4970
4971 if (sreloc == NULL || sreloc->contents == NULL)
4972 {
4973 r = bfd_reloc_notsupported;
4974 goto check_relocation_error;
4975 }
4976
4977 elf_append_rela (output_bfd, sreloc, &outrel);
4978
4979 /* If this reloc is against an external symbol, we do
4980 not want to fiddle with the addend. Otherwise, we
4981 need to include the symbol value so that it becomes
4982 an addend for the dynamic reloc. */
4983 if (! relocate)
4984 continue;
4985 }
4986
4987 break;
4988
4989 case R_X86_64_TLSGD:
4990 case R_X86_64_GOTPC32_TLSDESC:
4991 case R_X86_64_TLSDESC_CALL:
4992 case R_X86_64_GOTTPOFF:
4993 tls_type = GOT_UNKNOWN;
4994 if (h == NULL && local_got_offsets)
4995 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4996 else if (h != NULL)
4997 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4998
4999 if (! elf_x86_64_tls_transition (info, input_bfd,
5000 input_section, contents,
5001 symtab_hdr, sym_hashes,
5002 &r_type, tls_type, rel,
5003 relend, h, r_symndx, TRUE))
5004 return FALSE;
5005
5006 if (r_type == R_X86_64_TPOFF32)
5007 {
5008 bfd_vma roff = rel->r_offset;
5009
5010 BFD_ASSERT (! unresolved_reloc);
5011
5012 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5013 {
5014 /* GD->LE transition. For 64bit, change
5015 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5016 .word 0x6666; rex64; call __tls_get_addr@PLT
5017 or
5018 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5019 .byte 0x66; rex64
5020 call *__tls_get_addr@GOTPCREL(%rip)
5021 which may be converted to
5022 addr32 call __tls_get_addr
5023 into:
5024 movq %fs:0, %rax
5025 leaq foo@tpoff(%rax), %rax
5026 For 32bit, change
5027 leaq foo@tlsgd(%rip), %rdi
5028 .word 0x6666; rex64; call __tls_get_addr@PLT
5029 or
5030 leaq foo@tlsgd(%rip), %rdi
5031 .byte 0x66; rex64
5032 call *__tls_get_addr@GOTPCREL(%rip)
5033 which may be converted to
5034 addr32 call __tls_get_addr
5035 into:
5036 movl %fs:0, %eax
5037 leaq foo@tpoff(%rax), %rax
5038 For largepic, change:
5039 leaq foo@tlsgd(%rip), %rdi
5040 movabsq $__tls_get_addr@pltoff, %rax
5041 addq %r15, %rax
5042 call *%rax
5043 into:
5044 movq %fs:0, %rax
5045 leaq foo@tpoff(%rax), %rax
5046 nopw 0x0(%rax,%rax,1) */
5047 int largepic = 0;
5048 if (ABI_64_P (output_bfd))
5049 {
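/* 0xb8 at this offset is the movabsq of the largepic
   sequence described above; anything else is the normal
   (non-largepic) call sequence.  */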
5050 if (contents[roff + 5] == 0xb8)
5051 {
5052 memcpy (contents + roff - 3,
5053 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
5054 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5055 largepic = 1;
5056 }
5057 else
5058 memcpy (contents + roff - 4,
5059 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5060 16);
5061 }
5062 else
5063 memcpy (contents + roff - 3,
5064 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5065 15);
5066 bfd_put_32 (output_bfd,
5067 elf_x86_64_tpoff (info, relocation),
5068 contents + roff + 8 + largepic);
5069 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
5070 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
5071 rel++;
5072 wrel++;
5073 continue;
5074 }
5075 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5076 {
5077 /* GDesc -> LE transition.
5078 It's originally something like:
5079 leaq x@tlsdesc(%rip), %rax
5080
5081 Change it to:
5082 movl $x@tpoff, %rax. */
5083
5084 unsigned int val, type;
5085
5086 type = bfd_get_8 (input_bfd, contents + roff - 3);
5087 val = bfd_get_8 (input_bfd, contents + roff - 1);
5088 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
5089 contents + roff - 3);
5090 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
5091 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
5092 contents + roff - 1);
5093 bfd_put_32 (output_bfd,
5094 elf_x86_64_tpoff (info, relocation),
5095 contents + roff);
5096 continue;
5097 }
5098 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5099 {
5100 /* GDesc -> LE transition.
5101 It's originally:
5102 call *(%rax)
5103 Turn it into:
5104 xchg %ax,%ax. */
5105 bfd_put_8 (output_bfd, 0x66, contents + roff);
5106 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5107 continue;
5108 }
5109 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
5110 {
5111 /* IE->LE transition:
5112 For 64bit, originally it can be one of:
5113 movq foo@gottpoff(%rip), %reg
5114 addq foo@gottpoff(%rip), %reg
5115 We change it into:
5116 movq $foo, %reg
5117 leaq foo(%reg), %reg
5118 addq $foo, %reg.
5119 For 32bit, originally it can be one of:
5120 movq foo@gottpoff(%rip), %reg
5121 addl foo@gottpoff(%rip), %reg
5122 We change it into:
5123 movq $foo, %reg
5124 leal foo(%reg), %reg
5125 addl $foo, %reg. */
5126
5127 unsigned int val, type, reg;
5128
5129 if (roff >= 3)
5130 val = bfd_get_8 (input_bfd, contents + roff - 3);
5131 else
5132 val = 0;
5133 type = bfd_get_8 (input_bfd, contents + roff - 2);
5134 reg = bfd_get_8 (input_bfd, contents + roff - 1);
5135 reg >>= 3;
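/* VAL is the byte before the opcode (the REX prefix, when
   present), TYPE the opcode itself and REG the ModRM reg
   field; mod is 00 for the RIP-relative form, so the shift
   leaves just the register number.  */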
5136 if (type == 0x8b)
5137 {
5138 /* movq */
5139 if (val == 0x4c)
5140 bfd_put_8 (output_bfd, 0x49,
5141 contents + roff - 3);
5142 else if (!ABI_64_P (output_bfd) && val == 0x44)
5143 bfd_put_8 (output_bfd, 0x41,
5144 contents + roff - 3);
5145 bfd_put_8 (output_bfd, 0xc7,
5146 contents + roff - 2);
5147 bfd_put_8 (output_bfd, 0xc0 | reg,
5148 contents + roff - 1);
5149 }
5150 else if (reg == 4)
5151 {
5152 /* addq/addl -> addq/addl - addressing with %rsp/%r12
5153 is special */
5154 if (val == 0x4c)
5155 bfd_put_8 (output_bfd, 0x49,
5156 contents + roff - 3);
5157 else if (!ABI_64_P (output_bfd) && val == 0x44)
5158 bfd_put_8 (output_bfd, 0x41,
5159 contents + roff - 3);
5160 bfd_put_8 (output_bfd, 0x81,
5161 contents + roff - 2);
5162 bfd_put_8 (output_bfd, 0xc0 | reg,
5163 contents + roff - 1);
5164 }
5165 else
5166 {
5167 /* addq/addl -> leaq/leal */
5168 if (val == 0x4c)
5169 bfd_put_8 (output_bfd, 0x4d,
5170 contents + roff - 3);
5171 else if (!ABI_64_P (output_bfd) && val == 0x44)
5172 bfd_put_8 (output_bfd, 0x45,
5173 contents + roff - 3);
5174 bfd_put_8 (output_bfd, 0x8d,
5175 contents + roff - 2);
5176 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
5177 contents + roff - 1);
5178 }
5179 bfd_put_32 (output_bfd,
5180 elf_x86_64_tpoff (info, relocation),
5181 contents + roff);
5182 continue;
5183 }
5184 else
5185 BFD_ASSERT (FALSE);
5186 }
5187
5188 if (htab->elf.sgot == NULL)
5189 abort ();
5190
5191 if (h != NULL)
5192 {
5193 off = h->got.offset;
5194 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
5195 }
5196 else
5197 {
5198 if (local_got_offsets == NULL)
5199 abort ();
5200
5201 off = local_got_offsets[r_symndx];
5202 offplt = local_tlsdesc_gotents[r_symndx];
5203 }
5204
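/* As with other GOT entries, the low bit of the offset records
   whether this TLS GOT slot has already been initialized.  */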
5205 if ((off & 1) != 0)
5206 off &= ~1;
5207 else
5208 {
5209 Elf_Internal_Rela outrel;
5210 int dr_type, indx;
5211 asection *sreloc;
5212
5213 if (htab->elf.srelgot == NULL)
5214 abort ();
5215
5216 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5217
5218 if (GOT_TLS_GDESC_P (tls_type))
5219 {
5220 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
5221 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
5222 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
5223 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
5224 + htab->elf.sgotplt->output_offset
5225 + offplt
5226 + htab->sgotplt_jump_table_size);
5227 sreloc = htab->elf.srelplt;
5228 if (indx == 0)
5229 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5230 else
5231 outrel.r_addend = 0;
5232 elf_append_rela (output_bfd, sreloc, &outrel);
5233 }
5234
5235 sreloc = htab->elf.srelgot;
5236
5237 outrel.r_offset = (htab->elf.sgot->output_section->vma
5238 + htab->elf.sgot->output_offset + off);
5239
5240 if (GOT_TLS_GD_P (tls_type))
5241 dr_type = R_X86_64_DTPMOD64;
5242 else if (GOT_TLS_GDESC_P (tls_type))
5243 goto dr_done;
5244 else
5245 dr_type = R_X86_64_TPOFF64;
5246
5247 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
5248 outrel.r_addend = 0;
5249 if ((dr_type == R_X86_64_TPOFF64
5250 || dr_type == R_X86_64_TLSDESC) && indx == 0)
5251 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5252 outrel.r_info = htab->r_info (indx, dr_type);
5253
5254 elf_append_rela (output_bfd, sreloc, &outrel);
5255
5256 if (GOT_TLS_GD_P (tls_type))
5257 {
5258 if (indx == 0)
5259 {
5260 BFD_ASSERT (! unresolved_reloc);
5261 bfd_put_64 (output_bfd,
5262 relocation - elf_x86_64_dtpoff_base (info),
5263 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5264 }
5265 else
5266 {
5267 bfd_put_64 (output_bfd, 0,
5268 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5269 outrel.r_info = htab->r_info (indx,
5270 R_X86_64_DTPOFF64);
5271 outrel.r_offset += GOT_ENTRY_SIZE;
5272 elf_append_rela (output_bfd, sreloc,
5273 &outrel);
5274 }
5275 }
5276
5277 dr_done:
5278 if (h != NULL)
5279 h->got.offset |= 1;
5280 else
5281 local_got_offsets[r_symndx] |= 1;
5282 }
5283
5284 if (off >= (bfd_vma) -2
5285 && ! GOT_TLS_GDESC_P (tls_type))
5286 abort ();
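/* If the TLS transition above left the relocation type
   unchanged, resolve it through the GOT/TLSDESC slot set up
   above; otherwise rewrite the code sequence (GD->IE or
   GDesc->IE) below.  */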
5287 if (r_type == ELF32_R_TYPE (rel->r_info))
5288 {
5289 if (r_type == R_X86_64_GOTPC32_TLSDESC
5290 || r_type == R_X86_64_TLSDESC_CALL)
5291 relocation = htab->elf.sgotplt->output_section->vma
5292 + htab->elf.sgotplt->output_offset
5293 + offplt + htab->sgotplt_jump_table_size;
5294 else
5295 relocation = htab->elf.sgot->output_section->vma
5296 + htab->elf.sgot->output_offset + off;
5297 unresolved_reloc = FALSE;
5298 }
5299 else
5300 {
5301 bfd_vma roff = rel->r_offset;
5302
5303 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5304 {
5305 /* GD->IE transition. For 64bit, change
5306 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5307 .word 0x6666; rex64; call __tls_get_addr@PLT
5308 or
5309 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5310 .byte 0x66; rex64
5311 call *__tls_get_addr@GOTPCREL(%rip)
5312 which may be converted to
5313 addr32 call __tls_get_addr
5314 into:
5315 movq %fs:0, %rax
5316 addq foo@gottpoff(%rip), %rax
5317 For 32bit, change
5318 leaq foo@tlsgd(%rip), %rdi
5319 .word 0x6666; rex64; call __tls_get_addr@PLT
5320 or
5321 leaq foo@tlsgd(%rip), %rdi
5322 .byte 0x66; rex64;
5323 call *__tls_get_addr@GOTPCREL(%rip)
5324 which may be converted to
5325 addr32 call __tls_get_addr
5326 into:
5327 movl %fs:0, %eax
5328 addq foo@gottpoff(%rip), %rax
5329 For largepic, change:
5330 leaq foo@tlsgd(%rip), %rdi
5331 movabsq $__tls_get_addr@pltoff, %rax
5332 addq %r15, %rax
5333 call *%rax
5334 into:
5335 movq %fs:0, %rax
5336 addq foo@gottpoff(%rax), %rax
5337 nopw 0x0(%rax,%rax,1) */
5338 int largepic = 0;
5339 if (ABI_64_P (output_bfd))
5340 {
5341 if (contents[roff + 5] == 0xb8)
5342 {
5343 memcpy (contents + roff - 3,
5344 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
5345 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5346 largepic = 1;
5347 }
5348 else
5349 memcpy (contents + roff - 4,
5350 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5351 16);
5352 }
5353 else
5354 memcpy (contents + roff - 3,
5355 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5356 15);
5357
5358 relocation = (htab->elf.sgot->output_section->vma
5359 + htab->elf.sgot->output_offset + off
5360 - roff
5361 - largepic
5362 - input_section->output_section->vma
5363 - input_section->output_offset
5364 - 12);
5365 bfd_put_32 (output_bfd, relocation,
5366 contents + roff + 8 + largepic);
5367 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5368 rel++;
5369 wrel++;
5370 continue;
5371 }
5372 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5373 {
5374 /* GDesc -> IE transition.
5375 It's originally something like:
5376 leaq x@tlsdesc(%rip), %rax
5377
5378 Change it to:
5379 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
5380
5381 /* Now modify the instruction as appropriate. To
5382 turn a leaq into a movq in the form we use it, it
5383 suffices to change the second byte from 0x8d to
5384 0x8b. */
5385 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
5386
5387 bfd_put_32 (output_bfd,
5388 htab->elf.sgot->output_section->vma
5389 + htab->elf.sgot->output_offset + off
5390 - rel->r_offset
5391 - input_section->output_section->vma
5392 - input_section->output_offset
5393 - 4,
5394 contents + roff);
5395 continue;
5396 }
5397 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5398 {
5399 /* GDesc -> IE transition.
5400 It's originally:
5401 call *(%rax)
5402
5403 Change it to:
5404 xchg %ax, %ax. */
5405
5406 bfd_put_8 (output_bfd, 0x66, contents + roff);
5407 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5408 continue;
5409 }
5410 else
5411 BFD_ASSERT (FALSE);
5412 }
5413 break;
5414
5415 case R_X86_64_TLSLD:
5416 if (! elf_x86_64_tls_transition (info, input_bfd,
5417 input_section, contents,
5418 symtab_hdr, sym_hashes,
5419 &r_type, GOT_UNKNOWN, rel,
5420 relend, h, r_symndx, TRUE))
5421 return FALSE;
5422
5423 if (r_type != R_X86_64_TLSLD)
5424 {
5425 /* LD->LE transition:
5426 leaq foo@tlsld(%rip), %rdi
5427 call __tls_get_addr@PLT
5428 For 64bit, we change it into:
5429 .word 0x6666; .byte 0x66; movq %fs:0, %rax
5430 For 32bit, we change it into:
5431 nopl 0x0(%rax); movl %fs:0, %eax
5432 Or
5433 leaq foo@tlsld(%rip), %rdi;
5434 call *__tls_get_addr@GOTPCREL(%rip)
5435 which may be converted to
5436 addr32 call __tls_get_addr
5437 For 64bit, we change it into:
5438 .word 0x6666; .word 0x6666; movq %fs:0, %rax
5439 For 32bit, we change it into:
5440 nopw 0x0(%rax); movl %fs:0, %eax
5441 For largepic, change:
5442 leaq foo@tlsgd(%rip), %rdi
5443 movabsq $__tls_get_addr@pltoff, %rax
5444 addq %rbx, %rax
5445 call *%rax
5446 into
5447 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
5448 movq %fs:0, %rax */
5449
5450 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
5451 if (ABI_64_P (output_bfd))
5452 {
5453 if (contents[rel->r_offset + 5] == 0xb8)
5454 memcpy (contents + rel->r_offset - 3,
5455 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
5456 "\x64\x48\x8b\x04\x25\0\0\0", 22);
5457 else if (contents[rel->r_offset + 4] == 0xff
5458 || contents[rel->r_offset + 4] == 0x67)
5459 memcpy (contents + rel->r_offset - 3,
5460 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
5461 13);
5462 else
5463 memcpy (contents + rel->r_offset - 3,
5464 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
5465 }
5466 else
5467 {
5468 if (contents[rel->r_offset + 4] == 0xff)
5469 memcpy (contents + rel->r_offset - 3,
5470 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
5471 13);
5472 else
5473 memcpy (contents + rel->r_offset - 3,
5474 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5475 }
5476 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
5477 and R_X86_64_PLTOFF64. */
5478 rel++;
5479 wrel++;
5480 continue;
5481 }
5482
5483 if (htab->elf.sgot == NULL)
5484 abort ();
5485
5486 off = htab->tls_ld_got.offset;
5487 if (off & 1)
5488 off &= ~1;
5489 else
5490 {
5491 Elf_Internal_Rela outrel;
5492
5493 if (htab->elf.srelgot == NULL)
5494 abort ();
5495
5496 outrel.r_offset = (htab->elf.sgot->output_section->vma
5497 + htab->elf.sgot->output_offset + off);
5498
5499 bfd_put_64 (output_bfd, 0,
5500 htab->elf.sgot->contents + off);
5501 bfd_put_64 (output_bfd, 0,
5502 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5503 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5504 outrel.r_addend = 0;
5505 elf_append_rela (output_bfd, htab->elf.srelgot,
5506 &outrel);
5507 htab->tls_ld_got.offset |= 1;
5508 }
5509 relocation = htab->elf.sgot->output_section->vma
5510 + htab->elf.sgot->output_offset + off;
5511 unresolved_reloc = FALSE;
5512 break;
5513
5514 case R_X86_64_DTPOFF32:
5515 if (!bfd_link_executable (info)
5516 || (input_section->flags & SEC_CODE) == 0)
5517 relocation -= elf_x86_64_dtpoff_base (info);
5518 else
5519 relocation = elf_x86_64_tpoff (info, relocation);
5520 break;
5521
5522 case R_X86_64_TPOFF32:
5523 case R_X86_64_TPOFF64:
5524 BFD_ASSERT (bfd_link_executable (info));
5525 relocation = elf_x86_64_tpoff (info, relocation);
5526 break;
5527
5528 case R_X86_64_DTPOFF64:
5529 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5530 relocation -= elf_x86_64_dtpoff_base (info);
5531 break;
5532
5533 default:
5534 break;
5535 }
5536
5537 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5538 because such sections are not SEC_ALLOC and thus ld.so will
5539 not process them. */
5540 if (unresolved_reloc
5541 && !((input_section->flags & SEC_DEBUGGING) != 0
5542 && h->def_dynamic)
5543 && _bfd_elf_section_offset (output_bfd, info, input_section,
5544 rel->r_offset) != (bfd_vma) -1)
5545 {
5546 _bfd_error_handler
5547 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5548 input_bfd,
5549 input_section,
5550 (long) rel->r_offset,
5551 howto->name,
5552 h->root.root.string);
5553 return FALSE;
5554 }
5555
5556 do_relocation:
5557 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5558 contents, rel->r_offset,
5559 relocation, rel->r_addend);
5560
5561 check_relocation_error:
5562 if (r != bfd_reloc_ok)
5563 {
5564 const char *name;
5565
5566 if (h != NULL)
5567 name = h->root.root.string;
5568 else
5569 {
5570 name = bfd_elf_string_from_elf_section (input_bfd,
5571 symtab_hdr->sh_link,
5572 sym->st_name);
5573 if (name == NULL)
5574 return FALSE;
5575 if (*name == '\0')
5576 name = bfd_section_name (input_bfd, sec);
5577 }
5578
5579 if (r == bfd_reloc_overflow)
5580 (*info->callbacks->reloc_overflow)
5581 (info, (h ? &h->root : NULL), name, howto->name,
5582 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5583 else
5584 {
5585 _bfd_error_handler
5586 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5587 input_bfd, input_section,
5588 (long) rel->r_offset, name, (int) r);
5589 return FALSE;
5590 }
5591 }
5592
5593 if (wrel != rel)
5594 *wrel = *rel;
5595 }
5596
5597 if (wrel != rel)
5598 {
5599 Elf_Internal_Shdr *rel_hdr;
5600 size_t deleted = rel - wrel;
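/* WREL lags REL by one entry for every relocation removed
   above; shrink the reloc section headers to match.  */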
5601
5602 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5603 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5604 if (rel_hdr->sh_size == 0)
5605 {
5606 /* It is too late to remove an empty reloc section. Leave
5607 one NONE reloc.
5608 ??? What is wrong with an empty section??? */
5609 rel_hdr->sh_size = rel_hdr->sh_entsize;
5610 deleted -= 1;
5611 }
5612 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5613 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5614 input_section->reloc_count -= deleted;
5615 }
5616
5617 return TRUE;
5618 }
5619
5620 /* Finish up dynamic symbol handling. We set the contents of various
5621 dynamic sections here. */
5622
5623 static bfd_boolean
5624 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5625 struct bfd_link_info *info,
5626 struct elf_link_hash_entry *h,
5627 Elf_Internal_Sym *sym)
5628 {
5629 struct elf_x86_64_link_hash_table *htab;
5630 const struct elf_x86_64_backend_data *abed;
5631 bfd_boolean use_plt_bnd;
5632 struct elf_x86_64_link_hash_entry *eh;
5633 bfd_boolean local_undefweak;
5634
5635 htab = elf_x86_64_hash_table (info);
5636 if (htab == NULL)
5637 return FALSE;
5638
5639 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5640 section only if there is .plt section. */
5641 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5642 abed = (use_plt_bnd
5643 ? &elf_x86_64_bnd_arch_bed
5644 : get_elf_x86_64_backend_data (output_bfd));
5645
5646 eh = (struct elf_x86_64_link_hash_entry *) h;
5647
5648 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
5649 resolved undefined weak symbols in executable so that their
5650 references have value 0 at run-time. */
5651 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
5652 eh->has_got_reloc,
5653 eh);
5654
5655 if (h->plt.offset != (bfd_vma) -1)
5656 {
5657 bfd_vma plt_index;
5658 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5659 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5660 Elf_Internal_Rela rela;
5661 bfd_byte *loc;
5662 asection *plt, *gotplt, *relplt, *resolved_plt;
5663 const struct elf_backend_data *bed;
5664 bfd_vma plt_got_pcrel_offset;
5665
5666 /* When building a static executable, use .iplt, .igot.plt and
5667 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5668 if (htab->elf.splt != NULL)
5669 {
5670 plt = htab->elf.splt;
5671 gotplt = htab->elf.sgotplt;
5672 relplt = htab->elf.srelplt;
5673 }
5674 else
5675 {
5676 plt = htab->elf.iplt;
5677 gotplt = htab->elf.igotplt;
5678 relplt = htab->elf.irelplt;
5679 }
5680
5681 /* This symbol has an entry in the procedure linkage table. Set
5682 it up. */
5683 if ((h->dynindx == -1
5684 && !local_undefweak
5685 && !((h->forced_local || bfd_link_executable (info))
5686 && h->def_regular
5687 && h->type == STT_GNU_IFUNC))
5688 || plt == NULL
5689 || gotplt == NULL
5690 || relplt == NULL)
5691 abort ();
5692
5693 /* Get the index in the procedure linkage table which
5694 corresponds to this symbol. This is the index of this symbol
5695 in all the symbols for which we are making plt entries. The
5696 first entry in the procedure linkage table is reserved.
5697
5698 Get the offset into the .got table of the entry that
5699 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5700 bytes. The first three are reserved for the dynamic linker.
5701
5702 For static executables, we don't reserve anything. */
5703
5704 if (plt == htab->elf.splt)
5705 {
5706 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5707 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5708 }
5709 else
5710 {
5711 got_offset = h->plt.offset / abed->plt_entry_size;
5712 got_offset = got_offset * GOT_ENTRY_SIZE;
5713 }
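	  /* For the normal .plt case above: with 16-byte PLT entries and
	     8-byte GOT entries, the first real PLT entry (h->plt.offset
	     == 16) maps to .got.plt offset (0 + 3) * 8 == 24, the slot
	     just past the three reserved entries.  */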
5714
5715 plt_plt_insn_end = abed->plt_plt_insn_end;
5716 plt_plt_offset = abed->plt_plt_offset;
5717 plt_got_insn_size = abed->plt_got_insn_size;
5718 plt_got_offset = abed->plt_got_offset;
5719 if (use_plt_bnd)
5720 {
5721 /* Use the second PLT with BND relocations. */
5722 const bfd_byte *plt_entry, *plt2_entry;
5723
5724 if (eh->has_bnd_reloc)
5725 {
5726 plt_entry = elf_x86_64_bnd_plt_entry;
5727 plt2_entry = elf_x86_64_bnd_plt2_entry;
5728 }
5729 else
5730 {
5731 plt_entry = elf_x86_64_legacy_plt_entry;
5732 plt2_entry = elf_x86_64_legacy_plt2_entry;
5733
5734 /* Subtract 1 since there is no BND prefix. */
5735 plt_plt_insn_end -= 1;
5736 plt_plt_offset -= 1;
5737 plt_got_insn_size -= 1;
5738 plt_got_offset -= 1;
5739 }
5740
5741 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5742 == sizeof (elf_x86_64_legacy_plt_entry));
5743
5744 /* Fill in the entry in the procedure linkage table. */
5745 memcpy (plt->contents + h->plt.offset,
5746 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5747 /* Fill in the entry in the second PLT. */
5748 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5749 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5750
5751 resolved_plt = htab->plt_bnd;
5752 plt_offset = eh->plt_bnd.offset;
5753 }
5754 else
5755 {
5756 /* Fill in the entry in the procedure linkage table. */
5757 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5758 abed->plt_entry_size);
5759
5760 resolved_plt = plt;
5761 plt_offset = h->plt.offset;
5762 }
5763
5764 /* Insert the relocation positions of the plt section. */
5765
5766 	  /* Put the PC-relative offset of the GOT entry into the instruction
5767 	     referring to it, subtracting the size of that instruction. */
5768 plt_got_pcrel_offset = (gotplt->output_section->vma
5769 + gotplt->output_offset
5770 + got_offset
5771 - resolved_plt->output_section->vma
5772 - resolved_plt->output_offset
5773 - plt_offset
5774 - plt_got_insn_size);
5775
5776 /* Check PC-relative offset overflow in PLT entry. */
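	  /* The displacement must fit in a signed 32-bit field: adding
	     0x80000000 maps the valid range [-2^31, 2^31) onto
	     [0, 0xffffffff], so any larger result means the offset does
	     not fit in the 32-bit field of the instruction.  */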
5777 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5778 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5779 output_bfd, h->root.root.string);
5780
5781 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5782 resolved_plt->contents + plt_offset + plt_got_offset);
5783
5784       /* Fill in the entry in the global offset table; initially this
5785 	 points to the second part of the PLT entry. Leave the entry
5786 	 as zero for an undefined weak symbol in a PIE, since no PLT
5787 	 relocation is emitted against an undefined weak symbol in a PIE. */
5788 if (!local_undefweak)
5789 {
5790 bfd_put_64 (output_bfd, (plt->output_section->vma
5791 + plt->output_offset
5792 + h->plt.offset
5793 + abed->plt_lazy_offset),
5794 gotplt->contents + got_offset);
5795
5796 /* Fill in the entry in the .rela.plt section. */
5797 rela.r_offset = (gotplt->output_section->vma
5798 + gotplt->output_offset
5799 + got_offset);
5800 if (h->dynindx == -1
5801 || ((bfd_link_executable (info)
5802 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5803 && h->def_regular
5804 && h->type == STT_GNU_IFUNC))
5805 {
5806 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5807 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5808 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5809 rela.r_addend = (h->root.u.def.value
5810 + h->root.u.def.section->output_section->vma
5811 + h->root.u.def.section->output_offset);
5812 /* R_X86_64_IRELATIVE comes last. */
5813 plt_index = htab->next_irelative_index--;
5814 }
5815 else
5816 {
5817 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5818 rela.r_addend = 0;
5819 plt_index = htab->next_jump_slot_index++;
5820 }
5821
5822 /* Don't fill PLT entry for static executables. */
5823 if (plt == htab->elf.splt)
5824 {
5825 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5826
5827 /* Put relocation index. */
5828 bfd_put_32 (output_bfd, plt_index,
5829 (plt->contents + h->plt.offset
5830 + abed->plt_reloc_offset));
5831
5832 	      /* Put the offset for the jmp to .PLT0 and check for overflow.
5833 		 We don't check the relocation index for overflow since the
5834 		 branch displacement will overflow first. */
5835 if (plt0_offset > 0x80000000)
5836 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5837 output_bfd, h->root.root.string);
5838 bfd_put_32 (output_bfd, - plt0_offset,
5839 plt->contents + h->plt.offset + plt_plt_offset);
5840 }
5841
5842 bed = get_elf_backend_data (output_bfd);
5843 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5844 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5845 }
5846 }
5847 else if (eh->plt_got.offset != (bfd_vma) -1)
5848 {
5849 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5850 asection *plt, *got;
5851 bfd_boolean got_after_plt;
5852 int32_t got_pcrel_offset;
5853 const bfd_byte *got_plt_entry;
5854
5855 /* Set the entry in the GOT procedure linkage table. */
5856 plt = htab->plt_got;
5857 got = htab->elf.sgot;
5858 got_offset = h->got.offset;
5859
5860 if (got_offset == (bfd_vma) -1
5861 || h->type == STT_GNU_IFUNC
5862 || plt == NULL
5863 || got == NULL)
5864 abort ();
5865
5866       /* Use the second PLT entry template for the GOT PLT since they
5867 	 are identical. */
5868 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5869 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5870 if (eh->has_bnd_reloc)
5871 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5872 else
5873 {
5874 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5875
5876 /* Subtract 1 since there is no BND prefix. */
5877 plt_got_insn_size -= 1;
5878 plt_got_offset -= 1;
5879 }
5880
5881 /* Fill in the entry in the GOT procedure linkage table. */
5882 plt_offset = eh->plt_got.offset;
5883 memcpy (plt->contents + plt_offset,
5884 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5885
5886       /* Put the PC-relative offset of the GOT entry into the instruction
5887 	 referring to it, subtracting the size of that instruction. */
5888 got_pcrel_offset = (got->output_section->vma
5889 + got->output_offset
5890 + got_offset
5891 - plt->output_section->vma
5892 - plt->output_offset
5893 - plt_offset
5894 - plt_got_insn_size);
5895
5896 /* Check PC-relative offset overflow in GOT PLT entry. */
5897 got_after_plt = got->output_section->vma > plt->output_section->vma;
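      /* got_pcrel_offset is only 32 bits wide; if its sign disagrees with
	 the relative placement of .got and .plt, the true displacement did
	 not fit and has wrapped.  */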
5898 if ((got_after_plt && got_pcrel_offset < 0)
5899 || (!got_after_plt && got_pcrel_offset > 0))
5900 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5901 output_bfd, h->root.root.string);
5902
5903 bfd_put_32 (output_bfd, got_pcrel_offset,
5904 plt->contents + plt_offset + plt_got_offset);
5905 }
5906
5907 if (!local_undefweak
5908 && !h->def_regular
5909 && (h->plt.offset != (bfd_vma) -1
5910 || eh->plt_got.offset != (bfd_vma) -1))
5911 {
5912 /* Mark the symbol as undefined, rather than as defined in
5913 the .plt section. Leave the value if there were any
5914 relocations where pointer equality matters (this is a clue
5915 for the dynamic linker, to make function pointer
5916 comparisons work between an application and shared
5917 library), otherwise set it to zero. If a function is only
5918 called from a binary, there is no need to slow down
5919 shared libraries because of that. */
5920 sym->st_shndx = SHN_UNDEF;
5921 if (!h->pointer_equality_needed)
5922 sym->st_value = 0;
5923 }
5924
5925   /* Don't generate a dynamic GOT relocation against an undefined weak
5926      symbol in an executable. */
5927 if (h->got.offset != (bfd_vma) -1
5928 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5929 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE
5930 && !local_undefweak)
5931 {
5932 Elf_Internal_Rela rela;
5933 asection *relgot = htab->elf.srelgot;
5934
5935 /* This symbol has an entry in the global offset table. Set it
5936 up. */
5937 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5938 abort ();
5939
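      /* The low bit of h->got.offset is used as a flag meaning the GOT
	 entry has already been filled in by relocate_section; mask it off
	 to get the real offset.  */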
5940 rela.r_offset = (htab->elf.sgot->output_section->vma
5941 + htab->elf.sgot->output_offset
5942 + (h->got.offset &~ (bfd_vma) 1));
5943
5944 /* If this is a static link, or it is a -Bsymbolic link and the
5945 symbol is defined locally or was forced to be local because
5946 of a version file, we just want to emit a RELATIVE reloc.
5947 The entry in the global offset table will already have been
5948 initialized in the relocate_section function. */
5949 if (h->def_regular
5950 && h->type == STT_GNU_IFUNC)
5951 {
5952 if (h->plt.offset == (bfd_vma) -1)
5953 {
5954 /* STT_GNU_IFUNC is referenced without PLT. */
5955 if (htab->elf.splt == NULL)
5956 {
5957 		  /* Use the .rel[a].iplt section to store .got relocations
5958 		     in a static executable. */
5959 relgot = htab->elf.irelplt;
5960 }
5961 if (SYMBOL_REFERENCES_LOCAL (info, h))
5962 {
5963 rela.r_info = htab->r_info (0,
5964 R_X86_64_IRELATIVE);
5965 rela.r_addend = (h->root.u.def.value
5966 + h->root.u.def.section->output_section->vma
5967 + h->root.u.def.section->output_offset);
5968 }
5969 else
5970 goto do_glob_dat;
5971 }
5972 else if (bfd_link_pic (info))
5973 {
5974 /* Generate R_X86_64_GLOB_DAT. */
5975 goto do_glob_dat;
5976 }
5977 else
5978 {
5979 asection *plt;
5980
5981 if (!h->pointer_equality_needed)
5982 abort ();
5983
5984 	      /* For a non-shared object, we can't use .got.plt, which
5985 		 contains the real function address, if we need pointer
5986 		 equality. We load the GOT entry with the PLT entry. */
5987 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5988 bfd_put_64 (output_bfd, (plt->output_section->vma
5989 + plt->output_offset
5990 + h->plt.offset),
5991 htab->elf.sgot->contents + h->got.offset);
5992 return TRUE;
5993 }
5994 }
5995 else if (bfd_link_pic (info)
5996 && SYMBOL_REFERENCES_LOCAL (info, h))
5997 {
5998 if (!h->def_regular)
5999 return FALSE;
6000 BFD_ASSERT((h->got.offset & 1) != 0);
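	  /* relocate_section has already set this GOT entry to the symbol's
	     address (and set the low flag bit), so only the
	     R_X86_64_RELATIVE reloc needs to be emitted here.  */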
6001 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
6002 rela.r_addend = (h->root.u.def.value
6003 + h->root.u.def.section->output_section->vma
6004 + h->root.u.def.section->output_offset);
6005 }
6006 else
6007 {
6008 BFD_ASSERT((h->got.offset & 1) == 0);
6009 do_glob_dat:
6010 bfd_put_64 (output_bfd, (bfd_vma) 0,
6011 htab->elf.sgot->contents + h->got.offset);
6012 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
6013 rela.r_addend = 0;
6014 }
6015
6016 elf_append_rela (output_bfd, relgot, &rela);
6017 }
6018
6019 if (h->needs_copy)
6020 {
6021 Elf_Internal_Rela rela;
6022
6023 /* This symbol needs a copy reloc. Set it up. */
6024
6025 if (h->dynindx == -1
6026 || (h->root.type != bfd_link_hash_defined
6027 && h->root.type != bfd_link_hash_defweak)
6028 || htab->srelbss == NULL)
6029 abort ();
6030
6031 rela.r_offset = (h->root.u.def.value
6032 + h->root.u.def.section->output_section->vma
6033 + h->root.u.def.section->output_offset);
6034 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
6035 rela.r_addend = 0;
6036 elf_append_rela (output_bfd, htab->srelbss, &rela);
6037 }
6038
6039 return TRUE;
6040 }
6041
6042 /* Finish up local dynamic symbol handling. We set the contents of
6043 various dynamic sections here. */
6044
6045 static bfd_boolean
6046 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
6047 {
6048 struct elf_link_hash_entry *h
6049 = (struct elf_link_hash_entry *) *slot;
6050 struct bfd_link_info *info
6051 = (struct bfd_link_info *) inf;
6052
6053 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6054 info, h, NULL);
6055 }
6056
6057 /* Finish up undefined weak symbol handling in a PIE. Fill its PLT entry
6058 here since an undefined weak symbol may not be dynamic, in which case
6059 elf_x86_64_finish_dynamic_symbol may not be called for it. */
6060
6061 static bfd_boolean
6062 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
6063 void *inf)
6064 {
6065 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
6066 struct bfd_link_info *info = (struct bfd_link_info *) inf;
6067
6068 if (h->root.type != bfd_link_hash_undefweak
6069 || h->dynindx != -1)
6070 return TRUE;
6071
6072 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6073 info, h, NULL);
6074 }
6075
6076 /* Used to decide how to sort relocs in an optimal manner for the
6077 dynamic linker, before writing them out. */
6078
6079 static enum elf_reloc_type_class
6080 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
6081 const asection *rel_sec ATTRIBUTE_UNUSED,
6082 const Elf_Internal_Rela *rela)
6083 {
6084 bfd *abfd = info->output_bfd;
6085 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6086 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6087
6088 if (htab->elf.dynsym != NULL
6089 && htab->elf.dynsym->contents != NULL)
6090 {
6091 /* Check relocation against STT_GNU_IFUNC symbol if there are
6092 dynamic symbols. */
6093 unsigned long r_symndx = htab->r_sym (rela->r_info);
6094 if (r_symndx != STN_UNDEF)
6095 {
6096 Elf_Internal_Sym sym;
6097 if (!bed->s->swap_symbol_in (abfd,
6098 (htab->elf.dynsym->contents
6099 + r_symndx * bed->s->sizeof_sym),
6100 0, &sym))
6101 abort ();
6102
6103 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
6104 return reloc_class_ifunc;
6105 }
6106 }
6107
6108 switch ((int) ELF32_R_TYPE (rela->r_info))
6109 {
6110 case R_X86_64_IRELATIVE:
6111 return reloc_class_ifunc;
6112 case R_X86_64_RELATIVE:
6113 case R_X86_64_RELATIVE64:
6114 return reloc_class_relative;
6115 case R_X86_64_JUMP_SLOT:
6116 return reloc_class_plt;
6117 case R_X86_64_COPY:
6118 return reloc_class_copy;
6119 default:
6120 return reloc_class_normal;
6121 }
6122 }
6123
6124 /* Finish up the dynamic sections. */
6125
6126 static bfd_boolean
6127 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
6128 struct bfd_link_info *info)
6129 {
6130 struct elf_x86_64_link_hash_table *htab;
6131 bfd *dynobj;
6132 asection *sdyn;
6133 const struct elf_x86_64_backend_data *abed;
6134
6135 htab = elf_x86_64_hash_table (info);
6136 if (htab == NULL)
6137 return FALSE;
6138
6139 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
6140 section only if there is .plt section. */
6141 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
6142 ? &elf_x86_64_bnd_arch_bed
6143 : get_elf_x86_64_backend_data (output_bfd));
6144
6145 dynobj = htab->elf.dynobj;
6146 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6147
6148 if (htab->elf.dynamic_sections_created)
6149 {
6150 bfd_byte *dyncon, *dynconend;
6151 const struct elf_backend_data *bed;
6152 bfd_size_type sizeof_dyn;
6153
6154 if (sdyn == NULL || htab->elf.sgot == NULL)
6155 abort ();
6156
6157 bed = get_elf_backend_data (dynobj);
6158 sizeof_dyn = bed->s->sizeof_dyn;
6159 dyncon = sdyn->contents;
6160 dynconend = sdyn->contents + sdyn->size;
6161 for (; dyncon < dynconend; dyncon += sizeof_dyn)
6162 {
6163 Elf_Internal_Dyn dyn;
6164 asection *s;
6165
6166 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
6167
6168 switch (dyn.d_tag)
6169 {
6170 default:
6171 continue;
6172
6173 case DT_PLTGOT:
6174 s = htab->elf.sgotplt;
6175 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6176 break;
6177
6178 case DT_JMPREL:
6179 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
6180 break;
6181
6182 case DT_PLTRELSZ:
6183 s = htab->elf.srelplt->output_section;
6184 dyn.d_un.d_val = s->size;
6185 break;
6186
6187 case DT_RELASZ:
6188 /* The procedure linkage table relocs (DT_JMPREL) should
6189 not be included in the overall relocs (DT_RELA).
6190 Therefore, we override the DT_RELASZ entry here to
6191 make it not include the JMPREL relocs. Since the
6192 linker script arranges for .rela.plt to follow all
6193 other relocation sections, we don't have to worry
6194 about changing the DT_RELA entry. */
6195 if (htab->elf.srelplt != NULL)
6196 {
6197 s = htab->elf.srelplt->output_section;
6198 dyn.d_un.d_val -= s->size;
6199 }
6200 break;
6201
6202 case DT_TLSDESC_PLT:
6203 s = htab->elf.splt;
6204 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6205 + htab->tlsdesc_plt;
6206 break;
6207
6208 case DT_TLSDESC_GOT:
6209 s = htab->elf.sgot;
6210 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6211 + htab->tlsdesc_got;
6212 break;
6213 }
6214
6215 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
6216 }
6217
6218 /* Fill in the special first entry in the procedure linkage table. */
6219 if (htab->elf.splt && htab->elf.splt->size > 0)
6220 {
6221 /* Fill in the first entry in the procedure linkage table. */
6222 memcpy (htab->elf.splt->contents,
6223 abed->plt0_entry, abed->plt_entry_size);
6224 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
6225 	     is 6 bytes long, subtract this value. */
6226 bfd_put_32 (output_bfd,
6227 (htab->elf.sgotplt->output_section->vma
6228 + htab->elf.sgotplt->output_offset
6229 + 8
6230 - htab->elf.splt->output_section->vma
6231 - htab->elf.splt->output_offset
6232 - 6),
6233 htab->elf.splt->contents + abed->plt0_got1_offset);
6234 /* Add offset for the PC-relative instruction accessing GOT+16,
6235 subtracting the offset to the end of that instruction. */
6236 bfd_put_32 (output_bfd,
6237 (htab->elf.sgotplt->output_section->vma
6238 + htab->elf.sgotplt->output_offset
6239 + 16
6240 - htab->elf.splt->output_section->vma
6241 - htab->elf.splt->output_offset
6242 - abed->plt0_got2_insn_end),
6243 htab->elf.splt->contents + abed->plt0_got2_offset);
6244
6245 elf_section_data (htab->elf.splt->output_section)
6246 ->this_hdr.sh_entsize = abed->plt_entry_size;
6247
6248 if (htab->tlsdesc_plt)
6249 {
6250 bfd_put_64 (output_bfd, (bfd_vma) 0,
6251 htab->elf.sgot->contents + htab->tlsdesc_got);
6252
6253 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
6254 abed->plt0_entry, abed->plt_entry_size);
6255
6256 	      /* Add the offset for pushq GOT+8(%rip); since the
6257 		 instruction is 6 bytes long, subtract this value. */
6258 bfd_put_32 (output_bfd,
6259 (htab->elf.sgotplt->output_section->vma
6260 + htab->elf.sgotplt->output_offset
6261 + 8
6262 - htab->elf.splt->output_section->vma
6263 - htab->elf.splt->output_offset
6264 - htab->tlsdesc_plt
6265 - 6),
6266 htab->elf.splt->contents
6267 + htab->tlsdesc_plt + abed->plt0_got1_offset);
6268 	      /* Add the offset for the PC-relative instruction accessing GOT+TDG,
6269 		 where TDG stands for htab->tlsdesc_got, subtracting the offset
6270 		 to the end of that instruction. */
6271 bfd_put_32 (output_bfd,
6272 (htab->elf.sgot->output_section->vma
6273 + htab->elf.sgot->output_offset
6274 + htab->tlsdesc_got
6275 - htab->elf.splt->output_section->vma
6276 - htab->elf.splt->output_offset
6277 - htab->tlsdesc_plt
6278 - abed->plt0_got2_insn_end),
6279 htab->elf.splt->contents
6280 + htab->tlsdesc_plt + abed->plt0_got2_offset);
6281 }
6282 }
6283 }
6284
6285 if (htab->plt_bnd != NULL)
6286 elf_section_data (htab->plt_bnd->output_section)
6287 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
6288
6289 if (htab->elf.sgotplt)
6290 {
6291 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
6292 {
6293 _bfd_error_handler
6294 (_("discarded output section: `%A'"), htab->elf.sgotplt);
6295 return FALSE;
6296 }
6297
6298 /* Fill in the first three entries in the global offset table. */
6299 if (htab->elf.sgotplt->size > 0)
6300 {
6301 /* Set the first entry in the global offset table to the address of
6302 the dynamic section. */
6303 if (sdyn == NULL)
6304 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
6305 else
6306 bfd_put_64 (output_bfd,
6307 sdyn->output_section->vma + sdyn->output_offset,
6308 htab->elf.sgotplt->contents);
6309 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6310 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
6311 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
6312 }
6313
6314 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
6315 GOT_ENTRY_SIZE;
6316 }
6317
6318 /* Adjust .eh_frame for .plt section. */
6319 if (htab->plt_eh_frame != NULL
6320 && htab->plt_eh_frame->contents != NULL)
6321 {
6322 if (htab->elf.splt != NULL
6323 && htab->elf.splt->size != 0
6324 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
6325 && htab->elf.splt->output_section != NULL
6326 && htab->plt_eh_frame->output_section != NULL)
6327 {
6328 bfd_vma plt_start = htab->elf.splt->output_section->vma;
6329 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
6330 + htab->plt_eh_frame->output_offset
6331 + PLT_FDE_START_OFFSET;
6332 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
6333 htab->plt_eh_frame->contents
6334 + PLT_FDE_START_OFFSET);
6335 }
6336 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
6337 {
6338 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
6339 htab->plt_eh_frame,
6340 htab->plt_eh_frame->contents))
6341 return FALSE;
6342 }
6343 }
6344
6345 if (htab->elf.sgot && htab->elf.sgot->size > 0)
6346 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
6347 = GOT_ENTRY_SIZE;
6348
6349 /* Fill PLT entries for undefined weak symbols in PIE. */
6350 if (bfd_link_pie (info))
6351 bfd_hash_traverse (&info->hash->table,
6352 elf_x86_64_pie_finish_undefweak_symbol,
6353 info);
6354
6355 return TRUE;
6356 }
6357
6358 /* Fill PLT/GOT entries and allocate dynamic relocations for local
6359 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
6360 It has to be done before elf_link_sort_relocs is called so that
6361 dynamic relocations are properly sorted. */
6362
6363 static bfd_boolean
6364 elf_x86_64_output_arch_local_syms
6365 (bfd *output_bfd ATTRIBUTE_UNUSED,
6366 struct bfd_link_info *info,
6367 void *flaginfo ATTRIBUTE_UNUSED,
6368 int (*func) (void *, const char *,
6369 Elf_Internal_Sym *,
6370 asection *,
6371 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
6372 {
6373 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6374 if (htab == NULL)
6375 return FALSE;
6376
6377 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
6378 htab_traverse (htab->loc_hash_table,
6379 elf_x86_64_finish_local_dynamic_symbol,
6380 info);
6381
6382 return TRUE;
6383 }
6384
6385 /* Return an array of PLT entry symbol values. */
6386
6387 static bfd_vma *
6388 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
6389 asection *relplt)
6390 {
6391 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
6392 arelent *p;
6393 long count, i;
6394 bfd_vma *plt_sym_val;
6395 bfd_vma plt_offset;
6396 bfd_byte *plt_contents;
6397 const struct elf_x86_64_backend_data *bed;
6398 Elf_Internal_Shdr *hdr;
6399 asection *plt_bnd;
6400
6401 /* Get the .plt section contents. PLT passed down may point to the
6402 .plt.bnd section. Make sure that PLT always points to the .plt
6403 section. */
6404 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
6405 if (plt_bnd)
6406 {
6407 if (plt != plt_bnd)
6408 abort ();
6409 plt = bfd_get_section_by_name (abfd, ".plt");
6410 if (plt == NULL)
6411 abort ();
6412 bed = &elf_x86_64_bnd_arch_bed;
6413 }
6414 else
6415 bed = get_elf_x86_64_backend_data (abfd);
6416
6417 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
6418 if (plt_contents == NULL)
6419 return NULL;
6420 if (!bfd_get_section_contents (abfd, (asection *) plt,
6421 plt_contents, 0, plt->size))
6422 {
6423 bad_return:
6424 free (plt_contents);
6425 return NULL;
6426 }
6427
6428 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
6429 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
6430 goto bad_return;
6431
6432 hdr = &elf_section_data (relplt)->this_hdr;
6433 count = relplt->size / hdr->sh_entsize;
6434
6435 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
6436 if (plt_sym_val == NULL)
6437 goto bad_return;
6438
6439 for (i = 0; i < count; i++)
6440 plt_sym_val[i] = -1;
6441
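  /* Skip over the reserved first entry (PLT0); real PLT entries start at
     bed->plt_entry_size.  */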
6442 plt_offset = bed->plt_entry_size;
6443 p = relplt->relocation;
6444 for (i = 0; i < count; i++, p++)
6445 {
6446 long reloc_index;
6447
6448 /* Skip unknown relocation. */
6449 if (p->howto == NULL)
6450 continue;
6451
6452 if (p->howto->type != R_X86_64_JUMP_SLOT
6453 && p->howto->type != R_X86_64_IRELATIVE)
6454 continue;
6455
6456 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
6457 + bed->plt_reloc_offset));
6458 if (reloc_index < count)
6459 {
6460 if (plt_bnd)
6461 {
6462 /* This is the index in .plt section. */
6463 long plt_index = plt_offset / bed->plt_entry_size;
6464 /* Store VMA + the offset in .plt.bnd section. */
6465 plt_sym_val[reloc_index] =
6466 (plt_bnd->vma
6467 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
6468 }
6469 else
6470 plt_sym_val[reloc_index] = plt->vma + plt_offset;
6471 }
6472 plt_offset += bed->plt_entry_size;
6473
6474 /* PR binutils/18437: Skip extra relocations in the .rela.plt
6475 section. */
6476 if (plt_offset >= plt->size)
6477 break;
6478 }
6479
6480 free (plt_contents);
6481
6482 return plt_sym_val;
6483 }
6484
6485 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
6486 support. */
6487
6488 static long
6489 elf_x86_64_get_synthetic_symtab (bfd *abfd,
6490 long symcount,
6491 asymbol **syms,
6492 long dynsymcount,
6493 asymbol **dynsyms,
6494 asymbol **ret)
6495 {
6496 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
6497 as PLT if it exists. */
6498 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
6499 if (plt == NULL)
6500 plt = bfd_get_section_by_name (abfd, ".plt");
6501 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
6502 dynsymcount, dynsyms, ret,
6503 plt,
6504 elf_x86_64_get_plt_sym_val);
6505 }
6506
6507 /* Handle an x86-64 specific section when reading an object file. This
6508 is called when elfcode.h finds a section with an unknown type. */
6509
6510 static bfd_boolean
6511 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
6512 const char *name, int shindex)
6513 {
6514 if (hdr->sh_type != SHT_X86_64_UNWIND)
6515 return FALSE;
6516
6517 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6518 return FALSE;
6519
6520 return TRUE;
6521 }
6522
6523 /* Hook called by the linker routine which adds symbols from an object
6524 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
6525 of .bss. */
6526
6527 static bfd_boolean
6528 elf_x86_64_add_symbol_hook (bfd *abfd,
6529 struct bfd_link_info *info ATTRIBUTE_UNUSED,
6530 Elf_Internal_Sym *sym,
6531 const char **namep ATTRIBUTE_UNUSED,
6532 flagword *flagsp ATTRIBUTE_UNUSED,
6533 asection **secp,
6534 bfd_vma *valp)
6535 {
6536 asection *lcomm;
6537
6538 switch (sym->st_shndx)
6539 {
6540 case SHN_X86_64_LCOMMON:
6541 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6542 if (lcomm == NULL)
6543 {
6544 lcomm = bfd_make_section_with_flags (abfd,
6545 "LARGE_COMMON",
6546 (SEC_ALLOC
6547 | SEC_IS_COMMON
6548 | SEC_LINKER_CREATED));
6549 if (lcomm == NULL)
6550 return FALSE;
6551 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6552 }
6553 *secp = lcomm;
6554 *valp = sym->st_size;
6555 return TRUE;
6556 }
6557
6558 return TRUE;
6559 }
6560
6561
6562 /* Given a BFD section, try to locate the corresponding ELF section
6563 index. */
6564
6565 static bfd_boolean
6566 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6567 asection *sec, int *index_return)
6568 {
6569 if (sec == &_bfd_elf_large_com_section)
6570 {
6571 *index_return = SHN_X86_64_LCOMMON;
6572 return TRUE;
6573 }
6574 return FALSE;
6575 }
6576
6577 /* Process a symbol. */
6578
6579 static void
6580 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6581 asymbol *asym)
6582 {
6583 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6584
6585 switch (elfsym->internal_elf_sym.st_shndx)
6586 {
6587 case SHN_X86_64_LCOMMON:
6588 asym->section = &_bfd_elf_large_com_section;
6589 asym->value = elfsym->internal_elf_sym.st_size;
6590 /* Common symbol doesn't set BSF_GLOBAL. */
6591 asym->flags &= ~BSF_GLOBAL;
6592 break;
6593 }
6594 }
6595
6596 static bfd_boolean
6597 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6598 {
6599 return (sym->st_shndx == SHN_COMMON
6600 || sym->st_shndx == SHN_X86_64_LCOMMON);
6601 }
6602
6603 static unsigned int
6604 elf_x86_64_common_section_index (asection *sec)
6605 {
6606 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6607 return SHN_COMMON;
6608 else
6609 return SHN_X86_64_LCOMMON;
6610 }
6611
6612 static asection *
6613 elf_x86_64_common_section (asection *sec)
6614 {
6615 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6616 return bfd_com_section_ptr;
6617 else
6618 return &_bfd_elf_large_com_section;
6619 }
6620
6621 static bfd_boolean
6622 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6623 const Elf_Internal_Sym *sym,
6624 asection **psec,
6625 bfd_boolean newdef,
6626 bfd_boolean olddef,
6627 bfd *oldbfd,
6628 const asection *oldsec)
6629 {
6630 /* A normal common symbol and a large common symbol result in a
6631 normal common symbol. We turn the large common symbol into a
6632 normal one. */
6633 if (!olddef
6634 && h->root.type == bfd_link_hash_common
6635 && !newdef
6636 && bfd_is_com_section (*psec)
6637 && oldsec != *psec)
6638 {
6639 if (sym->st_shndx == SHN_COMMON
6640 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6641 {
6642 h->root.u.c.p->section
6643 = bfd_make_section_old_way (oldbfd, "COMMON");
6644 h->root.u.c.p->section->flags = SEC_ALLOC;
6645 }
6646 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6647 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6648 *psec = bfd_com_section_ptr;
6649 }
6650
6651 return TRUE;
6652 }
6653
6654 static int
6655 elf_x86_64_additional_program_headers (bfd *abfd,
6656 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6657 {
6658 asection *s;
6659 int count = 0;
6660
6661 /* Check to see if we need a large readonly segment. */
6662 s = bfd_get_section_by_name (abfd, ".lrodata");
6663 if (s && (s->flags & SEC_LOAD))
6664 count++;
6665
6666   /* Check to see if we need a large data segment. Since the .lbss section
6667      is placed right after the .bss section, there should be no need for
6668      a large data segment just because of .lbss. */
6669 s = bfd_get_section_by_name (abfd, ".ldata");
6670 if (s && (s->flags & SEC_LOAD))
6671 count++;
6672
6673 return count;
6674 }
6675
6676 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6677
6678 static bfd_boolean
6679 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6680 {
6681 if (h->plt.offset != (bfd_vma) -1
6682 && !h->def_regular
6683 && !h->pointer_equality_needed)
6684 return FALSE;
6685
6686 return _bfd_elf_hash_symbol (h);
6687 }
6688
6689 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6690
6691 static bfd_boolean
6692 elf_x86_64_relocs_compatible (const bfd_target *input,
6693 const bfd_target *output)
6694 {
6695 return ((xvec_get_elf_backend_data (input)->s->elfclass
6696 == xvec_get_elf_backend_data (output)->s->elfclass)
6697 && _bfd_elf_relocs_compatible (input, output));
6698 }
6699
6700 static const struct bfd_elf_special_section
6701 elf_x86_64_special_sections[]=
6702 {
6703 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6704 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6705 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6706 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6707 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6708 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6709 { NULL, 0, 0, 0, 0 }
6710 };
6711
6712 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6713 #define TARGET_LITTLE_NAME "elf64-x86-64"
6714 #define ELF_ARCH bfd_arch_i386
6715 #define ELF_TARGET_ID X86_64_ELF_DATA
6716 #define ELF_MACHINE_CODE EM_X86_64
6717 #define ELF_MAXPAGESIZE 0x200000
6718 #define ELF_MINPAGESIZE 0x1000
6719 #define ELF_COMMONPAGESIZE 0x1000
6720
6721 #define elf_backend_can_gc_sections 1
6722 #define elf_backend_can_refcount 1
6723 #define elf_backend_want_got_plt 1
6724 #define elf_backend_plt_readonly 1
6725 #define elf_backend_want_plt_sym 0
6726 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6727 #define elf_backend_rela_normal 1
6728 #define elf_backend_plt_alignment 4
6729 #define elf_backend_extern_protected_data 1
6730 #define elf_backend_caches_rawsize 1
6731
6732 #define elf_info_to_howto elf_x86_64_info_to_howto
6733
6734 #define bfd_elf64_bfd_link_hash_table_create \
6735 elf_x86_64_link_hash_table_create
6736 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6737 #define bfd_elf64_bfd_reloc_name_lookup \
6738 elf_x86_64_reloc_name_lookup
6739
6740 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6741 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6742 #define elf_backend_check_relocs elf_x86_64_check_relocs
6743 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6744 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6745 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6746 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6747 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
6748 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6749 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6750 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6751 #ifdef CORE_HEADER
6752 #define elf_backend_write_core_note elf_x86_64_write_core_note
6753 #endif
6754 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6755 #define elf_backend_relocate_section elf_x86_64_relocate_section
6756 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6757 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6758 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6759 #define elf_backend_object_p elf64_x86_64_elf_object_p
6760 #define bfd_elf64_mkobject elf_x86_64_mkobject
6761 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6762
6763 #define elf_backend_section_from_shdr \
6764 elf_x86_64_section_from_shdr
6765
6766 #define elf_backend_section_from_bfd_section \
6767 elf_x86_64_elf_section_from_bfd_section
6768 #define elf_backend_add_symbol_hook \
6769 elf_x86_64_add_symbol_hook
6770 #define elf_backend_symbol_processing \
6771 elf_x86_64_symbol_processing
6772 #define elf_backend_common_section_index \
6773 elf_x86_64_common_section_index
6774 #define elf_backend_common_section \
6775 elf_x86_64_common_section
6776 #define elf_backend_common_definition \
6777 elf_x86_64_common_definition
6778 #define elf_backend_merge_symbol \
6779 elf_x86_64_merge_symbol
6780 #define elf_backend_special_sections \
6781 elf_x86_64_special_sections
6782 #define elf_backend_additional_program_headers \
6783 elf_x86_64_additional_program_headers
6784 #define elf_backend_hash_symbol \
6785 elf_x86_64_hash_symbol
6786 #define elf_backend_omit_section_dynsym \
6787 ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
6788 #define elf_backend_fixup_symbol \
6789 elf_x86_64_fixup_symbol
6790
6791 #include "elf64-target.h"
6792
6793 /* CloudABI support. */
6794
6795 #undef TARGET_LITTLE_SYM
6796 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6797 #undef TARGET_LITTLE_NAME
6798 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6799
6800 #undef ELF_OSABI
6801 #define ELF_OSABI ELFOSABI_CLOUDABI
6802
6803 #undef elf64_bed
6804 #define elf64_bed elf64_x86_64_cloudabi_bed
6805
6806 #include "elf64-target.h"
6807
6808 /* FreeBSD support. */
6809
6810 #undef TARGET_LITTLE_SYM
6811 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6812 #undef TARGET_LITTLE_NAME
6813 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6814
6815 #undef ELF_OSABI
6816 #define ELF_OSABI ELFOSABI_FREEBSD
6817
6818 #undef elf64_bed
6819 #define elf64_bed elf64_x86_64_fbsd_bed
6820
6821 #include "elf64-target.h"
6822
6823 /* Solaris 2 support. */
6824
6825 #undef TARGET_LITTLE_SYM
6826 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6827 #undef TARGET_LITTLE_NAME
6828 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6829
6830 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6831 objects won't be recognized. */
6832 #undef ELF_OSABI
6833
6834 #undef elf64_bed
6835 #define elf64_bed elf64_x86_64_sol2_bed
6836
6837 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6838 boundary. */
6839 #undef elf_backend_static_tls_alignment
6840 #define elf_backend_static_tls_alignment 16
6841
6842 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6843
6844 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6845 File, p.63. */
6846 #undef elf_backend_want_plt_sym
6847 #define elf_backend_want_plt_sym 1
6848
6849 #undef elf_backend_strtab_flags
6850 #define elf_backend_strtab_flags SHF_STRINGS
6851
6852 static bfd_boolean
6853 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
6854 bfd *obfd ATTRIBUTE_UNUSED,
6855 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
6856 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
6857 {
6858 /* PR 19938: FIXME: Need to add code for setting the sh_info
6859 and sh_link fields of Solaris specific section types. */
6860 return FALSE;
6861 }
6862
6863 #undef elf_backend_copy_special_section_fields
6864 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
6865
6866 #include "elf64-target.h"
6867
6868 /* Native Client support. */
6869
6870 static bfd_boolean
6871 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6872 {
6873 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6874 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6875 return TRUE;
6876 }
6877
6878 #undef TARGET_LITTLE_SYM
6879 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6880 #undef TARGET_LITTLE_NAME
6881 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6882 #undef elf64_bed
6883 #define elf64_bed elf64_x86_64_nacl_bed
6884
6885 #undef ELF_MAXPAGESIZE
6886 #undef ELF_MINPAGESIZE
6887 #undef ELF_COMMONPAGESIZE
6888 #define ELF_MAXPAGESIZE 0x10000
6889 #define ELF_MINPAGESIZE 0x10000
6890 #define ELF_COMMONPAGESIZE 0x10000
6891
6892 /* Restore defaults. */
6893 #undef ELF_OSABI
6894 #undef elf_backend_static_tls_alignment
6895 #undef elf_backend_want_plt_sym
6896 #define elf_backend_want_plt_sym 0
6897 #undef elf_backend_strtab_flags
6898 #undef elf_backend_copy_special_section_fields
6899
6900 /* NaCl uses substantially different PLT entries for the same effects. */
6901
6902 #undef elf_backend_plt_alignment
6903 #define elf_backend_plt_alignment 5
6904 #define NACL_PLT_ENTRY_SIZE 64
6905 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6906
6907 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6908 {
6909 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6910 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6911 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6912 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6913 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6914
6915 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6916 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6917
6918 /* 32 bytes of nop to pad out to the standard size. */
6919 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6920 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6921 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6922 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6923 0x66, /* excess data16 prefix */
6924 0x90 /* nop */
6925 };
6926
6927 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6928 {
6929 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6930 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6931 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6932 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6933
6934 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6935 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6936 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6937
6938 /* Lazy GOT entries point here (32-byte aligned). */
6939 0x68, /* pushq immediate */
6940 0, 0, 0, 0, /* replaced with index into relocation table. */
6941 0xe9, /* jmp relative */
6942 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6943
6944 /* 22 bytes of nop to pad out to the standard size. */
6945 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6946 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6947 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6948 };
6949
6950 /* .eh_frame covering the .plt section. */
6951
6952 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6953 {
6954 #if (PLT_CIE_LENGTH != 20 \
6955 || PLT_FDE_LENGTH != 36 \
6956 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6957 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6958 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6959 #endif
6960 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6961 0, 0, 0, 0, /* CIE ID */
6962 1, /* CIE version */
6963 'z', 'R', 0, /* Augmentation string */
6964 1, /* Code alignment factor */
6965 0x78, /* Data alignment factor */
6966 16, /* Return address column */
6967 1, /* Augmentation size */
6968 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6969 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6970 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6971 DW_CFA_nop, DW_CFA_nop,
6972
6973 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6974 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6975 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6976 0, 0, 0, 0, /* .plt size goes here */
6977 0, /* Augmentation size */
6978 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6979 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6980 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6981 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6982 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6983 13, /* Block length */
6984 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6985 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6986 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6987 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6988 DW_CFA_nop, DW_CFA_nop
6989 };
6990
6991 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6992 {
6993 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6994 elf_x86_64_nacl_plt_entry, /* plt_entry */
6995 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6996 2, /* plt0_got1_offset */
6997 9, /* plt0_got2_offset */
6998 13, /* plt0_got2_insn_end */
6999 3, /* plt_got_offset */
7000 33, /* plt_reloc_offset */
7001 38, /* plt_plt_offset */
7002 7, /* plt_got_insn_size */
7003 42, /* plt_plt_insn_end */
7004 32, /* plt_lazy_offset */
7005 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
7006 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
7007 };
7008
7009 #undef elf_backend_arch_data
7010 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
7011
7012 #undef elf_backend_object_p
7013 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
7014 #undef elf_backend_modify_segment_map
7015 #define elf_backend_modify_segment_map nacl_modify_segment_map
7016 #undef elf_backend_modify_program_headers
7017 #define elf_backend_modify_program_headers nacl_modify_program_headers
7018 #undef elf_backend_final_write_processing
7019 #define elf_backend_final_write_processing nacl_final_write_processing
7020
7021 #include "elf64-target.h"
7022
7023 /* Native Client x32 support. */
7024
7025 static bfd_boolean
7026 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
7027 {
7028 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
7029 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
7030 return TRUE;
7031 }
7032
7033 #undef TARGET_LITTLE_SYM
7034 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
7035 #undef TARGET_LITTLE_NAME
7036 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
7037 #undef elf32_bed
7038 #define elf32_bed elf32_x86_64_nacl_bed
7039
7040 #define bfd_elf32_bfd_link_hash_table_create \
7041 elf_x86_64_link_hash_table_create
7042 #define bfd_elf32_bfd_reloc_type_lookup \
7043 elf_x86_64_reloc_type_lookup
7044 #define bfd_elf32_bfd_reloc_name_lookup \
7045 elf_x86_64_reloc_name_lookup
7046 #define bfd_elf32_mkobject \
7047 elf_x86_64_mkobject
7048 #define bfd_elf32_get_synthetic_symtab \
7049 elf_x86_64_get_synthetic_symtab
7050
7051 #undef elf_backend_object_p
7052 #define elf_backend_object_p \
7053 elf32_x86_64_nacl_elf_object_p
7054
7055 #undef elf_backend_bfd_from_remote_memory
7056 #define elf_backend_bfd_from_remote_memory \
7057 _bfd_elf32_bfd_from_remote_memory
7058
7059 #undef elf_backend_size_info
7060 #define elf_backend_size_info \
7061 _bfd_elf32_size_info
7062
7063 #include "elf32-target.h"
7064
7065 /* Restore defaults. */
7066 #undef elf_backend_object_p
7067 #define elf_backend_object_p elf64_x86_64_elf_object_p
7068 #undef elf_backend_bfd_from_remote_memory
7069 #undef elf_backend_size_info
7070 #undef elf_backend_modify_segment_map
7071 #undef elf_backend_modify_program_headers
7072 #undef elf_backend_final_write_processing
7073
7074 /* Intel L1OM support. */
7075
7076 static bfd_boolean
7077 elf64_l1om_elf_object_p (bfd *abfd)
7078 {
7079 /* Set the right machine number for an L1OM elf64 file. */
7080 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
7081 return TRUE;
7082 }
7083
7084 #undef TARGET_LITTLE_SYM
7085 #define TARGET_LITTLE_SYM l1om_elf64_vec
7086 #undef TARGET_LITTLE_NAME
7087 #define TARGET_LITTLE_NAME "elf64-l1om"
7088 #undef ELF_ARCH
7089 #define ELF_ARCH bfd_arch_l1om
7090
7091 #undef ELF_MACHINE_CODE
7092 #define ELF_MACHINE_CODE EM_L1OM
7093
7094 #undef ELF_OSABI
7095
7096 #undef elf64_bed
7097 #define elf64_bed elf64_l1om_bed
7098
7099 #undef elf_backend_object_p
7100 #define elf_backend_object_p elf64_l1om_elf_object_p
7101
7102 /* Restore defaults. */
7103 #undef ELF_MAXPAGESIZE
7104 #undef ELF_MINPAGESIZE
7105 #undef ELF_COMMONPAGESIZE
7106 #define ELF_MAXPAGESIZE 0x200000
7107 #define ELF_MINPAGESIZE 0x1000
7108 #define ELF_COMMONPAGESIZE 0x1000
7109 #undef elf_backend_plt_alignment
7110 #define elf_backend_plt_alignment 4
7111 #undef elf_backend_arch_data
7112 #define elf_backend_arch_data &elf_x86_64_arch_bed
7113
7114 #include "elf64-target.h"
7115
7116 /* FreeBSD L1OM support. */
7117
7118 #undef TARGET_LITTLE_SYM
7119 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
7120 #undef TARGET_LITTLE_NAME
7121 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
7122
7123 #undef ELF_OSABI
7124 #define ELF_OSABI ELFOSABI_FREEBSD
7125
7126 #undef elf64_bed
7127 #define elf64_bed elf64_l1om_fbsd_bed
7128
7129 #include "elf64-target.h"
7130
7131 /* Intel K1OM support. */
7132
7133 static bfd_boolean
7134 elf64_k1om_elf_object_p (bfd *abfd)
7135 {
7136   /* Set the right machine number for a K1OM elf64 file. */
7137 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
7138 return TRUE;
7139 }
7140
7141 #undef TARGET_LITTLE_SYM
7142 #define TARGET_LITTLE_SYM k1om_elf64_vec
7143 #undef TARGET_LITTLE_NAME
7144 #define TARGET_LITTLE_NAME "elf64-k1om"
7145 #undef ELF_ARCH
7146 #define ELF_ARCH bfd_arch_k1om
7147
7148 #undef ELF_MACHINE_CODE
7149 #define ELF_MACHINE_CODE EM_K1OM
7150
7151 #undef ELF_OSABI
7152
7153 #undef elf64_bed
7154 #define elf64_bed elf64_k1om_bed
7155
7156 #undef elf_backend_object_p
7157 #define elf_backend_object_p elf64_k1om_elf_object_p
7158
7159 #undef elf_backend_static_tls_alignment
7160
7161 #undef elf_backend_want_plt_sym
7162 #define elf_backend_want_plt_sym 0
7163
7164 #include "elf64-target.h"
7165
7166 /* FreeBSD K1OM support. */
7167
7168 #undef TARGET_LITTLE_SYM
7169 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
7170 #undef TARGET_LITTLE_NAME
7171 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
7172
7173 #undef ELF_OSABI
7174 #define ELF_OSABI ELFOSABI_FREEBSD
7175
7176 #undef elf64_bed
7177 #define elf64_bed elf64_k1om_fbsd_bed
7178
7179 #include "elf64-target.h"
7180
7181 /* 32bit x86-64 support. */
7182
7183 #undef TARGET_LITTLE_SYM
7184 #define TARGET_LITTLE_SYM x86_64_elf32_vec
7185 #undef TARGET_LITTLE_NAME
7186 #define TARGET_LITTLE_NAME "elf32-x86-64"
7187 #undef elf32_bed
7188
7189 #undef ELF_ARCH
7190 #define ELF_ARCH bfd_arch_i386
7191
7192 #undef ELF_MACHINE_CODE
7193 #define ELF_MACHINE_CODE EM_X86_64
7194
7195 #undef ELF_OSABI
7196
7197 #undef elf_backend_object_p
7198 #define elf_backend_object_p \
7199 elf32_x86_64_elf_object_p
7200
7201 #undef elf_backend_bfd_from_remote_memory
7202 #define elf_backend_bfd_from_remote_memory \
7203 _bfd_elf32_bfd_from_remote_memory
7204
7205 #undef elf_backend_size_info
7206 #define elf_backend_size_info \
7207 _bfd_elf32_size_info
7208
7209 #include "elf32-target.h"