1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2017 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
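/* A rough illustration of why ELF32_R_TYPE is enough here: every
   R_X86_64_* value used below fits in the low 8 bits of r_info, both in
   the ELF64 layout ((sym << 32) + type) and in the x32 ELF32 layout
   ((sym << 8) + type), so

     r_type = ELF32_R_TYPE (rel->r_info);

   recovers the same number in either case, while the symbol index still
   has to be extracted with the per-ABI htab->r_sym hook.  */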
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
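/* As far as this file is concerned, this bit is only ever ORed into the
   small relocation numbers that GOTPCREL conversion produces
   (R_X86_64_PC32, R_X86_64_32 and R_X86_64_32S), all of which are well
   below 0x80, so the flag cannot be mistaken for a real relocation type
   in that range; elf_x86_64_info_to_howto below masks it off again
   before the howto lookup.  */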
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%B: invalid relocation type %d"),
286 abfd, (int) r_type);
287 r_type = R_X86_64_NONE;
288 }
289 i = r_type;
290 }
291 else
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
295 }
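/* For example, assuming the usual numbering where R_X86_64_GNU_VTENTRY
   immediately follows R_X86_64_GNU_VTINHERIT, the subtraction above
   maps R_X86_64_GNU_VTINHERIT to index R_X86_64_standard and
   R_X86_64_GNU_VTENTRY to index R_X86_64_standard + 1, i.e. the two
   table entries that sit right after the gap noted in the table.  */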
296
297 /* Given a BFD reloc type, return a HOWTO structure. */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
301 {
302 unsigned int i;
303
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
305 i++)
306 {
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
310 }
311 return NULL;
312 }
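/* Illustrative use only: generic BFD code normally reaches the lookup
   above through the public entry point, e.g. something like

     reloc_howto_type *howto
       = bfd_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);

   which, for an x86-64 target vector, should end up in
   elf_x86_64_reloc_type_lookup and return the R_X86_64_PC32 howto.  */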
313
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
316 const char *r_name)
317 {
318 unsigned int i;
319
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
321 {
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
326 return reloc;
327 }
328
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
333
334 return NULL;
335 }
336
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
338
339 static void
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
342 {
343 unsigned r_type;
344
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
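/* The pr_reg size of 216 bytes above corresponds to 27 eight-byte
   slots, which matches the x86-64 elf_gregset_t; the x32 note is
   smaller overall only because the surrounding prstatus fields use the
   32-bit layout, while pr_reg itself still appears to carry the full
   64-bit register set.  */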
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some implementations (at least
428 one, anyway), so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 #endif
528 \f
529 /* Functions for the x86-64 ELF linker. */
530
531 /* The size in bytes of an entry in the global offset table. */
532
533 #define GOT_ENTRY_SIZE 8
534
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
536
537 #define LAZY_PLT_ENTRY_SIZE 16
538
539 /* The size in bytes of an entry in the non-lazy procedure linkage
540 table. */
541
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
543
544 /* The first entry in a lazy procedure linkage table looks like this.
545 See the SVR4 ABI i386 supplement and the x86-64 ABI for how this
546 works. */
547
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
549 {
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
553 };
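/* At run time PLT0 relies on the reserved GOT slots: GOT+8 typically
   holds a pointer to this object's link map and GOT+16 the address of
   the dynamic linker's lazy resolver (both filled in by ld.so at load
   time), so PLT0 pushes the link map and jumps into the resolver with
   the relocation index, pushed by the per-symbol entry, already on the
   stack.  */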
554
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
556
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
558 {
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
565 };
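/* The lazy binding dance, roughly: the symbol's GOT slot is initialised
   by the linker to point back at the pushq in this very entry, so the
   first "jmpq *name@GOTPC(%rip)" falls through to push the relocation
   index and jump to PLT0 above; once the resolver has run, the GOT slot
   holds the real function address and later calls bypass the push
   entirely.  */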
566
567 /* The first entry in a lazy procedure linkage table with BND prefix
568 looks like this. */
569
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
571 {
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
575 };
576
577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
579
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
581 {
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
585 };
586
587 /* The first entry in the IBT-enabled lazy procedure linkage table is
588 the same as the lazy PLT with BND prefix so that bound registers are
589 preserved when control is passed to the dynamic linker. Subsequent
590 entries for an IBT-enabled lazy procedure linkage table look like
591 this. */
592
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
594 {
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
598 0x90 /* nop */
599 };
600
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
604
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
606 {
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
611 };
612
613 /* Entries in the non-lazy procedure linkage table look like this. */
614
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
616 {
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
620 };
621
622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
624
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
626 {
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
629 0x90 /* nop */
630 };
631
632 /* Entries for branches with IBT enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
634 PLT entry. */
635
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
637 {
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
642 };
643
644 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
646 PLT entry. */
647
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
649 {
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
654 };
655
656 /* .eh_frame covering the lazy .plt section. */
657
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
659 {
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
662 1, /* CIE version */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
672
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
689 };
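/* Reading the DW_CFA_def_cfa_expression above: it computes
   rsp + 8 + ((((rip & 15) >= 11) ? 1 : 0) << 3), i.e. the CFA is
   rsp + 8 in the first part of a 16-byte PLT entry and rsp + 16 once
   rip has passed the pushq (which occupies bytes 6-10 of the entry),
   since that push leaves an extra 8 bytes on the stack before the jump
   back to PLT0.  */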
690
691 /* .eh_frame covering the lazy BND .plt section. */
692
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
694 {
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
697 1, /* CIE version */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
707
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
724 };
725
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
727
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
729 {
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
732 1, /* CIE version */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
742
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
759 };
760
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
762
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
764 {
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
767 1, /* CIE version */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
777
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
794 };
795
796 /* .eh_frame covering the non-lazy .plt section. */
797
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
799 {
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
803 1, /* CIE version */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
813
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
821 };
822
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
825 {
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
843 };
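/* The offsets above can be read straight off the byte templates:
   plt0_got1_offset 2 and plt0_got2_offset 8 are where the disp32
   fields of "pushq GOT+8(%rip)" and "jmpq *GOT+16(%rip)" start in
   elf_x86_64_lazy_plt0_entry, while plt_got_offset 2,
   plt_reloc_offset 7 and plt_plt_offset 12 are the disp32/imm32
   fields of the jmpq, pushq and jmp in elf_x86_64_lazy_plt_entry.  */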
844
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
846 {
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
854 };
855
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
857 {
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
878 {
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
886 };
887
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
889 {
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
907 };
908
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
910 {
911 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 2, /* plt0_got1_offset */
916 8, /* plt0_got2_offset */
917 12, /* plt0_got2_insn_end */
918 4+2, /* plt_got_offset */
919 4+1, /* plt_reloc_offset */
920 4+6, /* plt_plt_offset */
921 4+6, /* plt_got_insn_size */
922 4+5+5, /* plt_plt_insn_end */
923 0, /* plt_lazy_offset */
924 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
925 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
926 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
927 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
928 };
929
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
931 {
932 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
933 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
934 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
935 4+1+2, /* plt_got_offset */
936 4+1+6, /* plt_got_insn_size */
937 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
938 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
939 };
940
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
942 {
943 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
944 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
946 4+2, /* plt_got_offset */
947 4+6, /* plt_got_insn_size */
948 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
949 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
950 };
951
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
953 {
954 is_normal /* os */
955 };
956
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
958
959 static bfd_boolean
960 elf64_x86_64_elf_object_p (bfd *abfd)
961 {
962 /* Set the right machine number for an x86-64 elf64 file. */
963 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
964 return TRUE;
965 }
966
967 static bfd_boolean
968 elf32_x86_64_elf_object_p (bfd *abfd)
969 {
970 /* Set the right machine number for an x86-64 elf32 file. */
971 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
972 return TRUE;
973 }
974
975 /* Return TRUE if the TLS access code sequence supports the transition
976 from R_TYPE. */
977
978 static bfd_boolean
979 elf_x86_64_check_tls_transition (bfd *abfd,
980 struct bfd_link_info *info,
981 asection *sec,
982 bfd_byte *contents,
983 Elf_Internal_Shdr *symtab_hdr,
984 struct elf_link_hash_entry **sym_hashes,
985 unsigned int r_type,
986 const Elf_Internal_Rela *rel,
987 const Elf_Internal_Rela *relend)
988 {
989 unsigned int val;
990 unsigned long r_symndx;
991 bfd_boolean largepic = FALSE;
992 struct elf_link_hash_entry *h;
993 bfd_vma offset;
994 struct elf_x86_link_hash_table *htab;
995 bfd_byte *call;
996 bfd_boolean indirect_call;
997
998 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999 offset = rel->r_offset;
1000 switch (r_type)
1001 {
1002 case R_X86_64_TLSGD:
1003 case R_X86_64_TLSLD:
1004 if ((rel + 1) >= relend)
1005 return FALSE;
1006
1007 if (r_type == R_X86_64_TLSGD)
1008 {
1009 /* Check transition from GD access model. For 64bit, only
1010 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1011 .word 0x6666; rex64; call __tls_get_addr@PLT
1012 or
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1014 .byte 0x66; rex64
1015 call *__tls_get_addr@GOTPCREL(%rip)
1016 which may be converted to
1017 addr32 call __tls_get_addr
1018 can transition to a different access model. For 32bit, only
1019 leaq foo@tlsgd(%rip), %rdi
1020 .word 0x6666; rex64; call __tls_get_addr@PLT
1021 or
1022 leaq foo@tlsgd(%rip), %rdi
1023 .byte 0x66; rex64
1024 call *__tls_get_addr@GOTPCREL(%rip)
1025 which may be converted to
1026 addr32 call __tls_get_addr
1027 can transition to a different access model. For largepic,
1028 we also support:
1029 leaq foo@tlsgd(%rip), %rdi
1030 movabsq $__tls_get_addr@pltoff, %rax
1031 addq $r15, %rax
1032 call *%rax
1033 or
1034 leaq foo@tlsgd(%rip), %rdi
1035 movabsq $__tls_get_addr@pltoff, %rax
1036 addq $rbx, %rax
1037 call *%rax */
1038
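/* For reference: with the 0x66 prefix, "leaq foo@tlsgd(%rip), %rdi"
   is encoded as 0x66 0x48 0x8d 0x3d <disp32>, and the R_X86_64_TLSGD
   relocation is applied to the disp32, so rel->r_offset points 4
   bytes into the instruction.  That is why the 64-bit check below
   compares contents + offset - 4 against the leaq array, and the x32
   check compares offset - 3 against leaq + 1 (the same instruction
   without the 0x66 prefix).  */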
1039 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1040
1041 if ((offset + 12) > sec->size)
1042 return FALSE;
1043
1044 call = contents + offset + 4;
1045 if (call[0] != 0x66
1046 || !((call[1] == 0x48
1047 && call[2] == 0xff
1048 && call[3] == 0x15)
1049 || (call[1] == 0x48
1050 && call[2] == 0x67
1051 && call[3] == 0xe8)
1052 || (call[1] == 0x66
1053 && call[2] == 0x48
1054 && call[3] == 0xe8)))
1055 {
1056 if (!ABI_64_P (abfd)
1057 || (offset + 19) > sec->size
1058 || offset < 3
1059 || memcmp (call - 7, leaq + 1, 3) != 0
1060 || memcmp (call, "\x48\xb8", 2) != 0
1061 || call[11] != 0x01
1062 || call[13] != 0xff
1063 || call[14] != 0xd0
1064 || !((call[10] == 0x48 && call[12] == 0xd8)
1065 || (call[10] == 0x4c && call[12] == 0xf8)))
1066 return FALSE;
1067 largepic = TRUE;
1068 }
1069 else if (ABI_64_P (abfd))
1070 {
1071 if (offset < 4
1072 || memcmp (contents + offset - 4, leaq, 4) != 0)
1073 return FALSE;
1074 }
1075 else
1076 {
1077 if (offset < 3
1078 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1079 return FALSE;
1080 }
1081 indirect_call = call[2] == 0xff;
1082 }
1083 else
1084 {
1085 /* Check transition from LD access model. Only
1086 leaq foo@tlsld(%rip), %rdi;
1087 call __tls_get_addr@PLT
1088 or
1089 leaq foo@tlsld(%rip), %rdi;
1090 call *__tls_get_addr@GOTPCREL(%rip)
1091 which may be converted to
1092 addr32 call __tls_get_addr
1093 can transition to a different access model. For largepic
1094 we also support:
1095 leaq foo@tlsld(%rip), %rdi
1096 movabsq $__tls_get_addr@pltoff, %rax
1097 addq $r15, %rax
1098 call *%rax
1099 or
1100 leaq foo@tlsld(%rip), %rdi
1101 movabsq $__tls_get_addr@pltoff, %rax
1102 addq $rbx, %rax
1103 call *%rax */
1104
1105 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1106
1107 if (offset < 3 || (offset + 9) > sec->size)
1108 return FALSE;
1109
1110 if (memcmp (contents + offset - 3, lea, 3) != 0)
1111 return FALSE;
1112
1113 call = contents + offset + 4;
1114 if (!(call[0] == 0xe8
1115 || (call[0] == 0xff && call[1] == 0x15)
1116 || (call[0] == 0x67 && call[1] == 0xe8)))
1117 {
1118 if (!ABI_64_P (abfd)
1119 || (offset + 19) > sec->size
1120 || memcmp (call, "\x48\xb8", 2) != 0
1121 || call[11] != 0x01
1122 || call[13] != 0xff
1123 || call[14] != 0xd0
1124 || !((call[10] == 0x48 && call[12] == 0xd8)
1125 || (call[10] == 0x4c && call[12] == 0xf8)))
1126 return FALSE;
1127 largepic = TRUE;
1128 }
1129 indirect_call = call[0] == 0xff;
1130 }
1131
1132 r_symndx = htab->r_sym (rel[1].r_info);
1133 if (r_symndx < symtab_hdr->sh_info)
1134 return FALSE;
1135
1136 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1137 if (h == NULL
1138 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1139 return FALSE;
1140 else
1141 {
1142 r_type = (ELF32_R_TYPE (rel[1].r_info)
1143 & ~R_X86_64_converted_reloc_bit);
1144 if (largepic)
1145 return r_type == R_X86_64_PLTOFF64;
1146 else if (indirect_call)
1147 return r_type == R_X86_64_GOTPCRELX;
1148 else
1149 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1150 }
1151
1152 case R_X86_64_GOTTPOFF:
1153 /* Check transition from IE access model:
1154 mov foo@gottpoff(%rip), %reg
1155 add foo@gottpoff(%rip), %reg
1156 */
1157
1158 /* Check REX prefix first. */
1159 if (offset >= 3 && (offset + 4) <= sec->size)
1160 {
1161 val = bfd_get_8 (abfd, contents + offset - 3);
1162 if (val != 0x48 && val != 0x4c)
1163 {
1164 /* X32 may have 0x44 REX prefix or no REX prefix. */
1165 if (ABI_64_P (abfd))
1166 return FALSE;
1167 }
1168 }
1169 else
1170 {
1171 /* X32 may not have any REX prefix. */
1172 if (ABI_64_P (abfd))
1173 return FALSE;
1174 if (offset < 2 || (offset + 3) > sec->size)
1175 return FALSE;
1176 }
1177
1178 val = bfd_get_8 (abfd, contents + offset - 2);
1179 if (val != 0x8b && val != 0x03)
1180 return FALSE;
1181
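/* The byte just before the displacement is the ModRM byte; masking
   with 0xc7 keeps the mod and r/m fields, and the value 5 (mod == 00,
   r/m == 101) is RIP-relative addressing with a 32-bit displacement,
   which is the only form foo@gottpoff(%rip) can take; any destination
   register in bits 3-5 is accepted.  */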
1182 val = bfd_get_8 (abfd, contents + offset - 1);
1183 return (val & 0xc7) == 5;
1184
1185 case R_X86_64_GOTPC32_TLSDESC:
1186 /* Check transition from GDesc access model:
1187 leaq x@tlsdesc(%rip), %rax
1188
1189 Make sure it's a leaq adding rip to a 32-bit offset
1190 into any register, although it's probably almost always
1191 going to be rax. */
1192
1193 if (offset < 3 || (offset + 4) > sec->size)
1194 return FALSE;
1195
1196 val = bfd_get_8 (abfd, contents + offset - 3);
1197 if ((val & 0xfb) != 0x48)
1198 return FALSE;
1199
1200 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1201 return FALSE;
1202
1203 val = bfd_get_8 (abfd, contents + offset - 1);
1204 return (val & 0xc7) == 0x05;
1205
1206 case R_X86_64_TLSDESC_CALL:
1207 /* Check transition from GDesc access model:
1208 call *x@tlsdesc(%rax)
1209 */
1210 if (offset + 2 <= sec->size)
1211 {
1212 /* Make sure that it's a call *x@tlsdesc(%rax). */
1213 call = contents + offset;
1214 return call[0] == 0xff && call[1] == 0x10;
1215 }
1216
1217 return FALSE;
1218
1219 default:
1220 abort ();
1221 }
1222 }
1223
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225 will be performed. Update R_TYPE if there is a transition. */
1226
1227 static bfd_boolean
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229 asection *sec, bfd_byte *contents,
1230 Elf_Internal_Shdr *symtab_hdr,
1231 struct elf_link_hash_entry **sym_hashes,
1232 unsigned int *r_type, int tls_type,
1233 const Elf_Internal_Rela *rel,
1234 const Elf_Internal_Rela *relend,
1235 struct elf_link_hash_entry *h,
1236 unsigned long r_symndx,
1237 bfd_boolean from_relocate_section)
1238 {
1239 unsigned int from_type = *r_type;
1240 unsigned int to_type = from_type;
1241 bfd_boolean check = TRUE;
1242
1243 /* Skip TLS transition for functions. */
1244 if (h != NULL
1245 && (h->type == STT_FUNC
1246 || h->type == STT_GNU_IFUNC))
1247 return TRUE;
1248
1249 switch (from_type)
1250 {
1251 case R_X86_64_TLSGD:
1252 case R_X86_64_GOTPC32_TLSDESC:
1253 case R_X86_64_TLSDESC_CALL:
1254 case R_X86_64_GOTTPOFF:
1255 if (bfd_link_executable (info))
1256 {
1257 if (h == NULL)
1258 to_type = R_X86_64_TPOFF32;
1259 else
1260 to_type = R_X86_64_GOTTPOFF;
1261 }
1262
1263 /* When we are called from elf_x86_64_relocate_section, there may
1264 be additional transitions based on TLS_TYPE. */
1265 if (from_relocate_section)
1266 {
1267 unsigned int new_to_type = to_type;
1268
1269 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270 new_to_type = R_X86_64_TPOFF32;
1271
1272 if (to_type == R_X86_64_TLSGD
1273 || to_type == R_X86_64_GOTPC32_TLSDESC
1274 || to_type == R_X86_64_TLSDESC_CALL)
1275 {
1276 if (tls_type == GOT_TLS_IE)
1277 new_to_type = R_X86_64_GOTTPOFF;
1278 }
1279
1280 /* We checked the transition before when we were called from
1281 elf_x86_64_check_relocs. We only want to check the new
1282 transition which hasn't been checked before. */
1283 check = new_to_type != to_type && from_type == to_type;
1284 to_type = new_to_type;
1285 }
1286
1287 break;
1288
1289 case R_X86_64_TLSLD:
1290 if (bfd_link_executable (info))
1291 to_type = R_X86_64_TPOFF32;
1292 break;
1293
1294 default:
1295 return TRUE;
1296 }
1297
1298 /* Return TRUE if there is no transition. */
1299 if (from_type == to_type)
1300 return TRUE;
1301
1302 /* Check if the transition can be performed. */
1303 if (check
1304 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305 symtab_hdr, sym_hashes,
1306 from_type, rel, relend))
1307 {
1308 reloc_howto_type *from, *to;
1309 const char *name;
1310
1311 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1313
1314 if (h)
1315 name = h->root.root.string;
1316 else
1317 {
1318 struct elf_x86_link_hash_table *htab;
1319
1320 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1321 if (htab == NULL)
1322 name = "*unknown*";
1323 else
1324 {
1325 Elf_Internal_Sym *isym;
1326
1327 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1328 abfd, r_symndx);
1329 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1330 }
1331 }
1332
1333 _bfd_error_handler
1334 /* xgettext:c-format */
1335 (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
1336 "in section `%A' failed"),
1337 abfd, from->name, to->name, name, rel->r_offset, sec);
1338 bfd_set_error (bfd_error_bad_value);
1339 return FALSE;
1340 }
1341
1342 *r_type = to_type;
1343 return TRUE;
1344 }
1345
1346 /* Rename some of the generic section flags to better document how they
1347 are used here. */
1348 #define check_relocs_failed sec_flg0
1349
1350 static bfd_boolean
1351 elf_x86_64_need_pic (struct bfd_link_info *info,
1352 bfd *input_bfd, asection *sec,
1353 struct elf_link_hash_entry *h,
1354 Elf_Internal_Shdr *symtab_hdr,
1355 Elf_Internal_Sym *isym,
1356 reloc_howto_type *howto)
1357 {
1358 const char *v = "";
1359 const char *und = "";
1360 const char *pic = "";
1361 const char *object;
1362
1363 const char *name;
1364 if (h)
1365 {
1366 name = h->root.root.string;
1367 switch (ELF_ST_VISIBILITY (h->other))
1368 {
1369 case STV_HIDDEN:
1370 v = _("hidden symbol ");
1371 break;
1372 case STV_INTERNAL:
1373 v = _("internal symbol ");
1374 break;
1375 case STV_PROTECTED:
1376 v = _("protected symbol ");
1377 break;
1378 default:
1379 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1380 v = _("protected symbol ");
1381 else
1382 v = _("symbol ");
1383 pic = _("; recompile with -fPIC");
1384 break;
1385 }
1386
1387 if (!h->def_regular && !h->def_dynamic)
1388 und = _("undefined ");
1389 }
1390 else
1391 {
1392 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1393 pic = _("; recompile with -fPIC");
1394 }
1395
1396 if (bfd_link_dll (info))
1397 object = _("a shared object");
1398 else if (bfd_link_pie (info))
1399 object = _("a PIE object");
1400 else
1401 object = _("a PDE object");
1402
1403 /* xgettext:c-format */
1404 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1405 "not be used when making %s%s"),
1406 input_bfd, howto->name, und, v, name,
1407 object, pic);
1408 bfd_set_error (bfd_error_bad_value);
1409 sec->check_relocs_failed = 1;
1410 return FALSE;
1411 }
1412
1413 /* With the local symbol, foo, we convert
1414 mov foo@GOTPCREL(%rip), %reg
1415 to
1416 lea foo(%rip), %reg
1417 and convert
1418 call/jmp *foo@GOTPCREL(%rip)
1419 to
1420 nop call foo/jmp foo nop
1421 When PIC is false, convert
1422 test %reg, foo@GOTPCREL(%rip)
1423 to
1424 test $foo, %reg
1425 and convert
1426 binop foo@GOTPCREL(%rip), %reg
1427 to
1428 binop $foo, %reg
1429 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1430 instructions. */
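/* A concrete sketch of the mov case (the register is just for
   illustration): "mov foo@GOTPCREL(%rip), %rax" is encoded as
   0x48 0x8b 0x05 <disp32>.  Rewriting the opcode byte at roff - 2
   from 0x8b to 0x8d turns it into "lea foo(%rip), %rax" of the same
   length, and the relocation simply becomes R_X86_64_PC32 against foo
   (the -4 addend is unchanged).  The call/jmp and binop cases below
   follow the same pattern of patching the opcode/ModRM bytes in
   place.  */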
1431
1432 static bfd_boolean
1433 elf_x86_64_convert_load_reloc (bfd *abfd,
1434 bfd_byte *contents,
1435 unsigned int *r_type_p,
1436 Elf_Internal_Rela *irel,
1437 struct elf_link_hash_entry *h,
1438 bfd_boolean *converted,
1439 struct bfd_link_info *link_info)
1440 {
1441 struct elf_x86_link_hash_table *htab;
1442 bfd_boolean is_pic;
1443 bfd_boolean no_overflow;
1444 bfd_boolean relocx;
1445 bfd_boolean to_reloc_pc32;
1446 asection *tsec;
1447 bfd_signed_vma raddend;
1448 unsigned int opcode;
1449 unsigned int modrm;
1450 unsigned int r_type = *r_type_p;
1451 unsigned int r_symndx;
1452 bfd_vma roff = irel->r_offset;
1453
1454 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1455 return TRUE;
1456
1457 raddend = irel->r_addend;
1458 /* Addend for 32-bit PC-relative relocation must be -4. */
1459 if (raddend != -4)
1460 return TRUE;
1461
1462 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1463 is_pic = bfd_link_pic (link_info);
1464
1465 relocx = (r_type == R_X86_64_GOTPCRELX
1466 || r_type == R_X86_64_REX_GOTPCRELX);
1467
1468 /* TRUE if --no-relax is used. */
1469 no_overflow = link_info->disable_target_specific_optimizations > 1;
1470
1471 r_symndx = htab->r_sym (irel->r_info);
1472
1473 opcode = bfd_get_8 (abfd, contents + roff - 2);
1474
1475 /* mov (to lea) can be converted even for plain R_X86_64_GOTPCREL, since that conversion has been supported for a while. */
1476 if (opcode != 0x8b)
1477 {
1478 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1479 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1480 test, xor instructions. */
1481 if (!relocx)
1482 return TRUE;
1483 }
1484
1485 /* We can convert only to R_X86_64_PC32 in any of these cases:
1486 1. Branch.
1487 2. R_X86_64_GOTPCREL, since we can't modify the REX byte.
1488 3. no_overflow is true.
1489 4. PIC.
1490 */
1491 to_reloc_pc32 = (opcode == 0xff
1492 || !relocx
1493 || no_overflow
1494 || is_pic);
1495
1496 /* Get the symbol referred to by the reloc. */
1497 if (h == NULL)
1498 {
1499 Elf_Internal_Sym *isym
1500 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1501
1502 /* Skip relocation against undefined symbols. */
1503 if (isym->st_shndx == SHN_UNDEF)
1504 return TRUE;
1505
1506 if (isym->st_shndx == SHN_ABS)
1507 tsec = bfd_abs_section_ptr;
1508 else if (isym->st_shndx == SHN_COMMON)
1509 tsec = bfd_com_section_ptr;
1510 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1511 tsec = &_bfd_elf_large_com_section;
1512 else
1513 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1514 }
1515 else
1516 {
1517 /* An undefined weak symbol is only bound locally in an executable
1518 and its reference is resolved as 0 without relocation
1519 overflow. We can only perform this optimization for
1520 GOTPCRELX relocations since we need to modify the REX byte.
1521 It is OK to convert mov with R_X86_64_GOTPCREL to
1522 R_X86_64_PC32. */
1523 bfd_boolean local_ref;
1524 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1525
1526 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1527 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1528 if ((relocx || opcode == 0x8b)
1529 && (h->root.type == bfd_link_hash_undefweak
1530 && !eh->linker_def
1531 && local_ref))
1532 {
1533 if (opcode == 0xff)
1534 {
1535 /* Skip for branch instructions since R_X86_64_PC32
1536 may overflow. */
1537 if (no_overflow)
1538 return TRUE;
1539 }
1540 else if (relocx)
1541 {
1542 /* For non-branch instructions, we can convert to
1543 R_X86_64_32/R_X86_64_32S since we know whether
1544 there is a REX byte. */
1545 to_reloc_pc32 = FALSE;
1546 }
1547
1548 /* Since we don't know the current PC when PIC is true,
1549 we can't convert to R_X86_64_PC32. */
1550 if (to_reloc_pc32 && is_pic)
1551 return TRUE;
1552
1553 goto convert;
1554 }
1555 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1556 ld.so may use its link-time address. */
1557 else if (h->start_stop
1558 || eh->linker_def
1559 || ((h->def_regular
1560 || h->root.type == bfd_link_hash_defined
1561 || h->root.type == bfd_link_hash_defweak)
1562 && h != htab->elf.hdynamic
1563 && local_ref))
1564 {
1565 /* bfd_link_hash_new or bfd_link_hash_undefined is
1566 set by an assignment in a linker script in
1567 bfd_elf_record_link_assignment. start_stop is set
1568 on __start_SECNAME/__stop_SECNAME which mark section
1569 SECNAME. */
1570 if (h->start_stop
1571 || eh->linker_def
1572 || (h->def_regular
1573 && (h->root.type == bfd_link_hash_new
1574 || h->root.type == bfd_link_hash_undefined
1575 || ((h->root.type == bfd_link_hash_defined
1576 || h->root.type == bfd_link_hash_defweak)
1577 && h->root.u.def.section == bfd_und_section_ptr))))
1578 {
1579 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1580 if (no_overflow)
1581 return TRUE;
1582 goto convert;
1583 }
1584 tsec = h->root.u.def.section;
1585 }
1586 else
1587 return TRUE;
1588 }
1589
1590 /* Don't convert GOTPCREL relocations against a large section. */
1591 if (elf_section_data (tsec) != NULL
1592 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1593 return TRUE;
1594
1595 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1596 if (no_overflow)
1597 return TRUE;
1598
1599 convert:
1600 if (opcode == 0xff)
1601 {
1602 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1603 unsigned int nop;
1604 unsigned int disp;
1605 bfd_vma nop_offset;
1606
1607 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1608 R_X86_64_PC32. */
1609 modrm = bfd_get_8 (abfd, contents + roff - 1);
1610 if (modrm == 0x25)
1611 {
1612 /* Convert to "jmp foo nop". */
1613 modrm = 0xe9;
1614 nop = NOP_OPCODE;
1615 nop_offset = irel->r_offset + 3;
1616 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1617 irel->r_offset -= 1;
1618 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1619 }
1620 else
1621 {
1622 struct elf_x86_link_hash_entry *eh
1623 = (struct elf_x86_link_hash_entry *) h;
1624
1625 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1626 is a nop prefix. */
1627 modrm = 0xe8;
1628 /* To support TLS optimization, always use addr32 prefix for
1629 "call *__tls_get_addr@GOTPCREL(%rip)". */
1630 if (eh && eh->tls_get_addr)
1631 {
1632 nop = 0x67;
1633 nop_offset = irel->r_offset - 2;
1634 }
1635 else
1636 {
1637 nop = link_info->call_nop_byte;
1638 if (link_info->call_nop_as_suffix)
1639 {
1640 nop_offset = irel->r_offset + 3;
1641 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1642 irel->r_offset -= 1;
1643 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1644 }
1645 else
1646 nop_offset = irel->r_offset - 2;
1647 }
1648 }
1649 bfd_put_8 (abfd, nop, contents + nop_offset);
1650 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1651 r_type = R_X86_64_PC32;
1652 }
1653 else
1654 {
1655 unsigned int rex;
1656 unsigned int rex_mask = REX_R;
1657
1658 if (r_type == R_X86_64_REX_GOTPCRELX)
1659 rex = bfd_get_8 (abfd, contents + roff - 3);
1660 else
1661 rex = 0;
1662
1663 if (opcode == 0x8b)
1664 {
1665 if (to_reloc_pc32)
1666 {
1667 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1668 "lea foo(%rip), %reg". */
1669 opcode = 0x8d;
1670 r_type = R_X86_64_PC32;
1671 }
1672 else
1673 {
1674 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1675 "mov $foo, %reg". */
1676 opcode = 0xc7;
1677 modrm = bfd_get_8 (abfd, contents + roff - 1);
1678 modrm = 0xc0 | (modrm & 0x38) >> 3;
1679 if ((rex & REX_W) != 0
1680 && ABI_64_P (link_info->output_bfd))
1681 {
1682 /* Keep the REX_W bit in REX byte for LP64. */
1683 r_type = R_X86_64_32S;
1684 goto rewrite_modrm_rex;
1685 }
1686 else
1687 {
1688			      /* If the REX_W bit in the REX byte isn't needed,
1689				 use R_X86_64_32 and clear the W bit to avoid
1690				 sign-extending imm32 to imm64.  */
1691 r_type = R_X86_64_32;
1692 /* Clear the W bit in REX byte. */
1693 rex_mask |= REX_W;
1694 goto rewrite_modrm_rex;
1695 }
1696 }
1697 }
1698 else
1699 {
1700 /* R_X86_64_PC32 isn't supported. */
1701 if (to_reloc_pc32)
1702 return TRUE;
1703
1704 modrm = bfd_get_8 (abfd, contents + roff - 1);
1705 if (opcode == 0x85)
1706 {
1707 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1708 "test $foo, %reg". */
1709 modrm = 0xc0 | (modrm & 0x38) >> 3;
1710 opcode = 0xf7;
1711 }
1712 else
1713 {
1714 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1715 "binop $foo, %reg". */
1716 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1717 opcode = 0x81;
1718 }
1719
1720 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1721 overflow when sign-extending imm32 to imm64. */
1722 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1723
1724 rewrite_modrm_rex:
1725 bfd_put_8 (abfd, modrm, contents + roff - 1);
1726
1727 if (rex)
1728 {
1729 /* Move the R bit to the B bit in REX byte. */
1730 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1731 bfd_put_8 (abfd, rex, contents + roff - 3);
1732 }
1733
1734 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1735 irel->r_addend = 0;
1736 }
1737
1738 bfd_put_8 (abfd, opcode, contents + roff - 2);
1739 }
1740
1741 *r_type_p = r_type;
1742 irel->r_info = htab->r_info (r_symndx,
1743 r_type | R_X86_64_converted_reloc_bit);
1744
1745 *converted = TRUE;
1746
1747 return TRUE;
1748 }
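/* Editor's note: the following is an illustrative, standalone sketch and is
   not part of the original file.  It mirrors only the simplest case handled
   by elf_x86_64_convert_load_reloc above: turning the 7-byte
   "mov foo@GOTPCREL(%rip), %reg" into "lea foo(%rip), %reg" by rewriting the
   opcode byte, assuming ROFF is the offset of the 32-bit displacement as in
   the code above.  */
#if 0
#include <assert.h>
#include <stdio.h>

static void
convert_mov_to_lea (unsigned char *insn, unsigned long roff)
{
  /* The opcode byte sits two bytes before the displacement and the ModRM
     byte one byte before it; the ModRM byte is unchanged for the
     RIP-relative lea form.  */
  assert (insn[roff - 2] == 0x8b);	/* mov r64, r/m64 */
  insn[roff - 2] = 0x8d;		/* lea r64, m */
}

int
main (void)
{
  /* 48 8b 05 00 00 00 00: movq 0x0(%rip), %rax.  */
  unsigned char insn[] = { 0x48, 0x8b, 0x05, 0, 0, 0, 0 };

  convert_mov_to_lea (insn, 3);
  /* Prints "48 8d 05": leaq 0x0(%rip), %rax.  */
  printf ("%02x %02x %02x\n", insn[0], insn[1], insn[2]);
  return 0;
}
#endif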
1749
1750 /* Look through the relocs for a section during the first phase, and
1751 calculate needed space in the global offset table, procedure
1752 linkage table, and dynamic reloc sections. */
1753
1754 static bfd_boolean
1755 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1756 asection *sec,
1757 const Elf_Internal_Rela *relocs)
1758 {
1759 struct elf_x86_link_hash_table *htab;
1760 Elf_Internal_Shdr *symtab_hdr;
1761 struct elf_link_hash_entry **sym_hashes;
1762 const Elf_Internal_Rela *rel;
1763 const Elf_Internal_Rela *rel_end;
1764 asection *sreloc;
1765 bfd_byte *contents;
1766 bfd_boolean converted;
1767
1768 if (bfd_link_relocatable (info))
1769 return TRUE;
1770
1771 /* Don't do anything special with non-loaded, non-alloced sections.
1772 In particular, any relocs in such sections should not affect GOT
1773 and PLT reference counting (ie. we don't allow them to create GOT
1774 or PLT entries), there's no possibility or desire to optimize TLS
1775 relocs, and there's not much point in propagating relocs to shared
1776 libs that the dynamic linker won't relocate. */
1777 if ((sec->flags & SEC_ALLOC) == 0)
1778 return TRUE;
1779
1780 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1781 if (htab == NULL)
1782 {
1783 sec->check_relocs_failed = 1;
1784 return FALSE;
1785 }
1786
1787 BFD_ASSERT (is_x86_elf (abfd, htab));
1788
1789 /* Get the section contents. */
1790 if (elf_section_data (sec)->this_hdr.contents != NULL)
1791 contents = elf_section_data (sec)->this_hdr.contents;
1792 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1793 {
1794 sec->check_relocs_failed = 1;
1795 return FALSE;
1796 }
1797
1798 symtab_hdr = &elf_symtab_hdr (abfd);
1799 sym_hashes = elf_sym_hashes (abfd);
1800
1801 converted = FALSE;
1802
1803 sreloc = NULL;
1804
1805 rel_end = relocs + sec->reloc_count;
1806 for (rel = relocs; rel < rel_end; rel++)
1807 {
1808 unsigned int r_type;
1809 unsigned int r_symndx;
1810 struct elf_link_hash_entry *h;
1811 struct elf_x86_link_hash_entry *eh;
1812 Elf_Internal_Sym *isym;
1813 const char *name;
1814 bfd_boolean size_reloc;
1815 bfd_boolean converted_reloc;
1816
1817 r_symndx = htab->r_sym (rel->r_info);
1818 r_type = ELF32_R_TYPE (rel->r_info);
1819
1820 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1821 {
1822 /* xgettext:c-format */
1823 _bfd_error_handler (_("%B: bad symbol index: %d"),
1824 abfd, r_symndx);
1825 goto error_return;
1826 }
1827
1828 if (r_symndx < symtab_hdr->sh_info)
1829 {
1830 /* A local symbol. */
1831 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1832 abfd, r_symndx);
1833 if (isym == NULL)
1834 goto error_return;
1835
1836 /* Check relocation against local STT_GNU_IFUNC symbol. */
1837 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1838 {
1839 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1840 TRUE);
1841 if (h == NULL)
1842 goto error_return;
1843
1844 /* Fake a STT_GNU_IFUNC symbol. */
1845 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1846 isym, NULL);
1847 h->type = STT_GNU_IFUNC;
1848 h->def_regular = 1;
1849 h->ref_regular = 1;
1850 h->forced_local = 1;
1851 h->root.type = bfd_link_hash_defined;
1852 }
1853 else
1854 h = NULL;
1855 }
1856 else
1857 {
1858 isym = NULL;
1859 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1860 while (h->root.type == bfd_link_hash_indirect
1861 || h->root.type == bfd_link_hash_warning)
1862 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1863 }
1864
1865 /* Check invalid x32 relocations. */
1866 if (!ABI_64_P (abfd))
1867 switch (r_type)
1868 {
1869 default:
1870 break;
1871
1872 case R_X86_64_DTPOFF64:
1873 case R_X86_64_TPOFF64:
1874 case R_X86_64_PC64:
1875 case R_X86_64_GOTOFF64:
1876 case R_X86_64_GOT64:
1877 case R_X86_64_GOTPCREL64:
1878 case R_X86_64_GOTPC64:
1879 case R_X86_64_GOTPLT64:
1880 case R_X86_64_PLTOFF64:
1881 {
1882 if (h)
1883 name = h->root.root.string;
1884 else
1885 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1886 NULL);
1887 _bfd_error_handler
1888 /* xgettext:c-format */
1889 (_("%B: relocation %s against symbol `%s' isn't "
1890 "supported in x32 mode"), abfd,
1891 x86_64_elf_howto_table[r_type].name, name);
1892 bfd_set_error (bfd_error_bad_value);
1893 goto error_return;
1894 }
1895 break;
1896 }
1897
1898 if (h != NULL)
1899 {
1900 /* It is referenced by a non-shared object. */
1901 h->ref_regular = 1;
1902 h->root.non_ir_ref_regular = 1;
1903
1904 if (h->type == STT_GNU_IFUNC)
1905 elf_tdata (info->output_bfd)->has_gnu_symbols
1906 |= elf_gnu_symbol_ifunc;
1907 }
1908
1909 converted_reloc = FALSE;
1910 if ((r_type == R_X86_64_GOTPCREL
1911 || r_type == R_X86_64_GOTPCRELX
1912 || r_type == R_X86_64_REX_GOTPCRELX)
1913 && (h == NULL || h->type != STT_GNU_IFUNC))
1914 {
1915 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1916 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1917 irel, h, &converted_reloc,
1918 info))
1919 goto error_return;
1920
1921 if (converted_reloc)
1922 converted = TRUE;
1923 }
1924
1925 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1926 symtab_hdr, sym_hashes,
1927 &r_type, GOT_UNKNOWN,
1928 rel, rel_end, h, r_symndx, FALSE))
1929 goto error_return;
1930
1931 eh = (struct elf_x86_link_hash_entry *) h;
1932 switch (r_type)
1933 {
1934 case R_X86_64_TLSLD:
1935 htab->tls_ld_or_ldm_got.refcount = 1;
1936 goto create_got;
1937
1938 case R_X86_64_TPOFF32:
1939 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1940 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1941 &x86_64_elf_howto_table[r_type]);
1942 if (eh != NULL)
1943 eh->zero_undefweak &= 0x2;
1944 break;
1945
1946 case R_X86_64_GOTTPOFF:
1947 if (!bfd_link_executable (info))
1948 info->flags |= DF_STATIC_TLS;
1949 /* Fall through */
1950
1951 case R_X86_64_GOT32:
1952 case R_X86_64_GOTPCREL:
1953 case R_X86_64_GOTPCRELX:
1954 case R_X86_64_REX_GOTPCRELX:
1955 case R_X86_64_TLSGD:
1956 case R_X86_64_GOT64:
1957 case R_X86_64_GOTPCREL64:
1958 case R_X86_64_GOTPLT64:
1959 case R_X86_64_GOTPC32_TLSDESC:
1960 case R_X86_64_TLSDESC_CALL:
1961 /* This symbol requires a global offset table entry. */
1962 {
1963 int tls_type, old_tls_type;
1964
1965 switch (r_type)
1966 {
1967 default: tls_type = GOT_NORMAL; break;
1968 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1969 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1970 case R_X86_64_GOTPC32_TLSDESC:
1971 case R_X86_64_TLSDESC_CALL:
1972 tls_type = GOT_TLS_GDESC; break;
1973 }
1974
1975 if (h != NULL)
1976 {
1977 h->got.refcount = 1;
1978 old_tls_type = eh->tls_type;
1979 }
1980 else
1981 {
1982 bfd_signed_vma *local_got_refcounts;
1983
1984 /* This is a global offset table entry for a local symbol. */
1985 local_got_refcounts = elf_local_got_refcounts (abfd);
1986 if (local_got_refcounts == NULL)
1987 {
1988 bfd_size_type size;
1989
1990 size = symtab_hdr->sh_info;
1991 size *= sizeof (bfd_signed_vma)
1992 + sizeof (bfd_vma) + sizeof (char);
1993 local_got_refcounts = ((bfd_signed_vma *)
1994 bfd_zalloc (abfd, size));
1995 if (local_got_refcounts == NULL)
1996 goto error_return;
1997 elf_local_got_refcounts (abfd) = local_got_refcounts;
1998 elf_x86_local_tlsdesc_gotent (abfd)
1999 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2000 elf_x86_local_got_tls_type (abfd)
2001 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2002 }
2003 local_got_refcounts[r_symndx] = 1;
2004 old_tls_type
2005 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2006 }
2007
2008	    /* If a TLS symbol is accessed using IE at least once,
2009	       there is no point in using the dynamic model for it.  */
2010 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2011 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2012 || tls_type != GOT_TLS_IE))
2013 {
2014 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2015 tls_type = old_tls_type;
2016 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2017 && GOT_TLS_GD_ANY_P (tls_type))
2018 tls_type |= old_tls_type;
2019 else
2020 {
2021 if (h)
2022 name = h->root.root.string;
2023 else
2024 name = bfd_elf_sym_name (abfd, symtab_hdr,
2025 isym, NULL);
2026 _bfd_error_handler
2027 /* xgettext:c-format */
2028 (_("%B: '%s' accessed both as normal and"
2029 " thread local symbol"),
2030 abfd, name);
2031 bfd_set_error (bfd_error_bad_value);
2032 goto error_return;
2033 }
2034 }
2035
2036 if (old_tls_type != tls_type)
2037 {
2038 if (eh != NULL)
2039 eh->tls_type = tls_type;
2040 else
2041 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2042 }
2043 }
2044 /* Fall through */
2045
2046 case R_X86_64_GOTOFF64:
2047 case R_X86_64_GOTPC32:
2048 case R_X86_64_GOTPC64:
2049 create_got:
2050 if (eh != NULL)
2051 eh->zero_undefweak &= 0x2;
2052 break;
2053
2054 case R_X86_64_PLT32:
2055 case R_X86_64_PLT32_BND:
2056 /* This symbol requires a procedure linkage table entry. We
2057 actually build the entry in adjust_dynamic_symbol,
2058 because this might be a case of linking PIC code which is
2059 never referenced by a dynamic object, in which case we
2060 don't need to generate a procedure linkage table entry
2061 after all. */
2062
2063 /* If this is a local symbol, we resolve it directly without
2064 creating a procedure linkage table entry. */
2065 if (h == NULL)
2066 continue;
2067
2068 eh->zero_undefweak &= 0x2;
2069 h->needs_plt = 1;
2070 h->plt.refcount += 1;
2071 break;
2072
2073 case R_X86_64_PLTOFF64:
2074 /* This tries to form the 'address' of a function relative
2075 to GOT. For global symbols we need a PLT entry. */
2076 if (h != NULL)
2077 {
2078 h->needs_plt = 1;
2079 h->plt.refcount += 1;
2080 }
2081 goto create_got;
2082
2083 case R_X86_64_SIZE32:
2084 case R_X86_64_SIZE64:
2085 size_reloc = TRUE;
2086 goto do_size;
2087
2088 case R_X86_64_32:
2089 if (!ABI_64_P (abfd))
2090 goto pointer;
2091 /* Fall through. */
2092 case R_X86_64_8:
2093 case R_X86_64_16:
2094 case R_X86_64_32S:
2095 /* Check relocation overflow as these relocs may lead to
2096 run-time relocation overflow. Don't error out for
2097 sections we don't care about, such as debug sections or
2098 when relocation overflow check is disabled. */
2099 if (!info->no_reloc_overflow_check
2100 && !converted_reloc
2101 && (bfd_link_pic (info)
2102 || (bfd_link_executable (info)
2103 && h != NULL
2104 && !h->def_regular
2105 && h->def_dynamic
2106 && (sec->flags & SEC_READONLY) == 0)))
2107 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2108 &x86_64_elf_howto_table[r_type]);
2109 /* Fall through. */
2110
2111 case R_X86_64_PC8:
2112 case R_X86_64_PC16:
2113 case R_X86_64_PC32:
2114 case R_X86_64_PC32_BND:
2115 case R_X86_64_PC64:
2116 case R_X86_64_64:
2117 pointer:
2118 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2119 eh->zero_undefweak |= 0x2;
2120	  /* We are called after all symbols have been resolved.  Only
2121	     relocations against STT_GNU_IFUNC symbols must go through
2122	     the PLT.  */
2123 if (h != NULL
2124 && (bfd_link_executable (info)
2125 || h->type == STT_GNU_IFUNC))
2126 {
2127 /* If this reloc is in a read-only section, we might
2128 need a copy reloc. We can't check reliably at this
2129 stage whether the section is read-only, as input
2130 sections have not yet been mapped to output sections.
2131 Tentatively set the flag for now, and correct in
2132 adjust_dynamic_symbol. */
2133 h->non_got_ref = 1;
2134
2135 /* We may need a .plt entry if the symbol is a function
2136 defined in a shared lib or is a STT_GNU_IFUNC function
2137 referenced from the code or read-only section. */
2138 if (!h->def_regular
2139 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2140 h->plt.refcount += 1;
2141
2142 if (r_type == R_X86_64_PC32)
2143 {
2144		  /* Since something like ".long foo - ." may be used
2145		     as a pointer, make sure that the PLT is used if foo is
2146		     a function defined in a shared library.  */
2147 if ((sec->flags & SEC_CODE) == 0)
2148 h->pointer_equality_needed = 1;
2149 }
2150 else if (r_type != R_X86_64_PC32_BND
2151 && r_type != R_X86_64_PC64)
2152 {
2153 h->pointer_equality_needed = 1;
2154 /* At run-time, R_X86_64_64 can be resolved for both
2155 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2156 can only be resolved for x32. */
2157 if ((sec->flags & SEC_READONLY) == 0
2158 && (r_type == R_X86_64_64
2159 || (!ABI_64_P (abfd)
2160 && (r_type == R_X86_64_32
2161 || r_type == R_X86_64_32S))))
2162 eh->func_pointer_refcount += 1;
2163 }
2164 }
2165
2166 size_reloc = FALSE;
2167 do_size:
2168 if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
2169 htab->pointer_r_type))
2170 {
2171 struct elf_dyn_relocs *p;
2172 struct elf_dyn_relocs **head;
2173
2174 /* We must copy these reloc types into the output file.
2175 Create a reloc section in dynobj and make room for
2176 this reloc. */
2177 if (sreloc == NULL)
2178 {
2179 sreloc = _bfd_elf_make_dynamic_reloc_section
2180 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2181 abfd, /*rela?*/ TRUE);
2182
2183 if (sreloc == NULL)
2184 goto error_return;
2185 }
2186
2187 /* If this is a global symbol, we count the number of
2188 relocations we need for this symbol. */
2189 if (h != NULL)
2190 head = &eh->dyn_relocs;
2191 else
2192 {
2193 /* Track dynamic relocs needed for local syms too.
2194 We really need local syms available to do this
2195 easily. Oh well. */
2196 asection *s;
2197 void **vpp;
2198
2199 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2200 abfd, r_symndx);
2201 if (isym == NULL)
2202 goto error_return;
2203
2204 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2205 if (s == NULL)
2206 s = sec;
2207
2208 /* Beware of type punned pointers vs strict aliasing
2209 rules. */
2210 vpp = &(elf_section_data (s)->local_dynrel);
2211 head = (struct elf_dyn_relocs **)vpp;
2212 }
2213
2214 p = *head;
2215 if (p == NULL || p->sec != sec)
2216 {
2217 bfd_size_type amt = sizeof *p;
2218
2219 p = ((struct elf_dyn_relocs *)
2220 bfd_alloc (htab->elf.dynobj, amt));
2221 if (p == NULL)
2222 goto error_return;
2223 p->next = *head;
2224 *head = p;
2225 p->sec = sec;
2226 p->count = 0;
2227 p->pc_count = 0;
2228 }
2229
2230 p->count += 1;
2231 /* Count size relocation as PC-relative relocation. */
2232 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2233 p->pc_count += 1;
2234 }
2235 break;
2236
2237 /* This relocation describes the C++ object vtable hierarchy.
2238 Reconstruct it for later use during GC. */
2239 case R_X86_64_GNU_VTINHERIT:
2240 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2241 goto error_return;
2242 break;
2243
2244 /* This relocation describes which C++ vtable entries are actually
2245 used. Record for later use during GC. */
2246 case R_X86_64_GNU_VTENTRY:
2247 BFD_ASSERT (h != NULL);
2248 if (h != NULL
2249 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2250 goto error_return;
2251 break;
2252
2253 default:
2254 break;
2255 }
2256 }
2257
2258 if (elf_section_data (sec)->this_hdr.contents != contents)
2259 {
2260 if (!converted && !info->keep_memory)
2261 free (contents);
2262 else
2263 {
2264 /* Cache the section contents for elf_link_input_bfd if any
2265 load is converted or --no-keep-memory isn't used. */
2266 elf_section_data (sec)->this_hdr.contents = contents;
2267 }
2268 }
2269
2270 /* Cache relocations if any load is converted. */
2271 if (elf_section_data (sec)->relocs != relocs && converted)
2272 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2273
2274 return TRUE;
2275
2276 error_return:
2277 if (elf_section_data (sec)->this_hdr.contents != contents)
2278 free (contents);
2279 sec->check_relocs_failed = 1;
2280 return FALSE;
2281 }
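/* Editor's note: the following is an illustrative, standalone sketch and is
   not part of the original file.  It shows how check_relocs above carves one
   zeroed allocation into the three per-local-symbol arrays (GOT refcounts,
   TLSDESC GOT offsets and GOT TLS types).  The names "nlocals", "refcounts",
   "tlsdesc_gotents" and "tls_types" are made up here; long long and unsigned
   long long stand in for bfd_signed_vma and bfd_vma.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  size_t nlocals = 8;		/* stands in for symtab_hdr->sh_info */
  size_t size = nlocals * (sizeof (long long)
			   + sizeof (unsigned long long)
			   + sizeof (char));
  long long *refcounts = calloc (1, size);

  if (refcounts == NULL)
    return 1;

  /* The TLSDESC GOT offsets start right after the refcounts, and the
     one-byte TLS types right after the TLSDESC GOT offsets.  */
  unsigned long long *tlsdesc_gotents
    = (unsigned long long *) (refcounts + nlocals);
  char *tls_types = (char *) (tlsdesc_gotents + nlocals);

  refcounts[3] = 1;		/* local symbol 3 needs a GOT slot */
  tlsdesc_gotents[3] = 0;	/* no TLSDESC GOT entry yet */
  tls_types[3] = 0;		/* GOT_UNKNOWN placeholder */

  printf ("%zu bytes cover %zu local symbols\n", size, nlocals);
  free (refcounts);
  return 0;
}
#endif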
2282
2283 /* Return the relocation value for @tpoff relocation
2284 if STT_TLS virtual address is ADDRESS. */
2285
2286 static bfd_vma
2287 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2288 {
2289 struct elf_link_hash_table *htab = elf_hash_table (info);
2290 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2291 bfd_vma static_tls_size;
2292
2293   /* If tls_sec is NULL, we should have signalled an error already.  */
2294 if (htab->tls_sec == NULL)
2295 return 0;
2296
2297 /* Consider special static TLS alignment requirements. */
2298 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2299 return address - static_tls_size - htab->tls_sec->vma;
2300 }
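/* Editor's note: the following is an illustrative, standalone sketch and is
   not part of the original file.  It reproduces the @tpoff arithmetic of
   elf_x86_64_tpoff above with made-up numbers; align_up stands in for
   BFD_ALIGN.  x86-64 uses TLS variant II, so offsets from the thread
   pointer come out negative.  */
#if 0
#include <stdio.h>

static unsigned long
align_up (unsigned long value, unsigned long align)
{
  return (value + align - 1) & ~(align - 1);
}

int
main (void)
{
  unsigned long tls_vma = 0x1000;	/* vma of the TLS segment */
  unsigned long tls_size = 0x30;	/* size of the static TLS block */
  unsigned long alignment = 0x40;	/* static TLS alignment */
  unsigned long address = 0x1008;	/* vma of the TLS variable */

  /* 0x1008 - 0x40 - 0x1000 = -0x38.  */
  long tpoff = (long) (address - align_up (tls_size, alignment) - tls_vma);

  printf ("tpoff = %ld (0x%lx below the thread pointer)\n",
	  tpoff, (unsigned long) -tpoff);
  return 0;
}
#endif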
2301
2302 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
2303 branch? */
2304
2305 static bfd_boolean
2306 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
2307 {
2308 /* Opcode Instruction
2309 0xe8 call
2310 0xe9 jump
2311 0x0f 0x8x conditional jump */
2312 return ((offset > 0
2313 && (contents [offset - 1] == 0xe8
2314 || contents [offset - 1] == 0xe9))
2315 || (offset > 1
2316 && contents [offset - 2] == 0x0f
2317 && (contents [offset - 1] & 0xf0) == 0x80));
2318 }
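/* Editor's note: the following is an illustrative, standalone sketch and is
   not part of the original file.  It shows how is_32bit_relative_branch
   above is meant to be called: OFFSET is where the 32-bit displacement (and
   hence the relocation) starts, so the opcode bytes are the one or two bytes
   immediately before it.  looks_like_branch is a local copy of the same
   predicate so the sketch is self-contained.  */
#if 0
#include <stdio.h>

static int
looks_like_branch (const unsigned char *contents, unsigned long offset)
{
  return ((offset > 0
	   && (contents[offset - 1] == 0xe8	 /* call rel32 */
	       || contents[offset - 1] == 0xe9)) /* jmp rel32 */
	  || (offset > 1
	      && contents[offset - 2] == 0x0f
	      && (contents[offset - 1] & 0xf0) == 0x80)); /* jcc rel32 */
}

int
main (void)
{
  /* e8 xx xx xx xx: call rel32; the displacement starts at offset 1.  */
  unsigned char call_insn[] = { 0xe8, 0, 0, 0, 0 };
  /* 0f 84 xx xx xx xx: je rel32; the displacement starts at offset 2.  */
  unsigned char je_insn[] = { 0x0f, 0x84, 0, 0, 0, 0 };

  /* Both print 1.  */
  printf ("call: %d, je: %d\n",
	  looks_like_branch (call_insn, 1),
	  looks_like_branch (je_insn, 2));
  return 0;
}
#endif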
2319
2320 /* Relocate an x86_64 ELF section. */
2321
2322 static bfd_boolean
2323 elf_x86_64_relocate_section (bfd *output_bfd,
2324 struct bfd_link_info *info,
2325 bfd *input_bfd,
2326 asection *input_section,
2327 bfd_byte *contents,
2328 Elf_Internal_Rela *relocs,
2329 Elf_Internal_Sym *local_syms,
2330 asection **local_sections)
2331 {
2332 struct elf_x86_link_hash_table *htab;
2333 Elf_Internal_Shdr *symtab_hdr;
2334 struct elf_link_hash_entry **sym_hashes;
2335 bfd_vma *local_got_offsets;
2336 bfd_vma *local_tlsdesc_gotents;
2337 Elf_Internal_Rela *rel;
2338 Elf_Internal_Rela *wrel;
2339 Elf_Internal_Rela *relend;
2340 unsigned int plt_entry_size;
2341
2342 /* Skip if check_relocs failed. */
2343 if (input_section->check_relocs_failed)
2344 return FALSE;
2345
2346 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2347 if (htab == NULL)
2348 return FALSE;
2349
2350 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2351
2352 plt_entry_size = htab->plt.plt_entry_size;
2353 symtab_hdr = &elf_symtab_hdr (input_bfd);
2354 sym_hashes = elf_sym_hashes (input_bfd);
2355 local_got_offsets = elf_local_got_offsets (input_bfd);
2356 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2357
2358 _bfd_x86_elf_set_tls_module_base (info);
2359
2360 rel = wrel = relocs;
2361 relend = relocs + input_section->reloc_count;
2362 for (; rel < relend; wrel++, rel++)
2363 {
2364 unsigned int r_type, r_type_tls;
2365 reloc_howto_type *howto;
2366 unsigned long r_symndx;
2367 struct elf_link_hash_entry *h;
2368 struct elf_x86_link_hash_entry *eh;
2369 Elf_Internal_Sym *sym;
2370 asection *sec;
2371 bfd_vma off, offplt, plt_offset;
2372 bfd_vma relocation;
2373 bfd_boolean unresolved_reloc;
2374 bfd_reloc_status_type r;
2375 int tls_type;
2376 asection *base_got, *resolved_plt;
2377 bfd_vma st_size;
2378 bfd_boolean resolved_to_zero;
2379 bfd_boolean relative_reloc;
2380 bfd_boolean converted_reloc;
2381 bfd_boolean need_copy_reloc_in_pie;
2382
2383 r_type = ELF32_R_TYPE (rel->r_info);
2384 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2385 || r_type == (int) R_X86_64_GNU_VTENTRY)
2386 {
2387 if (wrel != rel)
2388 *wrel = *rel;
2389 continue;
2390 }
2391
2392 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2393 r_type &= ~R_X86_64_converted_reloc_bit;
2394
2395 if (r_type >= (int) R_X86_64_standard)
2396 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2397
2398 if (r_type != (int) R_X86_64_32
2399 || ABI_64_P (output_bfd))
2400 howto = x86_64_elf_howto_table + r_type;
2401 else
2402 howto = (x86_64_elf_howto_table
2403 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2404 r_symndx = htab->r_sym (rel->r_info);
2405 h = NULL;
2406 sym = NULL;
2407 sec = NULL;
2408 unresolved_reloc = FALSE;
2409 if (r_symndx < symtab_hdr->sh_info)
2410 {
2411 sym = local_syms + r_symndx;
2412 sec = local_sections[r_symndx];
2413
2414 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2415 &sec, rel);
2416 st_size = sym->st_size;
2417
2418 /* Relocate against local STT_GNU_IFUNC symbol. */
2419 if (!bfd_link_relocatable (info)
2420 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2421 {
2422 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2423 rel, FALSE);
2424 if (h == NULL)
2425 abort ();
2426
2427 /* Set STT_GNU_IFUNC symbol value. */
2428 h->root.u.def.value = sym->st_value;
2429 h->root.u.def.section = sec;
2430 }
2431 }
2432 else
2433 {
2434 bfd_boolean warned ATTRIBUTE_UNUSED;
2435 bfd_boolean ignored ATTRIBUTE_UNUSED;
2436
2437 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2438 r_symndx, symtab_hdr, sym_hashes,
2439 h, sec, relocation,
2440 unresolved_reloc, warned, ignored);
2441 st_size = h->size;
2442 }
2443
2444 if (sec != NULL && discarded_section (sec))
2445 {
2446 _bfd_clear_contents (howto, input_bfd, input_section,
2447 contents + rel->r_offset);
2448 wrel->r_offset = rel->r_offset;
2449 wrel->r_info = 0;
2450 wrel->r_addend = 0;
2451
2452	  /* For ld -r, remove relocations in debug sections against
2453	     symbols defined in discarded sections.  Not done for eh_frame,
2454	     as the eh_frame editing code expects them to be present.  */
2455 if (bfd_link_relocatable (info)
2456 && (input_section->flags & SEC_DEBUGGING))
2457 wrel--;
2458
2459 continue;
2460 }
2461
2462 if (bfd_link_relocatable (info))
2463 {
2464 if (wrel != rel)
2465 *wrel = *rel;
2466 continue;
2467 }
2468
2469 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2470 {
2471 if (r_type == R_X86_64_64)
2472 {
2473 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2474 zero-extend it to 64bit if addend is zero. */
2475 r_type = R_X86_64_32;
2476 memset (contents + rel->r_offset + 4, 0, 4);
2477 }
2478 else if (r_type == R_X86_64_SIZE64)
2479 {
2480 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2481 zero-extend it to 64bit if addend is zero. */
2482 r_type = R_X86_64_SIZE32;
2483 memset (contents + rel->r_offset + 4, 0, 4);
2484 }
2485 }
2486
2487 eh = (struct elf_x86_link_hash_entry *) h;
2488
2489      /* Since STT_GNU_IFUNC symbols must go through the PLT, we handle
2490	 them here if they are defined in a non-shared object.  */
2491 if (h != NULL
2492 && h->type == STT_GNU_IFUNC
2493 && h->def_regular)
2494 {
2495 bfd_vma plt_index;
2496 const char *name;
2497
2498 if ((input_section->flags & SEC_ALLOC) == 0)
2499 {
2500 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2501 sections because such sections are not SEC_ALLOC and
2502 thus ld.so will not process them. */
2503 if ((input_section->flags & SEC_DEBUGGING) != 0)
2504 continue;
2505 abort ();
2506 }
2507
2508 switch (r_type)
2509 {
2510 default:
2511 break;
2512
2513 case R_X86_64_GOTPCREL:
2514 case R_X86_64_GOTPCRELX:
2515 case R_X86_64_REX_GOTPCRELX:
2516 case R_X86_64_GOTPCREL64:
2517 base_got = htab->elf.sgot;
2518 off = h->got.offset;
2519
2520 if (base_got == NULL)
2521 abort ();
2522
2523 if (off == (bfd_vma) -1)
2524 {
2525 /* We can't use h->got.offset here to save state, or
2526 even just remember the offset, as finish_dynamic_symbol
2527 would use that as offset into .got. */
2528
2529 if (h->plt.offset == (bfd_vma) -1)
2530 abort ();
2531
2532 if (htab->elf.splt != NULL)
2533 {
2534 plt_index = (h->plt.offset / plt_entry_size
2535 - htab->plt.has_plt0);
2536 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2537 base_got = htab->elf.sgotplt;
2538 }
2539 else
2540 {
2541 plt_index = h->plt.offset / plt_entry_size;
2542 off = plt_index * GOT_ENTRY_SIZE;
2543 base_got = htab->elf.igotplt;
2544 }
2545
2546 if (h->dynindx == -1
2547 || h->forced_local
2548 || info->symbolic)
2549 {
2550		    /* This references the local definition.  We must
2551 initialize this entry in the global offset table.
2552 Since the offset must always be a multiple of 8,
2553 we use the least significant bit to record
2554 whether we have initialized it already.
2555
2556 When doing a dynamic link, we create a .rela.got
2557 relocation entry to initialize the value. This
2558 is done in the finish_dynamic_symbol routine. */
2559 if ((off & 1) != 0)
2560 off &= ~1;
2561 else
2562 {
2563 bfd_put_64 (output_bfd, relocation,
2564 base_got->contents + off);
2565 /* Note that this is harmless for the GOTPLT64
2566 case, as -1 | 1 still is -1. */
2567 h->got.offset |= 1;
2568 }
2569 }
2570 }
2571
2572 relocation = (base_got->output_section->vma
2573 + base_got->output_offset + off);
2574
2575 goto do_relocation;
2576 }
2577
2578 if (h->plt.offset == (bfd_vma) -1)
2579 {
2580 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2581 if (r_type == htab->pointer_r_type
2582 && (input_section->flags & SEC_CODE) == 0)
2583 goto do_ifunc_pointer;
2584 goto bad_ifunc_reloc;
2585 }
2586
2587 /* STT_GNU_IFUNC symbol must go through PLT. */
2588 if (htab->elf.splt != NULL)
2589 {
2590 if (htab->plt_second != NULL)
2591 {
2592 resolved_plt = htab->plt_second;
2593 plt_offset = eh->plt_second.offset;
2594 }
2595 else
2596 {
2597 resolved_plt = htab->elf.splt;
2598 plt_offset = h->plt.offset;
2599 }
2600 }
2601 else
2602 {
2603 resolved_plt = htab->elf.iplt;
2604 plt_offset = h->plt.offset;
2605 }
2606
2607 relocation = (resolved_plt->output_section->vma
2608 + resolved_plt->output_offset + plt_offset);
2609
2610 switch (r_type)
2611 {
2612 default:
2613 bad_ifunc_reloc:
2614 if (h->root.root.string)
2615 name = h->root.root.string;
2616 else
2617 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2618 NULL);
2619 _bfd_error_handler
2620 /* xgettext:c-format */
2621 (_("%B: relocation %s against STT_GNU_IFUNC "
2622 "symbol `%s' isn't supported"), input_bfd,
2623 howto->name, name);
2624 bfd_set_error (bfd_error_bad_value);
2625 return FALSE;
2626
2627 case R_X86_64_32S:
2628 if (bfd_link_pic (info))
2629 abort ();
2630 goto do_relocation;
2631
2632 case R_X86_64_32:
2633 if (ABI_64_P (output_bfd))
2634 goto do_relocation;
2635 /* FALLTHROUGH */
2636 case R_X86_64_64:
2637 do_ifunc_pointer:
2638 if (rel->r_addend != 0)
2639 {
2640 if (h->root.root.string)
2641 name = h->root.root.string;
2642 else
2643 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2644 sym, NULL);
2645 _bfd_error_handler
2646 /* xgettext:c-format */
2647 (_("%B: relocation %s against STT_GNU_IFUNC "
2648 "symbol `%s' has non-zero addend: %Ld"),
2649 input_bfd, howto->name, name, rel->r_addend);
2650 bfd_set_error (bfd_error_bad_value);
2651 return FALSE;
2652 }
2653
2654	      /* Generate dynamic relocation only when there is a
2655 non-GOT reference in a shared object or there is no
2656 PLT. */
2657 if ((bfd_link_pic (info) && h->non_got_ref)
2658 || h->plt.offset == (bfd_vma) -1)
2659 {
2660 Elf_Internal_Rela outrel;
2661 asection *sreloc;
2662
2663 /* Need a dynamic relocation to get the real function
2664 address. */
2665 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2666 info,
2667 input_section,
2668 rel->r_offset);
2669 if (outrel.r_offset == (bfd_vma) -1
2670 || outrel.r_offset == (bfd_vma) -2)
2671 abort ();
2672
2673 outrel.r_offset += (input_section->output_section->vma
2674 + input_section->output_offset);
2675
2676 if (POINTER_LOCAL_IFUNC_P (info, h))
2677 {
2678 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
2679 h->root.root.string,
2680 h->root.u.def.section->owner);
2681
2682 /* This symbol is resolved locally. */
2683 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2684 outrel.r_addend = (h->root.u.def.value
2685 + h->root.u.def.section->output_section->vma
2686 + h->root.u.def.section->output_offset);
2687 }
2688 else
2689 {
2690 outrel.r_info = htab->r_info (h->dynindx, r_type);
2691 outrel.r_addend = 0;
2692 }
2693
2694 /* Dynamic relocations are stored in
2695 1. .rela.ifunc section in PIC object.
2696 2. .rela.got section in dynamic executable.
2697 3. .rela.iplt section in static executable. */
2698 if (bfd_link_pic (info))
2699 sreloc = htab->elf.irelifunc;
2700 else if (htab->elf.splt != NULL)
2701 sreloc = htab->elf.srelgot;
2702 else
2703 sreloc = htab->elf.irelplt;
2704 elf_append_rela (output_bfd, sreloc, &outrel);
2705
2706 /* If this reloc is against an external symbol, we
2707 do not want to fiddle with the addend. Otherwise,
2708 we need to include the symbol value so that it
2709 becomes an addend for the dynamic reloc. For an
2710		 internal symbol, we have already updated the addend.  */
2711 continue;
2712 }
2713 /* FALLTHROUGH */
2714 case R_X86_64_PC32:
2715 case R_X86_64_PC32_BND:
2716 case R_X86_64_PC64:
2717 case R_X86_64_PLT32:
2718 case R_X86_64_PLT32_BND:
2719 goto do_relocation;
2720 }
2721 }
2722
2723 resolved_to_zero = (eh != NULL
2724 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2725
2726 /* When generating a shared object, the relocations handled here are
2727 copied into the output file to be resolved at run time. */
2728 switch (r_type)
2729 {
2730 case R_X86_64_GOT32:
2731 case R_X86_64_GOT64:
2732 /* Relocation is to the entry for this symbol in the global
2733 offset table. */
2734 case R_X86_64_GOTPCREL:
2735 case R_X86_64_GOTPCRELX:
2736 case R_X86_64_REX_GOTPCRELX:
2737 case R_X86_64_GOTPCREL64:
2738 /* Use global offset table entry as symbol value. */
2739 case R_X86_64_GOTPLT64:
2740 /* This is obsolete and treated the same as GOT64. */
2741 base_got = htab->elf.sgot;
2742
2743 if (htab->elf.sgot == NULL)
2744 abort ();
2745
2746 relative_reloc = FALSE;
2747 if (h != NULL)
2748 {
2749 off = h->got.offset;
2750 if (h->needs_plt
2751 && h->plt.offset != (bfd_vma)-1
2752 && off == (bfd_vma)-1)
2753 {
2754 /* We can't use h->got.offset here to save
2755 state, or even just remember the offset, as
2756 finish_dynamic_symbol would use that as offset into
2757 .got. */
2758 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2759 - htab->plt.has_plt0);
2760 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2761 base_got = htab->elf.sgotplt;
2762 }
2763
2764 if (RESOLVED_LOCALLY_P (info, h, htab))
2765 {
2766 /* We must initialize this entry in the global offset
2767 table. Since the offset must always be a multiple
2768 of 8, we use the least significant bit to record
2769 whether we have initialized it already.
2770
2771 When doing a dynamic link, we create a .rela.got
2772 relocation entry to initialize the value. This is
2773 done in the finish_dynamic_symbol routine. */
2774 if ((off & 1) != 0)
2775 off &= ~1;
2776 else
2777 {
2778 bfd_put_64 (output_bfd, relocation,
2779 base_got->contents + off);
2780 /* Note that this is harmless for the GOTPLT64 case,
2781 as -1 | 1 still is -1. */
2782 h->got.offset |= 1;
2783
2784 if (GENERATE_RELATIVE_RELOC_P (info, h))
2785 {
2786 /* If this symbol isn't dynamic in PIC,
2787 generate R_X86_64_RELATIVE here. */
2788 eh->no_finish_dynamic_symbol = 1;
2789 relative_reloc = TRUE;
2790 }
2791 }
2792 }
2793 else
2794 unresolved_reloc = FALSE;
2795 }
2796 else
2797 {
2798 if (local_got_offsets == NULL)
2799 abort ();
2800
2801 off = local_got_offsets[r_symndx];
2802
2803 /* The offset must always be a multiple of 8. We use
2804 the least significant bit to record whether we have
2805 already generated the necessary reloc. */
2806 if ((off & 1) != 0)
2807 off &= ~1;
2808 else
2809 {
2810 bfd_put_64 (output_bfd, relocation,
2811 base_got->contents + off);
2812 local_got_offsets[r_symndx] |= 1;
2813
2814 if (bfd_link_pic (info))
2815 relative_reloc = TRUE;
2816 }
2817 }
2818
2819 if (relative_reloc)
2820 {
2821 asection *s;
2822 Elf_Internal_Rela outrel;
2823
2824 /* We need to generate a R_X86_64_RELATIVE reloc
2825 for the dynamic linker. */
2826 s = htab->elf.srelgot;
2827 if (s == NULL)
2828 abort ();
2829
2830 outrel.r_offset = (base_got->output_section->vma
2831 + base_got->output_offset
2832 + off);
2833 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2834 outrel.r_addend = relocation;
2835 elf_append_rela (output_bfd, s, &outrel);
2836 }
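	/* Editor's note: the following is an illustrative, standalone sketch
	   and is not part of the original file.  It shows the shape of the
	   R_X86_64_RELATIVE entry appended just above, using the Elf64_Rela
	   layout and R_X86_64_RELATIVE constant from <elf.h>.  */
#if 0
#include <elf.h>

static Elf64_Rela
make_relative_rela (Elf64_Addr got_entry_vma, Elf64_Addr resolved_value)
{
  Elf64_Rela rela;

  rela.r_offset = got_entry_vma;	/* address of the GOT slot */
  /* No symbol index: ld.so just adds the load base to the addend.  */
  rela.r_info = ELF64_R_INFO (0, R_X86_64_RELATIVE);
  rela.r_addend = resolved_value;	/* link-time value of the symbol */
  return rela;
}
#endif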
2837
2838 if (off >= (bfd_vma) -2)
2839 abort ();
2840
2841 relocation = base_got->output_section->vma
2842 + base_got->output_offset + off;
2843 if (r_type != R_X86_64_GOTPCREL
2844 && r_type != R_X86_64_GOTPCRELX
2845 && r_type != R_X86_64_REX_GOTPCRELX
2846 && r_type != R_X86_64_GOTPCREL64)
2847 relocation -= htab->elf.sgotplt->output_section->vma
2848 - htab->elf.sgotplt->output_offset;
2849
2850 break;
2851
2852 case R_X86_64_GOTOFF64:
2853 /* Relocation is relative to the start of the global offset
2854 table. */
2855
2856      /* Check to make sure it isn't a protected function or data
2857	 symbol for a shared library, since it may not be local when
2858	 used as a function address or with a copy relocation.  We also
2859	 need to make sure that the symbol is referenced locally.  */
2860 if (bfd_link_pic (info) && h)
2861 {
2862 if (!h->def_regular)
2863 {
2864 const char *v;
2865
2866 switch (ELF_ST_VISIBILITY (h->other))
2867 {
2868 case STV_HIDDEN:
2869 v = _("hidden symbol");
2870 break;
2871 case STV_INTERNAL:
2872 v = _("internal symbol");
2873 break;
2874 case STV_PROTECTED:
2875 v = _("protected symbol");
2876 break;
2877 default:
2878 v = _("symbol");
2879 break;
2880 }
2881
2882 _bfd_error_handler
2883 /* xgettext:c-format */
2884 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
2885 " `%s' can not be used when making a shared object"),
2886 input_bfd, v, h->root.root.string);
2887 bfd_set_error (bfd_error_bad_value);
2888 return FALSE;
2889 }
2890 else if (!bfd_link_executable (info)
2891 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2892 && (h->type == STT_FUNC
2893 || h->type == STT_OBJECT)
2894 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2895 {
2896 _bfd_error_handler
2897 /* xgettext:c-format */
2898 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
2899 " `%s' can not be used when making a shared object"),
2900 input_bfd,
2901 h->type == STT_FUNC ? "function" : "data",
2902 h->root.root.string);
2903 bfd_set_error (bfd_error_bad_value);
2904 return FALSE;
2905 }
2906 }
2907
2908 /* Note that sgot is not involved in this
2909 calculation. We always want the start of .got.plt. If we
2910 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2911 permitted by the ABI, we might have to change this
2912 calculation. */
2913 relocation -= htab->elf.sgotplt->output_section->vma
2914 + htab->elf.sgotplt->output_offset;
2915 break;
2916
2917 case R_X86_64_GOTPC32:
2918 case R_X86_64_GOTPC64:
2919 /* Use global offset table as symbol value. */
2920 relocation = htab->elf.sgotplt->output_section->vma
2921 + htab->elf.sgotplt->output_offset;
2922 unresolved_reloc = FALSE;
2923 break;
2924
2925 case R_X86_64_PLTOFF64:
2926 /* Relocation is PLT entry relative to GOT. For local
2927 symbols it's the symbol itself relative to GOT. */
2928 if (h != NULL
2929 /* See PLT32 handling. */
2930 && (h->plt.offset != (bfd_vma) -1
2931 || eh->plt_got.offset != (bfd_vma) -1)
2932 && htab->elf.splt != NULL)
2933 {
2934 if (eh->plt_got.offset != (bfd_vma) -1)
2935 {
2936 /* Use the GOT PLT. */
2937 resolved_plt = htab->plt_got;
2938 plt_offset = eh->plt_got.offset;
2939 }
2940 else if (htab->plt_second != NULL)
2941 {
2942 resolved_plt = htab->plt_second;
2943 plt_offset = eh->plt_second.offset;
2944 }
2945 else
2946 {
2947 resolved_plt = htab->elf.splt;
2948 plt_offset = h->plt.offset;
2949 }
2950
2951 relocation = (resolved_plt->output_section->vma
2952 + resolved_plt->output_offset
2953 + plt_offset);
2954 unresolved_reloc = FALSE;
2955 }
2956
2957 relocation -= htab->elf.sgotplt->output_section->vma
2958 + htab->elf.sgotplt->output_offset;
2959 break;
2960
2961 case R_X86_64_PLT32:
2962 case R_X86_64_PLT32_BND:
2963 /* Relocation is to the entry for this symbol in the
2964 procedure linkage table. */
2965
2966 /* Resolve a PLT32 reloc against a local symbol directly,
2967 without using the procedure linkage table. */
2968 if (h == NULL)
2969 break;
2970
2971 if ((h->plt.offset == (bfd_vma) -1
2972 && eh->plt_got.offset == (bfd_vma) -1)
2973 || htab->elf.splt == NULL)
2974 {
2975 /* We didn't make a PLT entry for this symbol. This
2976 happens when statically linking PIC code, or when
2977 using -Bsymbolic. */
2978 break;
2979 }
2980
2981 if (h->plt.offset != (bfd_vma) -1)
2982 {
2983 if (htab->plt_second != NULL)
2984 {
2985 resolved_plt = htab->plt_second;
2986 plt_offset = eh->plt_second.offset;
2987 }
2988 else
2989 {
2990 resolved_plt = htab->elf.splt;
2991 plt_offset = h->plt.offset;
2992 }
2993 }
2994 else
2995 {
2996 /* Use the GOT PLT. */
2997 resolved_plt = htab->plt_got;
2998 plt_offset = eh->plt_got.offset;
2999 }
3000
3001 relocation = (resolved_plt->output_section->vma
3002 + resolved_plt->output_offset
3003 + plt_offset);
3004 unresolved_reloc = FALSE;
3005 break;
3006
3007 case R_X86_64_SIZE32:
3008 case R_X86_64_SIZE64:
3009 /* Set to symbol size. */
3010 relocation = st_size;
3011 goto direct;
3012
3013 case R_X86_64_PC8:
3014 case R_X86_64_PC16:
3015 case R_X86_64_PC32:
3016 case R_X86_64_PC32_BND:
3017	  /* Don't complain about -fPIC if the symbol is undefined when
3018	     building an executable, unless it is an unresolved weak symbol
3019	     or -z nocopyreloc is used.  */
3020 if ((input_section->flags & SEC_ALLOC) != 0
3021 && (input_section->flags & SEC_READONLY) != 0
3022 && h != NULL
3023 && ((bfd_link_executable (info)
3024 && ((h->root.type == bfd_link_hash_undefweak
3025 && !resolved_to_zero)
3026 || ((info->nocopyreloc
3027 || (eh->def_protected
3028 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3029 && h->def_dynamic
3030 && !(h->root.u.def.section->flags & SEC_CODE))))
3031 || bfd_link_dll (info)))
3032 {
3033 bfd_boolean fail = FALSE;
3034 bfd_boolean branch
3035 = ((r_type == R_X86_64_PC32
3036 || r_type == R_X86_64_PC32_BND)
3037 && is_32bit_relative_branch (contents, rel->r_offset));
3038
3039 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3040 {
3041		/* Symbol is referenced locally.  Make sure it is
3042		   defined locally, or that this is a branch.  */
3043 fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
3044 && !branch);
3045 }
3046 else if (!(bfd_link_pie (info)
3047 && (h->needs_copy || eh->needs_copy)))
3048 {
3049		/* Symbol doesn't need a copy reloc and isn't referenced
3050		   locally.  We only allow a branch to a symbol with
3051		   non-default visibility.  */
3052 fail = (!branch
3053 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3054 }
3055
3056 if (fail)
3057 return elf_x86_64_need_pic (info, input_bfd, input_section,
3058 h, NULL, NULL, howto);
3059 }
3060 /* Fall through. */
3061
3062 case R_X86_64_8:
3063 case R_X86_64_16:
3064 case R_X86_64_32:
3065 case R_X86_64_PC64:
3066 case R_X86_64_64:
3067	  /* FIXME: The ABI says the linker should make sure the value is
3068	     the same when it's zero-extended to 64 bits.  */
3069
3070 direct:
3071 if ((input_section->flags & SEC_ALLOC) == 0)
3072 break;
3073
3074 need_copy_reloc_in_pie = (bfd_link_pie (info)
3075 && h != NULL
3076 && (h->needs_copy
3077 || eh->needs_copy
3078 || (h->root.type
3079 == bfd_link_hash_undefined))
3080 && (X86_PCREL_TYPE_P (r_type)
3081 || X86_SIZE_TYPE_P (r_type)));
3082
3083 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3084 need_copy_reloc_in_pie,
3085 resolved_to_zero, FALSE))
3086 {
3087 Elf_Internal_Rela outrel;
3088 bfd_boolean skip, relocate;
3089 asection *sreloc;
3090
3091 /* When generating a shared object, these relocations
3092 are copied into the output file to be resolved at run
3093 time. */
3094 skip = FALSE;
3095 relocate = FALSE;
3096
3097 outrel.r_offset =
3098 _bfd_elf_section_offset (output_bfd, info, input_section,
3099 rel->r_offset);
3100 if (outrel.r_offset == (bfd_vma) -1)
3101 skip = TRUE;
3102 else if (outrel.r_offset == (bfd_vma) -2)
3103 skip = TRUE, relocate = TRUE;
3104
3105 outrel.r_offset += (input_section->output_section->vma
3106 + input_section->output_offset);
3107
3108 if (skip)
3109 memset (&outrel, 0, sizeof outrel);
3110
3111 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3112 {
3113 outrel.r_info = htab->r_info (h->dynindx, r_type);
3114 outrel.r_addend = rel->r_addend;
3115 }
3116 else
3117 {
3118 /* This symbol is local, or marked to become local.
3119 When relocation overflow check is disabled, we
3120 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3121 if (r_type == htab->pointer_r_type
3122 || (r_type == R_X86_64_32
3123 && info->no_reloc_overflow_check))
3124 {
3125 relocate = TRUE;
3126 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3127 outrel.r_addend = relocation + rel->r_addend;
3128 }
3129 else if (r_type == R_X86_64_64
3130 && !ABI_64_P (output_bfd))
3131 {
3132 relocate = TRUE;
3133 outrel.r_info = htab->r_info (0,
3134 R_X86_64_RELATIVE64);
3135 outrel.r_addend = relocation + rel->r_addend;
3136 /* Check addend overflow. */
3137 if ((outrel.r_addend & 0x80000000)
3138 != (rel->r_addend & 0x80000000))
3139 {
3140 const char *name;
3141 int addend = rel->r_addend;
3142 if (h && h->root.root.string)
3143 name = h->root.root.string;
3144 else
3145 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3146 sym, NULL);
3147 _bfd_error_handler
3148 /* xgettext:c-format */
3149 (_("%B: addend %s%#x in relocation %s against "
3150 "symbol `%s' at %#Lx in section `%A' is "
3151 "out of range"),
3152 input_bfd, addend < 0 ? "-" : "", addend,
3153 howto->name, name, rel->r_offset, input_section);
3154 bfd_set_error (bfd_error_bad_value);
3155 return FALSE;
3156 }
3157 }
3158 else
3159 {
3160 long sindx;
3161
3162 if (bfd_is_abs_section (sec))
3163 sindx = 0;
3164 else if (sec == NULL || sec->owner == NULL)
3165 {
3166 bfd_set_error (bfd_error_bad_value);
3167 return FALSE;
3168 }
3169 else
3170 {
3171 asection *osec;
3172
3173 /* We are turning this relocation into one
3174 against a section symbol. It would be
3175 proper to subtract the symbol's value,
3176 osec->vma, from the emitted reloc addend,
3177 but ld.so expects buggy relocs. */
3178 osec = sec->output_section;
3179 sindx = elf_section_data (osec)->dynindx;
3180 if (sindx == 0)
3181 {
3182 asection *oi = htab->elf.text_index_section;
3183 sindx = elf_section_data (oi)->dynindx;
3184 }
3185 BFD_ASSERT (sindx != 0);
3186 }
3187
3188 outrel.r_info = htab->r_info (sindx, r_type);
3189 outrel.r_addend = relocation + rel->r_addend;
3190 }
3191 }
3192
3193 sreloc = elf_section_data (input_section)->sreloc;
3194
3195 if (sreloc == NULL || sreloc->contents == NULL)
3196 {
3197 r = bfd_reloc_notsupported;
3198 goto check_relocation_error;
3199 }
3200
3201 elf_append_rela (output_bfd, sreloc, &outrel);
3202
3203 /* If this reloc is against an external symbol, we do
3204 not want to fiddle with the addend. Otherwise, we
3205 need to include the symbol value so that it becomes
3206 an addend for the dynamic reloc. */
3207 if (! relocate)
3208 continue;
3209 }
3210
3211 break;
3212
3213 case R_X86_64_TLSGD:
3214 case R_X86_64_GOTPC32_TLSDESC:
3215 case R_X86_64_TLSDESC_CALL:
3216 case R_X86_64_GOTTPOFF:
3217 tls_type = GOT_UNKNOWN;
3218 if (h == NULL && local_got_offsets)
3219 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3220 else if (h != NULL)
3221 tls_type = elf_x86_hash_entry (h)->tls_type;
3222
3223 r_type_tls = r_type;
3224 if (! elf_x86_64_tls_transition (info, input_bfd,
3225 input_section, contents,
3226 symtab_hdr, sym_hashes,
3227 &r_type_tls, tls_type, rel,
3228 relend, h, r_symndx, TRUE))
3229 return FALSE;
3230
3231 if (r_type_tls == R_X86_64_TPOFF32)
3232 {
3233 bfd_vma roff = rel->r_offset;
3234
3235 BFD_ASSERT (! unresolved_reloc);
3236
3237 if (r_type == R_X86_64_TLSGD)
3238 {
3239 /* GD->LE transition. For 64bit, change
3240 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3241 .word 0x6666; rex64; call __tls_get_addr@PLT
3242 or
3243 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3244 .byte 0x66; rex64
3245 call *__tls_get_addr@GOTPCREL(%rip)
3246 which may be converted to
3247 addr32 call __tls_get_addr
3248 into:
3249 movq %fs:0, %rax
3250 leaq foo@tpoff(%rax), %rax
3251 For 32bit, change
3252 leaq foo@tlsgd(%rip), %rdi
3253 .word 0x6666; rex64; call __tls_get_addr@PLT
3254 or
3255 leaq foo@tlsgd(%rip), %rdi
3256 .byte 0x66; rex64
3257 call *__tls_get_addr@GOTPCREL(%rip)
3258 which may be converted to
3259 addr32 call __tls_get_addr
3260 into:
3261 movl %fs:0, %eax
3262 leaq foo@tpoff(%rax), %rax
3263 For largepic, change:
3264 leaq foo@tlsgd(%rip), %rdi
3265 movabsq $__tls_get_addr@pltoff, %rax
3266 addq %r15, %rax
3267 call *%rax
3268 into:
3269 movq %fs:0, %rax
3270 leaq foo@tpoff(%rax), %rax
3271 nopw 0x0(%rax,%rax,1) */
3272 int largepic = 0;
3273 if (ABI_64_P (output_bfd))
3274 {
3275 if (contents[roff + 5] == 0xb8)
3276 {
3277 memcpy (contents + roff - 3,
3278 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3279 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3280 largepic = 1;
3281 }
3282 else
3283 memcpy (contents + roff - 4,
3284 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3285 16);
3286 }
3287 else
3288 memcpy (contents + roff - 3,
3289 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3290 15);
3291 bfd_put_32 (output_bfd,
3292 elf_x86_64_tpoff (info, relocation),
3293 contents + roff + 8 + largepic);
3294 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3295 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3296 rel++;
3297 wrel++;
3298 continue;
3299 }
3300 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3301 {
3302 /* GDesc -> LE transition.
3303 It's originally something like:
3304 leaq x@tlsdesc(%rip), %rax
3305
3306 Change it to:
3307		     movq $x@tpoff, %rax.  */
3308
3309 unsigned int val, type;
3310
3311 type = bfd_get_8 (input_bfd, contents + roff - 3);
3312 val = bfd_get_8 (input_bfd, contents + roff - 1);
3313 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3314 contents + roff - 3);
3315 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3316 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3317 contents + roff - 1);
3318 bfd_put_32 (output_bfd,
3319 elf_x86_64_tpoff (info, relocation),
3320 contents + roff);
3321 continue;
3322 }
3323 else if (r_type == R_X86_64_TLSDESC_CALL)
3324 {
3325 /* GDesc -> LE transition.
3326 It's originally:
3327 call *(%rax)
3328 Turn it into:
3329 xchg %ax,%ax. */
3330 bfd_put_8 (output_bfd, 0x66, contents + roff);
3331 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3332 continue;
3333 }
3334 else if (r_type == R_X86_64_GOTTPOFF)
3335 {
3336 /* IE->LE transition:
3337 For 64bit, originally it can be one of:
3338 movq foo@gottpoff(%rip), %reg
3339 addq foo@gottpoff(%rip), %reg
3340 We change it into:
3341 movq $foo, %reg
3342 leaq foo(%reg), %reg
3343 addq $foo, %reg.
3344 For 32bit, originally it can be one of:
3345 movq foo@gottpoff(%rip), %reg
3346 addl foo@gottpoff(%rip), %reg
3347 We change it into:
3348 movq $foo, %reg
3349 leal foo(%reg), %reg
3350 addl $foo, %reg. */
3351
3352 unsigned int val, type, reg;
3353
3354 if (roff >= 3)
3355 val = bfd_get_8 (input_bfd, contents + roff - 3);
3356 else
3357 val = 0;
3358 type = bfd_get_8 (input_bfd, contents + roff - 2);
3359 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3360 reg >>= 3;
3361 if (type == 0x8b)
3362 {
3363 /* movq */
3364 if (val == 0x4c)
3365 bfd_put_8 (output_bfd, 0x49,
3366 contents + roff - 3);
3367 else if (!ABI_64_P (output_bfd) && val == 0x44)
3368 bfd_put_8 (output_bfd, 0x41,
3369 contents + roff - 3);
3370 bfd_put_8 (output_bfd, 0xc7,
3371 contents + roff - 2);
3372 bfd_put_8 (output_bfd, 0xc0 | reg,
3373 contents + roff - 1);
3374 }
3375 else if (reg == 4)
3376 {
3377 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3378 is special */
3379 if (val == 0x4c)
3380 bfd_put_8 (output_bfd, 0x49,
3381 contents + roff - 3);
3382 else if (!ABI_64_P (output_bfd) && val == 0x44)
3383 bfd_put_8 (output_bfd, 0x41,
3384 contents + roff - 3);
3385 bfd_put_8 (output_bfd, 0x81,
3386 contents + roff - 2);
3387 bfd_put_8 (output_bfd, 0xc0 | reg,
3388 contents + roff - 1);
3389 }
3390 else
3391 {
3392 /* addq/addl -> leaq/leal */
3393 if (val == 0x4c)
3394 bfd_put_8 (output_bfd, 0x4d,
3395 contents + roff - 3);
3396 else if (!ABI_64_P (output_bfd) && val == 0x44)
3397 bfd_put_8 (output_bfd, 0x45,
3398 contents + roff - 3);
3399 bfd_put_8 (output_bfd, 0x8d,
3400 contents + roff - 2);
3401 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3402 contents + roff - 1);
3403 }
3404 bfd_put_32 (output_bfd,
3405 elf_x86_64_tpoff (info, relocation),
3406 contents + roff);
3407 continue;
3408 }
3409 else
3410 BFD_ASSERT (FALSE);
3411 }
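	    /* Editor's note: the following is an illustrative, standalone
	       sketch and is not part of the original file.  It mirrors the
	       IE->LE rewrite done just above for the
	       "movq foo@gottpoff(%rip), %reg" form without a REX.B register:
	       the 0x8b opcode becomes 0xc7 and the RIP-relative ModRM byte
	       becomes the register form, with the @tpoff value stored as the
	       little-endian immediate.  ROFF and TPOFF correspond to roff
	       and the elf_x86_64_tpoff result above.  */
#if 0
static void
ie_to_le_movq (unsigned char *contents, unsigned long roff,
	       unsigned int tpoff)
{
  unsigned int reg = (contents[roff - 1] >> 3) & 7;

  /* 48 8b 05 <disp32>     movq foo@gottpoff(%rip), %reg
     becomes
     48 c7 c0+reg <imm32>  movq $tpoff, %reg.  */
  contents[roff - 2] = 0xc7;
  contents[roff - 1] = 0xc0 | reg;
  contents[roff + 0] = tpoff & 0xff;
  contents[roff + 1] = (tpoff >> 8) & 0xff;
  contents[roff + 2] = (tpoff >> 16) & 0xff;
  contents[roff + 3] = (tpoff >> 24) & 0xff;
}
#endif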
3412
3413 if (htab->elf.sgot == NULL)
3414 abort ();
3415
3416 if (h != NULL)
3417 {
3418 off = h->got.offset;
3419 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3420 }
3421 else
3422 {
3423 if (local_got_offsets == NULL)
3424 abort ();
3425
3426 off = local_got_offsets[r_symndx];
3427 offplt = local_tlsdesc_gotents[r_symndx];
3428 }
3429
3430 if ((off & 1) != 0)
3431 off &= ~1;
3432 else
3433 {
3434 Elf_Internal_Rela outrel;
3435 int dr_type, indx;
3436 asection *sreloc;
3437
3438 if (htab->elf.srelgot == NULL)
3439 abort ();
3440
3441 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3442
3443 if (GOT_TLS_GDESC_P (tls_type))
3444 {
3445 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3446 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3447 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3448 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3449 + htab->elf.sgotplt->output_offset
3450 + offplt
3451 + htab->sgotplt_jump_table_size);
3452 sreloc = htab->elf.srelplt;
3453 if (indx == 0)
3454 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3455 else
3456 outrel.r_addend = 0;
3457 elf_append_rela (output_bfd, sreloc, &outrel);
3458 }
3459
3460 sreloc = htab->elf.srelgot;
3461
3462 outrel.r_offset = (htab->elf.sgot->output_section->vma
3463 + htab->elf.sgot->output_offset + off);
3464
3465 if (GOT_TLS_GD_P (tls_type))
3466 dr_type = R_X86_64_DTPMOD64;
3467 else if (GOT_TLS_GDESC_P (tls_type))
3468 goto dr_done;
3469 else
3470 dr_type = R_X86_64_TPOFF64;
3471
3472 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3473 outrel.r_addend = 0;
3474 if ((dr_type == R_X86_64_TPOFF64
3475 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3476 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3477 outrel.r_info = htab->r_info (indx, dr_type);
3478
3479 elf_append_rela (output_bfd, sreloc, &outrel);
3480
3481 if (GOT_TLS_GD_P (tls_type))
3482 {
3483 if (indx == 0)
3484 {
3485 BFD_ASSERT (! unresolved_reloc);
3486 bfd_put_64 (output_bfd,
3487 relocation - _bfd_x86_elf_dtpoff_base (info),
3488 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3489 }
3490 else
3491 {
3492 bfd_put_64 (output_bfd, 0,
3493 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3494 outrel.r_info = htab->r_info (indx,
3495 R_X86_64_DTPOFF64);
3496 outrel.r_offset += GOT_ENTRY_SIZE;
3497 elf_append_rela (output_bfd, sreloc,
3498 &outrel);
3499 }
3500 }
3501
3502 dr_done:
3503 if (h != NULL)
3504 h->got.offset |= 1;
3505 else
3506 local_got_offsets[r_symndx] |= 1;
3507 }
3508
3509 if (off >= (bfd_vma) -2
3510 && ! GOT_TLS_GDESC_P (tls_type))
3511 abort ();
3512 if (r_type_tls == r_type)
3513 {
3514 if (r_type == R_X86_64_GOTPC32_TLSDESC
3515 || r_type == R_X86_64_TLSDESC_CALL)
3516 relocation = htab->elf.sgotplt->output_section->vma
3517 + htab->elf.sgotplt->output_offset
3518 + offplt + htab->sgotplt_jump_table_size;
3519 else
3520 relocation = htab->elf.sgot->output_section->vma
3521 + htab->elf.sgot->output_offset + off;
3522 unresolved_reloc = FALSE;
3523 }
3524 else
3525 {
3526 bfd_vma roff = rel->r_offset;
3527
3528 if (r_type == R_X86_64_TLSGD)
3529 {
3530 /* GD->IE transition. For 64bit, change
3531 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3532 .word 0x6666; rex64; call __tls_get_addr@PLT
3533 or
3534 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3535 .byte 0x66; rex64
3536			 call *__tls_get_addr@GOTPCREL(%rip)
3537 which may be converted to
3538 addr32 call __tls_get_addr
3539 into:
3540 movq %fs:0, %rax
3541 addq foo@gottpoff(%rip), %rax
3542 For 32bit, change
3543 leaq foo@tlsgd(%rip), %rdi
3544 .word 0x6666; rex64; call __tls_get_addr@PLT
3545 or
3546 leaq foo@tlsgd(%rip), %rdi
3547 .byte 0x66; rex64;
3548 call *__tls_get_addr@GOTPCREL(%rip)
3549 which may be converted to
3550 addr32 call __tls_get_addr
3551 into:
3552 movl %fs:0, %eax
3553 addq foo@gottpoff(%rip), %rax
3554 For largepic, change:
3555 leaq foo@tlsgd(%rip), %rdi
3556 movabsq $__tls_get_addr@pltoff, %rax
3557 addq %r15, %rax
3558 call *%rax
3559 into:
3560 movq %fs:0, %rax
3561 addq foo@gottpoff(%rax), %rax
3562 nopw 0x0(%rax,%rax,1) */
3563 int largepic = 0;
3564 if (ABI_64_P (output_bfd))
3565 {
3566 if (contents[roff + 5] == 0xb8)
3567 {
3568 memcpy (contents + roff - 3,
3569 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3570 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3571 largepic = 1;
3572 }
3573 else
3574 memcpy (contents + roff - 4,
3575 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3576 16);
3577 }
3578 else
3579 memcpy (contents + roff - 3,
3580 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3581 15);
3582
3583 relocation = (htab->elf.sgot->output_section->vma
3584 + htab->elf.sgot->output_offset + off
3585 - roff
3586 - largepic
3587 - input_section->output_section->vma
3588 - input_section->output_offset
3589 - 12);
3590 bfd_put_32 (output_bfd, relocation,
3591 contents + roff + 8 + largepic);
3592 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3593 rel++;
3594 wrel++;
3595 continue;
3596 }
3597 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3598 {
3599 /* GDesc -> IE transition.
3600 It's originally something like:
3601 leaq x@tlsdesc(%rip), %rax
3602
3603 Change it to:
3604 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3605
3606 /* Now modify the instruction as appropriate. To
3607 turn a leaq into a movq in the form we use it, it
3608 suffices to change the second byte from 0x8d to
3609 0x8b. */
3610 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3611
3612 bfd_put_32 (output_bfd,
3613 htab->elf.sgot->output_section->vma
3614 + htab->elf.sgot->output_offset + off
3615 - rel->r_offset
3616 - input_section->output_section->vma
3617 - input_section->output_offset
3618 - 4,
3619 contents + roff);
3620 continue;
3621 }
3622 else if (r_type == R_X86_64_TLSDESC_CALL)
3623 {
3624 /* GDesc -> IE transition.
3625 It's originally:
3626 call *(%rax)
3627
3628 Change it to:
3629 xchg %ax, %ax. */
3630
3631 bfd_put_8 (output_bfd, 0x66, contents + roff);
3632 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3633 continue;
3634 }
3635 else
3636 BFD_ASSERT (FALSE);
3637 }
3638 break;
3639
3640 case R_X86_64_TLSLD:
3641 if (! elf_x86_64_tls_transition (info, input_bfd,
3642 input_section, contents,
3643 symtab_hdr, sym_hashes,
3644 &r_type, GOT_UNKNOWN, rel,
3645 relend, h, r_symndx, TRUE))
3646 return FALSE;
3647
3648 if (r_type != R_X86_64_TLSLD)
3649 {
3650 /* LD->LE transition:
3651 leaq foo@tlsld(%rip), %rdi
3652 call __tls_get_addr@PLT
3653 For 64bit, we change it into:
3654 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3655 For 32bit, we change it into:
3656 nopl 0x0(%rax); movl %fs:0, %eax
3657 Or
3658 leaq foo@tlsld(%rip), %rdi;
3659 call *__tls_get_addr@GOTPCREL(%rip)
3660 which may be converted to
3661 addr32 call __tls_get_addr
3662 For 64bit, we change it into:
3663 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3664 For 32bit, we change it into:
3665 nopw 0x0(%rax); movl %fs:0, %eax
3666 For largepic, change:
3667		     leaq foo@tlsld(%rip), %rdi
3668 movabsq $__tls_get_addr@pltoff, %rax
3669 addq %rbx, %rax
3670 call *%rax
3671 into
3672 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3673		 movq %fs:0, %rax */
3674
3675 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3676 if (ABI_64_P (output_bfd))
3677 {
3678 if (contents[rel->r_offset + 5] == 0xb8)
3679 memcpy (contents + rel->r_offset - 3,
3680 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3681 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3682 else if (contents[rel->r_offset + 4] == 0xff
3683 || contents[rel->r_offset + 4] == 0x67)
3684 memcpy (contents + rel->r_offset - 3,
3685 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3686 13);
3687 else
3688 memcpy (contents + rel->r_offset - 3,
3689 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3690 }
3691 else
3692 {
3693 if (contents[rel->r_offset + 4] == 0xff)
3694 memcpy (contents + rel->r_offset - 3,
3695 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3696 13);
3697 else
3698 memcpy (contents + rel->r_offset - 3,
3699 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3700 }
3701 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3702 and R_X86_64_PLTOFF64. */
3703 rel++;
3704 wrel++;
3705 continue;
3706 }
3707
3708 if (htab->elf.sgot == NULL)
3709 abort ();
3710
3711 off = htab->tls_ld_or_ldm_got.offset;
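/* The low bit of tls_ld_or_ldm_got.offset marks whether the LD/LDM GOT
   entry has already been initialized and its R_X86_64_DTPMOD64 relocation
   emitted, so the work below is only done once.  */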
3712 if (off & 1)
3713 off &= ~1;
3714 else
3715 {
3716 Elf_Internal_Rela outrel;
3717
3718 if (htab->elf.srelgot == NULL)
3719 abort ();
3720
3721 outrel.r_offset = (htab->elf.sgot->output_section->vma
3722 + htab->elf.sgot->output_offset + off);
3723
3724 bfd_put_64 (output_bfd, 0,
3725 htab->elf.sgot->contents + off);
3726 bfd_put_64 (output_bfd, 0,
3727 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3728 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3729 outrel.r_addend = 0;
3730 elf_append_rela (output_bfd, htab->elf.srelgot,
3731 &outrel);
3732 htab->tls_ld_or_ldm_got.offset |= 1;
3733 }
3734 relocation = htab->elf.sgot->output_section->vma
3735 + htab->elf.sgot->output_offset + off;
3736 unresolved_reloc = FALSE;
3737 break;
3738
3739 case R_X86_64_DTPOFF32:
3740 if (!bfd_link_executable (info)
3741 || (input_section->flags & SEC_CODE) == 0)
3742 relocation -= _bfd_x86_elf_dtpoff_base (info);
3743 else
3744 relocation = elf_x86_64_tpoff (info, relocation);
3745 break;
3746
3747 case R_X86_64_TPOFF32:
3748 case R_X86_64_TPOFF64:
3749 BFD_ASSERT (bfd_link_executable (info));
3750 relocation = elf_x86_64_tpoff (info, relocation);
3751 break;
3752
3753 case R_X86_64_DTPOFF64:
3754 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3755 relocation -= _bfd_x86_elf_dtpoff_base (info);
3756 break;
3757
3758 default:
3759 break;
3760 }
3761
3762 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3763 because such sections are not SEC_ALLOC and thus ld.so will
3764 not process them. */
3765 if (unresolved_reloc
3766 && !((input_section->flags & SEC_DEBUGGING) != 0
3767 && h->def_dynamic)
3768 && _bfd_elf_section_offset (output_bfd, info, input_section,
3769 rel->r_offset) != (bfd_vma) -1)
3770 {
3771 switch (r_type)
3772 {
3773 case R_X86_64_32S:
3774 sec = h->root.u.def.section;
3775 if ((info->nocopyreloc
3776 || (eh->def_protected
3777 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3778 && !(h->root.u.def.section->flags & SEC_CODE))
3779 return elf_x86_64_need_pic (info, input_bfd, input_section,
3780 h, NULL, NULL, howto);
3781 /* Fall through. */
3782
3783 default:
3784 _bfd_error_handler
3785 /* xgettext:c-format */
3786 (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
3787 input_bfd,
3788 input_section,
3789 rel->r_offset,
3790 howto->name,
3791 h->root.root.string);
3792 return FALSE;
3793 }
3794 }
3795
3796 do_relocation:
3797 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3798 contents, rel->r_offset,
3799 relocation, rel->r_addend);
3800
3801 check_relocation_error:
3802 if (r != bfd_reloc_ok)
3803 {
3804 const char *name;
3805
3806 if (h != NULL)
3807 name = h->root.root.string;
3808 else
3809 {
3810 name = bfd_elf_string_from_elf_section (input_bfd,
3811 symtab_hdr->sh_link,
3812 sym->st_name);
3813 if (name == NULL)
3814 return FALSE;
3815 if (*name == '\0')
3816 name = bfd_section_name (input_bfd, sec);
3817 }
3818
3819 if (r == bfd_reloc_overflow)
3820 {
3821 if (converted_reloc)
3822 {
3823 info->callbacks->einfo
3824 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3825 return FALSE;
3826 }
3827 (*info->callbacks->reloc_overflow)
3828 (info, (h ? &h->root : NULL), name, howto->name,
3829 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3830 }
3831 else
3832 {
3833 _bfd_error_handler
3834 /* xgettext:c-format */
3835 (_("%B(%A+%#Lx): reloc against `%s': error %d"),
3836 input_bfd, input_section,
3837 rel->r_offset, name, (int) r);
3838 return FALSE;
3839 }
3840 }
3841
3842 if (wrel != rel)
3843 *wrel = *rel;
3844 }
3845
3846 if (wrel != rel)
3847 {
3848 Elf_Internal_Shdr *rel_hdr;
3849 size_t deleted = rel - wrel;
3850
3851 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3852 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3853 if (rel_hdr->sh_size == 0)
3854 {
3855 /* It is too late to remove an empty reloc section. Leave
3856 one NONE reloc.
3857 ??? What is wrong with an empty section??? */
3858 rel_hdr->sh_size = rel_hdr->sh_entsize;
3859 deleted -= 1;
3860 }
3861 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3862 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3863 input_section->reloc_count -= deleted;
3864 }
3865
3866 return TRUE;
3867 }
3868
3869 /* Finish up dynamic symbol handling. We set the contents of various
3870 dynamic sections here. */
3871
3872 static bfd_boolean
3873 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3874 struct bfd_link_info *info,
3875 struct elf_link_hash_entry *h,
3876 Elf_Internal_Sym *sym)
3877 {
3878 struct elf_x86_link_hash_table *htab;
3879 bfd_boolean use_plt_second;
3880 struct elf_x86_link_hash_entry *eh;
3881 bfd_boolean local_undefweak;
3882
3883 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3884 if (htab == NULL)
3885 return FALSE;
3886
3887 /* Use the second PLT section only if there is a .plt section. */
3888 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3889
3890 eh = (struct elf_x86_link_hash_entry *) h;
3891 if (eh->no_finish_dynamic_symbol)
3892 abort ();
3893
3894 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3895 resolved undefined weak symbols in executable so that their
3896 references have value 0 at run-time. */
3897 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3898
3899 if (h->plt.offset != (bfd_vma) -1)
3900 {
3901 bfd_vma plt_index;
3902 bfd_vma got_offset, plt_offset;
3903 Elf_Internal_Rela rela;
3904 bfd_byte *loc;
3905 asection *plt, *gotplt, *relplt, *resolved_plt;
3906 const struct elf_backend_data *bed;
3907 bfd_vma plt_got_pcrel_offset;
3908
3909 /* When building a static executable, use .iplt, .igot.plt and
3910 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3911 if (htab->elf.splt != NULL)
3912 {
3913 plt = htab->elf.splt;
3914 gotplt = htab->elf.sgotplt;
3915 relplt = htab->elf.srelplt;
3916 }
3917 else
3918 {
3919 plt = htab->elf.iplt;
3920 gotplt = htab->elf.igotplt;
3921 relplt = htab->elf.irelplt;
3922 }
3923
3924 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3925
3926 /* Get the index in the procedure linkage table which
3927 corresponds to this symbol. This is the index of this symbol
3928 in all the symbols for which we are making plt entries. The
3929 first entry in the procedure linkage table is reserved.
3930
3931 Get the offset into the .got table of the entry that
3932 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3933 bytes. The first three are reserved for the dynamic linker.
3934
3935 For static executables, we don't reserve anything. */
3936
3937 if (plt == htab->elf.splt)
3938 {
3939 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3940 - htab->plt.has_plt0);
3941 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3942 }
3943 else
3944 {
3945 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3946 got_offset = got_offset * GOT_ENTRY_SIZE;
3947 }
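/* Worked example, assuming 16-byte lazy PLT entries and 8-byte GOT entries:
   a symbol at h->plt.offset == 32 in a PLT with PLT0 is PLT entry
   32 / 16 - 1 == 1, so got_offset = (1 + 3) * 8 == 32, skipping the three
   reserved GOT slots.  */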
3948
3949 /* Fill in the entry in the procedure linkage table. */
3950 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3951 htab->plt.plt_entry_size);
3952 if (use_plt_second)
3953 {
3954 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3955 htab->non_lazy_plt->plt_entry,
3956 htab->non_lazy_plt->plt_entry_size);
3957
3958 resolved_plt = htab->plt_second;
3959 plt_offset = eh->plt_second.offset;
3960 }
3961 else
3962 {
3963 resolved_plt = plt;
3964 plt_offset = h->plt.offset;
3965 }
3966
3967 /* Insert the relocation positions of the plt section. */
3968
3969 /* Put in the offset to the GOT entry for the PC-relative instruction
3970 referring to it, subtracting the size of that instruction. */
3971 plt_got_pcrel_offset = (gotplt->output_section->vma
3972 + gotplt->output_offset
3973 + got_offset
3974 - resolved_plt->output_section->vma
3975 - resolved_plt->output_offset
3976 - plt_offset
3977 - htab->plt.plt_got_insn_size);
3978
3979 /* Check PC-relative offset overflow in PLT entry. */
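/* (offset + 0x80000000) > 0xffffffff is true precisely when the 64-bit
   offset falls outside the signed 32-bit range that the 4-byte
   displacement field in the PLT entry can hold.  */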
3980 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3981 /* xgettext:c-format */
3982 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
3983 output_bfd, h->root.root.string);
3984
3985 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
3986 (resolved_plt->contents + plt_offset
3987 + htab->plt.plt_got_offset));
3988
3989 /* Fill in the entry in the global offset table, initially this
3990 points to the second part of the PLT entry. Leave the entry
3991 as zero for undefined weak symbol in PIE. No PLT relocation
3992 against undefined weak symbol in PIE. */
3993 if (!local_undefweak)
3994 {
3995 if (htab->plt.has_plt0)
3996 bfd_put_64 (output_bfd, (plt->output_section->vma
3997 + plt->output_offset
3998 + h->plt.offset
3999 + htab->lazy_plt->plt_lazy_offset),
4000 gotplt->contents + got_offset);
4001
4002 /* Fill in the entry in the .rela.plt section. */
4003 rela.r_offset = (gotplt->output_section->vma
4004 + gotplt->output_offset
4005 + got_offset);
4006 if (PLT_LOCAL_IFUNC_P (info, h))
4007 {
4008 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4009 h->root.root.string,
4010 h->root.u.def.section->owner);
4011
4012 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4013 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4014 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4015 rela.r_addend = (h->root.u.def.value
4016 + h->root.u.def.section->output_section->vma
4017 + h->root.u.def.section->output_offset);
4018 /* R_X86_64_IRELATIVE comes last. */
4019 plt_index = htab->next_irelative_index--;
4020 }
4021 else
4022 {
4023 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4024 rela.r_addend = 0;
4025 plt_index = htab->next_jump_slot_index++;
4026 }
4027
4028 /* Don't fill in the second and third slots of the PLT entry for
4029 static executables, nor when PLT0 is not used. */
4030 if (plt == htab->elf.splt && htab->plt.has_plt0)
4031 {
4032 bfd_vma plt0_offset
4033 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
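/* For the standard lazy PLT entry (jmpq *name@GOTPCREL(%rip); pushq $index;
   jmp PLT0), the two 32-bit stores below fill in the pushq immediate with
   the relocation index and the jmp displacement with the negative distance
   back to PLT0, measured from the end of the jmp instruction.  */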
4034
4035 /* Put relocation index. */
4036 bfd_put_32 (output_bfd, plt_index,
4037 (plt->contents + h->plt.offset
4038 + htab->lazy_plt->plt_reloc_offset));
4039
4040 /* Put offset for jmp .PLT0 and check for overflow. We don't
4041 check relocation index for overflow since branch displacement
4042 will overflow first. */
4043 if (plt0_offset > 0x80000000)
4044 /* xgettext:c-format */
4045 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
4046 output_bfd, h->root.root.string);
4047 bfd_put_32 (output_bfd, - plt0_offset,
4048 (plt->contents + h->plt.offset
4049 + htab->lazy_plt->plt_plt_offset));
4050 }
4051
4052 bed = get_elf_backend_data (output_bfd);
4053 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4054 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4055 }
4056 }
4057 else if (eh->plt_got.offset != (bfd_vma) -1)
4058 {
4059 bfd_vma got_offset, plt_offset;
4060 asection *plt, *got;
4061 bfd_boolean got_after_plt;
4062 int32_t got_pcrel_offset;
4063
4064 /* Set the entry in the GOT procedure linkage table. */
4065 plt = htab->plt_got;
4066 got = htab->elf.sgot;
4067 got_offset = h->got.offset;
4068
4069 if (got_offset == (bfd_vma) -1
4070 || (h->type == STT_GNU_IFUNC && h->def_regular)
4071 || plt == NULL
4072 || got == NULL)
4073 abort ();
4074
4075 /* Use the non-lazy PLT entry template for the GOT PLT since they
4076 are identical. */
4077 /* Fill in the entry in the GOT procedure linkage table. */
4078 plt_offset = eh->plt_got.offset;
4079 memcpy (plt->contents + plt_offset,
4080 htab->non_lazy_plt->plt_entry,
4081 htab->non_lazy_plt->plt_entry_size);
4082
4083 /* Put in the offset to the GOT entry for the PC-relative instruction
4084 referring to it, subtracting the size of that instruction. */
4085 got_pcrel_offset = (got->output_section->vma
4086 + got->output_offset
4087 + got_offset
4088 - plt->output_section->vma
4089 - plt->output_offset
4090 - plt_offset
4091 - htab->non_lazy_plt->plt_got_insn_size);
4092
4093 /* Check PC-relative offset overflow in GOT PLT entry. */
4094 got_after_plt = got->output_section->vma > plt->output_section->vma;
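/* got_pcrel_offset is an int32_t, so a displacement that does not fit in
   32 bits shows up as a value whose sign disagrees with the actual
   placement of .got relative to the PLT; that is what the check below
   detects.  */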
4095 if ((got_after_plt && got_pcrel_offset < 0)
4096 || (!got_after_plt && got_pcrel_offset > 0))
4097 /* xgettext:c-format */
4098 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4099 output_bfd, h->root.root.string);
4100
4101 bfd_put_32 (output_bfd, got_pcrel_offset,
4102 (plt->contents + plt_offset
4103 + htab->non_lazy_plt->plt_got_offset));
4104 }
4105
4106 if (!local_undefweak
4107 && !h->def_regular
4108 && (h->plt.offset != (bfd_vma) -1
4109 || eh->plt_got.offset != (bfd_vma) -1))
4110 {
4111 /* Mark the symbol as undefined, rather than as defined in
4112 the .plt section. Leave the value if there were any
4113 relocations where pointer equality matters (this is a clue
4114 for the dynamic linker, to make function pointer
4115 comparisons work between an application and shared
4116 library), otherwise set it to zero. If a function is only
4117 called from a binary, there is no need to slow down
4118 shared libraries because of that. */
4119 sym->st_shndx = SHN_UNDEF;
4120 if (!h->pointer_equality_needed)
4121 sym->st_value = 0;
4122 }
4123
4124 /* Don't generate dynamic GOT relocation against undefined weak
4125 symbol in executable. */
4126 if (h->got.offset != (bfd_vma) -1
4127 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4128 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4129 && !local_undefweak)
4130 {
4131 Elf_Internal_Rela rela;
4132 asection *relgot = htab->elf.srelgot;
4133
4134 /* This symbol has an entry in the global offset table. Set it
4135 up. */
4136 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4137 abort ();
4138
4139 rela.r_offset = (htab->elf.sgot->output_section->vma
4140 + htab->elf.sgot->output_offset
4141 + (h->got.offset &~ (bfd_vma) 1));
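/* The low bit of h->got.offset is used as a flag saying that the GOT entry
   has already been initialized in relocate_section; mask it off to get the
   real GOT offset.  */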
4142
4143 /* If this is a static link, or it is a -Bsymbolic link and the
4144 symbol is defined locally or was forced to be local because
4145 of a version file, we just want to emit a RELATIVE reloc.
4146 The entry in the global offset table will already have been
4147 initialized in the relocate_section function. */
4148 if (h->def_regular
4149 && h->type == STT_GNU_IFUNC)
4150 {
4151 if (h->plt.offset == (bfd_vma) -1)
4152 {
4153 /* STT_GNU_IFUNC is referenced without PLT. */
4154 if (htab->elf.splt == NULL)
4155 {
4156 /* Use the .rel[a].iplt section to store .got relocations
4157 in a static executable. */
4158 relgot = htab->elf.irelplt;
4159 }
4160 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4161 {
4162 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4163 h->root.root.string,
4164 h->root.u.def.section->owner);
4165
4166 rela.r_info = htab->r_info (0,
4167 R_X86_64_IRELATIVE);
4168 rela.r_addend = (h->root.u.def.value
4169 + h->root.u.def.section->output_section->vma
4170 + h->root.u.def.section->output_offset);
4171 }
4172 else
4173 goto do_glob_dat;
4174 }
4175 else if (bfd_link_pic (info))
4176 {
4177 /* Generate R_X86_64_GLOB_DAT. */
4178 goto do_glob_dat;
4179 }
4180 else
4181 {
4182 asection *plt;
4183 bfd_vma plt_offset;
4184
4185 if (!h->pointer_equality_needed)
4186 abort ();
4187
4188 /* For a non-shared object, we can't use .got.plt, which
4189 contains the real function address, if we need pointer
4190 equality. We load the GOT entry with the address of the PLT entry. */
4191 if (htab->plt_second != NULL)
4192 {
4193 plt = htab->plt_second;
4194 plt_offset = eh->plt_second.offset;
4195 }
4196 else
4197 {
4198 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4199 plt_offset = h->plt.offset;
4200 }
4201 bfd_put_64 (output_bfd, (plt->output_section->vma
4202 + plt->output_offset
4203 + plt_offset),
4204 htab->elf.sgot->contents + h->got.offset);
4205 return TRUE;
4206 }
4207 }
4208 else if (bfd_link_pic (info)
4209 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4210 {
4211 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4212 return FALSE;
4213 BFD_ASSERT((h->got.offset & 1) != 0);
4214 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4215 rela.r_addend = (h->root.u.def.value
4216 + h->root.u.def.section->output_section->vma
4217 + h->root.u.def.section->output_offset);
4218 }
4219 else
4220 {
4221 BFD_ASSERT((h->got.offset & 1) == 0);
4222 do_glob_dat:
4223 bfd_put_64 (output_bfd, (bfd_vma) 0,
4224 htab->elf.sgot->contents + h->got.offset);
4225 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4226 rela.r_addend = 0;
4227 }
4228
4229 elf_append_rela (output_bfd, relgot, &rela);
4230 }
4231
4232 if (h->needs_copy)
4233 {
4234 Elf_Internal_Rela rela;
4235 asection *s;
4236
4237 /* This symbol needs a copy reloc. Set it up. */
4238 VERIFY_COPY_RELOC (h, htab)
4239
4240 rela.r_offset = (h->root.u.def.value
4241 + h->root.u.def.section->output_section->vma
4242 + h->root.u.def.section->output_offset);
4243 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4244 rela.r_addend = 0;
4245 if (h->root.u.def.section == htab->elf.sdynrelro)
4246 s = htab->elf.sreldynrelro;
4247 else
4248 s = htab->elf.srelbss;
4249 elf_append_rela (output_bfd, s, &rela);
4250 }
4251
4252 return TRUE;
4253 }
4254
4255 /* Finish up local dynamic symbol handling. We set the contents of
4256 various dynamic sections here. */
4257
4258 static bfd_boolean
4259 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4260 {
4261 struct elf_link_hash_entry *h
4262 = (struct elf_link_hash_entry *) *slot;
4263 struct bfd_link_info *info
4264 = (struct bfd_link_info *) inf;
4265
4266 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4267 info, h, NULL);
4268 }
4269
4270 /* Finish up undefined weak symbol handling in PIE. Fill in its PLT entry
4271 here since an undefined weak symbol may not be dynamic and
4272 elf_x86_64_finish_dynamic_symbol may not be called for it. */
4273
4274 static bfd_boolean
4275 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4276 void *inf)
4277 {
4278 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4279 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4280
4281 if (h->root.type != bfd_link_hash_undefweak
4282 || h->dynindx != -1)
4283 return TRUE;
4284
4285 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4286 info, h, NULL);
4287 }
4288
4289 /* Used to decide how to sort relocs in an optimal manner for the
4290 dynamic linker, before writing them out. */
4291
4292 static enum elf_reloc_type_class
4293 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4294 const asection *rel_sec ATTRIBUTE_UNUSED,
4295 const Elf_Internal_Rela *rela)
4296 {
4297 bfd *abfd = info->output_bfd;
4298 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4299 struct elf_x86_link_hash_table *htab
4300 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4301
4302 if (htab->elf.dynsym != NULL
4303 && htab->elf.dynsym->contents != NULL)
4304 {
4305 /* Check relocation against STT_GNU_IFUNC symbol if there are
4306 dynamic symbols. */
4307 unsigned long r_symndx = htab->r_sym (rela->r_info);
4308 if (r_symndx != STN_UNDEF)
4309 {
4310 Elf_Internal_Sym sym;
4311 if (!bed->s->swap_symbol_in (abfd,
4312 (htab->elf.dynsym->contents
4313 + r_symndx * bed->s->sizeof_sym),
4314 0, &sym))
4315 abort ();
4316
4317 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4318 return reloc_class_ifunc;
4319 }
4320 }
4321
4322 switch ((int) ELF32_R_TYPE (rela->r_info))
4323 {
4324 case R_X86_64_IRELATIVE:
4325 return reloc_class_ifunc;
4326 case R_X86_64_RELATIVE:
4327 case R_X86_64_RELATIVE64:
4328 return reloc_class_relative;
4329 case R_X86_64_JUMP_SLOT:
4330 return reloc_class_plt;
4331 case R_X86_64_COPY:
4332 return reloc_class_copy;
4333 default:
4334 return reloc_class_normal;
4335 }
4336 }
4337
4338 /* Finish up the dynamic sections. */
4339
4340 static bfd_boolean
4341 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4342 struct bfd_link_info *info)
4343 {
4344 struct elf_x86_link_hash_table *htab;
4345
4346 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4347 if (htab == NULL)
4348 return FALSE;
4349
4350 if (! htab->elf.dynamic_sections_created)
4351 return TRUE;
4352
4353 if (htab->elf.splt && htab->elf.splt->size > 0)
4354 {
4355 elf_section_data (htab->elf.splt->output_section)
4356 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4357
4358 if (htab->plt.has_plt0)
4359 {
4360 /* Fill in the special first entry in the procedure linkage
4361 table. */
4362 memcpy (htab->elf.splt->contents,
4363 htab->lazy_plt->plt0_entry,
4364 htab->lazy_plt->plt0_entry_size);
4365 /* Add the offset for pushq GOT+8(%rip); since the instruction
4366 uses 6 bytes, subtract this value. */
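/* For reference: "pushq GOT+8(%rip)" assembles to 0xff 0x35 followed by a
   4-byte displacement, 6 bytes in total, and that displacement is relative
   to the end of the instruction, which is why 6 is subtracted below.  */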
4367 bfd_put_32 (output_bfd,
4368 (htab->elf.sgotplt->output_section->vma
4369 + htab->elf.sgotplt->output_offset
4370 + 8
4371 - htab->elf.splt->output_section->vma
4372 - htab->elf.splt->output_offset
4373 - 6),
4374 (htab->elf.splt->contents
4375 + htab->lazy_plt->plt0_got1_offset));
4376 /* Add offset for the PC-relative instruction accessing
4377 GOT+16, subtracting the offset to the end of that
4378 instruction. */
4379 bfd_put_32 (output_bfd,
4380 (htab->elf.sgotplt->output_section->vma
4381 + htab->elf.sgotplt->output_offset
4382 + 16
4383 - htab->elf.splt->output_section->vma
4384 - htab->elf.splt->output_offset
4385 - htab->lazy_plt->plt0_got2_insn_end),
4386 (htab->elf.splt->contents
4387 + htab->lazy_plt->plt0_got2_offset));
4388 }
4389
4390 if (htab->tlsdesc_plt)
4391 {
4392 bfd_put_64 (output_bfd, (bfd_vma) 0,
4393 htab->elf.sgot->contents + htab->tlsdesc_got);
4394
4395 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4396 htab->lazy_plt->plt0_entry,
4397 htab->lazy_plt->plt0_entry_size);
4398
4399 /* Add the offset for pushq GOT+8(%rip); since the
4400 instruction uses 6 bytes, subtract this value. */
4401 bfd_put_32 (output_bfd,
4402 (htab->elf.sgotplt->output_section->vma
4403 + htab->elf.sgotplt->output_offset
4404 + 8
4405 - htab->elf.splt->output_section->vma
4406 - htab->elf.splt->output_offset
4407 - htab->tlsdesc_plt
4408 - 6),
4409 (htab->elf.splt->contents
4410 + htab->tlsdesc_plt
4411 + htab->lazy_plt->plt0_got1_offset));
4412 /* Add offset for the PC-relative instruction accessing
4413 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4414 subtracting the offset to the end of that
4415 instruction. */
4416 bfd_put_32 (output_bfd,
4417 (htab->elf.sgot->output_section->vma
4418 + htab->elf.sgot->output_offset
4419 + htab->tlsdesc_got
4420 - htab->elf.splt->output_section->vma
4421 - htab->elf.splt->output_offset
4422 - htab->tlsdesc_plt
4423 - htab->lazy_plt->plt0_got2_insn_end),
4424 (htab->elf.splt->contents
4425 + htab->tlsdesc_plt
4426 + htab->lazy_plt->plt0_got2_offset));
4427 }
4428 }
4429
4430 /* Fill PLT entries for undefined weak symbols in PIE. */
4431 if (bfd_link_pie (info))
4432 bfd_hash_traverse (&info->hash->table,
4433 elf_x86_64_pie_finish_undefweak_symbol,
4434 info);
4435
4436 return TRUE;
4437 }
4438
4439 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4440 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4441 It has to be done before elf_link_sort_relocs is called so that
4442 dynamic relocations are properly sorted. */
4443
4444 static bfd_boolean
4445 elf_x86_64_output_arch_local_syms
4446 (bfd *output_bfd ATTRIBUTE_UNUSED,
4447 struct bfd_link_info *info,
4448 void *flaginfo ATTRIBUTE_UNUSED,
4449 int (*func) (void *, const char *,
4450 Elf_Internal_Sym *,
4451 asection *,
4452 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4453 {
4454 struct elf_x86_link_hash_table *htab
4455 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4456 if (htab == NULL)
4457 return FALSE;
4458
4459 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4460 htab_traverse (htab->loc_hash_table,
4461 elf_x86_64_finish_local_dynamic_symbol,
4462 info);
4463
4464 return TRUE;
4465 }
4466
4467 /* Forward declaration. */
4468 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4469
4470 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4471 dynamic relocations. */
4472
4473 static long
4474 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4475 long symcount ATTRIBUTE_UNUSED,
4476 asymbol **syms ATTRIBUTE_UNUSED,
4477 long dynsymcount,
4478 asymbol **dynsyms,
4479 asymbol **ret)
4480 {
4481 long count, i, n;
4482 int j;
4483 bfd_byte *plt_contents;
4484 long relsize;
4485 const struct elf_x86_lazy_plt_layout *lazy_plt;
4486 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4487 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4488 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4489 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4490 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4491 asection *plt;
4492 enum elf_x86_plt_type plt_type;
4493 struct elf_x86_plt plts[] =
4494 {
4495 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4496 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4497 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4498 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4499 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4500 };
4501
4502 *ret = NULL;
4503
4504 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4505 return 0;
4506
4507 if (dynsymcount <= 0)
4508 return 0;
4509
4510 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4511 if (relsize <= 0)
4512 return -1;
4513
4514 if (get_elf_x86_backend_data (abfd)->target_os == is_normal)
4515 {
4516 lazy_plt = &elf_x86_64_lazy_plt;
4517 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4518 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4519 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4520 if (ABI_64_P (abfd))
4521 {
4522 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4523 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4524 }
4525 else
4526 {
4527 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4528 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4529 }
4530 }
4531 else
4532 {
4533 lazy_plt = &elf_x86_64_nacl_plt;
4534 non_lazy_plt = NULL;
4535 lazy_bnd_plt = NULL;
4536 non_lazy_bnd_plt = NULL;
4537 lazy_ibt_plt = NULL;
4538 non_lazy_ibt_plt = NULL;
4539 }
4540
4541 count = 0;
4542 for (j = 0; plts[j].name != NULL; j++)
4543 {
4544 plt = bfd_get_section_by_name (abfd, plts[j].name);
4545 if (plt == NULL || plt->size == 0)
4546 continue;
4547
4548 /* Get the PLT section contents. */
4549 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4550 if (plt_contents == NULL)
4551 break;
4552 if (!bfd_get_section_contents (abfd, (asection *) plt,
4553 plt_contents, 0, plt->size))
4554 {
4555 free (plt_contents);
4556 break;
4557 }
4558
4559 /* Check what kind of PLT it is. */
4560 plt_type = plt_unknown;
4561 if (plts[j].type == plt_unknown
4562 && (plt->size >= (lazy_plt->plt_entry_size
4563 + lazy_plt->plt_entry_size)))
4564 {
4565 /* Match lazy PLT first. Need to check the first two
4566 instructions. */
4567 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4568 lazy_plt->plt0_got1_offset) == 0)
4569 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4570 2) == 0))
4571 plt_type = plt_lazy;
4572 else if (lazy_bnd_plt != NULL
4573 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4574 lazy_bnd_plt->plt0_got1_offset) == 0)
4575 && (memcmp (plt_contents + 6,
4576 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4577 {
4578 plt_type = plt_lazy | plt_second;
4579 /* The first entry in the lazy IBT PLT is the same as in the
4580 lazy BND PLT. */
4581 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4582 lazy_ibt_plt->plt_entry,
4583 lazy_ibt_plt->plt_got_offset) == 0))
4584 lazy_plt = lazy_ibt_plt;
4585 else
4586 lazy_plt = lazy_bnd_plt;
4587 }
4588 }
4589
4590 if (non_lazy_plt != NULL
4591 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4592 && plt->size >= non_lazy_plt->plt_entry_size)
4593 {
4594 /* Match non-lazy PLT. */
4595 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4596 non_lazy_plt->plt_got_offset) == 0)
4597 plt_type = plt_non_lazy;
4598 }
4599
4600 if (plt_type == plt_unknown || plt_type == plt_second)
4601 {
4602 if (non_lazy_bnd_plt != NULL
4603 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4604 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4605 non_lazy_bnd_plt->plt_got_offset) == 0))
4606 {
4607 /* Match BND PLT. */
4608 plt_type = plt_second;
4609 non_lazy_plt = non_lazy_bnd_plt;
4610 }
4611 else if (non_lazy_ibt_plt != NULL
4612 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4613 && (memcmp (plt_contents,
4614 non_lazy_ibt_plt->plt_entry,
4615 non_lazy_ibt_plt->plt_got_offset) == 0))
4616 {
4617 /* Match IBT PLT. */
4618 plt_type = plt_second;
4619 non_lazy_plt = non_lazy_ibt_plt;
4620 }
4621 }
4622
4623 if (plt_type == plt_unknown)
4624 {
4625 free (plt_contents);
4626 continue;
4627 }
4628
4629 plts[j].sec = plt;
4630 plts[j].type = plt_type;
4631
4632 if ((plt_type & plt_lazy))
4633 {
4634 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4635 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4636 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4637 /* Skip PLT0 in lazy PLT. */
4638 i = 1;
4639 }
4640 else
4641 {
4642 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4643 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4644 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4645 i = 0;
4646 }
4647
4648 /* Skip lazy PLT when the second PLT is used. */
4649 if (plt_type == (plt_lazy | plt_second))
4650 plts[j].count = 0;
4651 else
4652 {
4653 n = plt->size / plts[j].plt_entry_size;
4654 plts[j].count = n;
4655 count += n - i;
4656 }
4657
4658 plts[j].contents = plt_contents;
4659 }
4660
4661 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4662 (bfd_vma) 0, plts, dynsyms,
4663 ret);
4664 }
4665
4666 /* Handle an x86-64 specific section when reading an object file. This
4667 is called when elfcode.h finds a section with an unknown type. */
4668
4669 static bfd_boolean
4670 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4671 const char *name, int shindex)
4672 {
4673 if (hdr->sh_type != SHT_X86_64_UNWIND)
4674 return FALSE;
4675
4676 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4677 return FALSE;
4678
4679 return TRUE;
4680 }
4681
4682 /* Hook called by the linker routine which adds symbols from an object
4683 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4684 of .bss. */
4685
4686 static bfd_boolean
4687 elf_x86_64_add_symbol_hook (bfd *abfd,
4688 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4689 Elf_Internal_Sym *sym,
4690 const char **namep ATTRIBUTE_UNUSED,
4691 flagword *flagsp ATTRIBUTE_UNUSED,
4692 asection **secp,
4693 bfd_vma *valp)
4694 {
4695 asection *lcomm;
4696
4697 switch (sym->st_shndx)
4698 {
4699 case SHN_X86_64_LCOMMON:
4700 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4701 if (lcomm == NULL)
4702 {
4703 lcomm = bfd_make_section_with_flags (abfd,
4704 "LARGE_COMMON",
4705 (SEC_ALLOC
4706 | SEC_IS_COMMON
4707 | SEC_LINKER_CREATED));
4708 if (lcomm == NULL)
4709 return FALSE;
4710 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4711 }
4712 *secp = lcomm;
4713 *valp = sym->st_size;
4714 return TRUE;
4715 }
4716
4717 return TRUE;
4718 }
4719
4720
4721 /* Given a BFD section, try to locate the corresponding ELF section
4722 index. */
4723
4724 static bfd_boolean
4725 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4726 asection *sec, int *index_return)
4727 {
4728 if (sec == &_bfd_elf_large_com_section)
4729 {
4730 *index_return = SHN_X86_64_LCOMMON;
4731 return TRUE;
4732 }
4733 return FALSE;
4734 }
4735
4736 /* Process a symbol. */
4737
4738 static void
4739 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4740 asymbol *asym)
4741 {
4742 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4743
4744 switch (elfsym->internal_elf_sym.st_shndx)
4745 {
4746 case SHN_X86_64_LCOMMON:
4747 asym->section = &_bfd_elf_large_com_section;
4748 asym->value = elfsym->internal_elf_sym.st_size;
4749 /* Common symbol doesn't set BSF_GLOBAL. */
4750 asym->flags &= ~BSF_GLOBAL;
4751 break;
4752 }
4753 }
4754
4755 static bfd_boolean
4756 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4757 {
4758 return (sym->st_shndx == SHN_COMMON
4759 || sym->st_shndx == SHN_X86_64_LCOMMON);
4760 }
4761
4762 static unsigned int
4763 elf_x86_64_common_section_index (asection *sec)
4764 {
4765 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4766 return SHN_COMMON;
4767 else
4768 return SHN_X86_64_LCOMMON;
4769 }
4770
4771 static asection *
4772 elf_x86_64_common_section (asection *sec)
4773 {
4774 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4775 return bfd_com_section_ptr;
4776 else
4777 return &_bfd_elf_large_com_section;
4778 }
4779
4780 static bfd_boolean
4781 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4782 const Elf_Internal_Sym *sym,
4783 asection **psec,
4784 bfd_boolean newdef,
4785 bfd_boolean olddef,
4786 bfd *oldbfd,
4787 const asection *oldsec)
4788 {
4789 /* A normal common symbol and a large common symbol result in a
4790 normal common symbol. We turn the large common symbol into a
4791 normal one. */
4792 if (!olddef
4793 && h->root.type == bfd_link_hash_common
4794 && !newdef
4795 && bfd_is_com_section (*psec)
4796 && oldsec != *psec)
4797 {
4798 if (sym->st_shndx == SHN_COMMON
4799 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4800 {
4801 h->root.u.c.p->section
4802 = bfd_make_section_old_way (oldbfd, "COMMON");
4803 h->root.u.c.p->section->flags = SEC_ALLOC;
4804 }
4805 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4806 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4807 *psec = bfd_com_section_ptr;
4808 }
4809
4810 return TRUE;
4811 }
4812
4813 static int
4814 elf_x86_64_additional_program_headers (bfd *abfd,
4815 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4816 {
4817 asection *s;
4818 int count = 0;
4819
4820 /* Check to see if we need a large readonly segment. */
4821 s = bfd_get_section_by_name (abfd, ".lrodata");
4822 if (s && (s->flags & SEC_LOAD))
4823 count++;
4824
4825 /* Check to see if we need a large data segment. Since the .lbss section
4826 is placed right after the .bss section, there should be no need for
4827 a large data segment just because of .lbss. */
4828 s = bfd_get_section_by_name (abfd, ".ldata");
4829 if (s && (s->flags & SEC_LOAD))
4830 count++;
4831
4832 return count;
4833 }
4834
4835 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4836
4837 static bfd_boolean
4838 elf_x86_64_relocs_compatible (const bfd_target *input,
4839 const bfd_target *output)
4840 {
4841 return ((xvec_get_elf_backend_data (input)->s->elfclass
4842 == xvec_get_elf_backend_data (output)->s->elfclass)
4843 && _bfd_elf_relocs_compatible (input, output));
4844 }
4845
4846 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4847 with GNU properties if found. Otherwise, return NULL. */
4848
4849 static bfd *
4850 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4851 {
4852 struct elf_x86_init_table init_table;
4853
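/* Sanity checks: the internal bit used to mark converted GOTPCREL
   relocations must lie above all standard relocation numbers, below
   R_X86_64_max, and must already be set in R_X86_64_GNU_VTINHERIT and
   R_X86_64_GNU_VTENTRY, so that OR-ing it in cannot change them.  */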
4854 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4855 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4856 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4857 != (int) R_X86_64_GNU_VTINHERIT)
4858 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4859 != (int) R_X86_64_GNU_VTENTRY))
4860 abort ();
4861
4862 /* This is unused for x86-64. */
4863 init_table.plt0_pad_byte = 0x90;
4864
4865 if (get_elf_x86_backend_data (info->output_bfd)->target_os
4866 == is_normal)
4867 {
4868 if (info->bndplt)
4869 {
4870 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4871 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4872 }
4873 else
4874 {
4875 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4876 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4877 }
4878
4879 if (ABI_64_P (info->output_bfd))
4880 {
4881 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4882 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4883 }
4884 else
4885 {
4886 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4887 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4888 }
4889 }
4890 else
4891 {
4892 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4893 init_table.non_lazy_plt = NULL;
4894 init_table.lazy_ibt_plt = NULL;
4895 init_table.non_lazy_ibt_plt = NULL;
4896 }
4897
4898 if (ABI_64_P (info->output_bfd))
4899 {
4900 init_table.r_info = elf64_r_info;
4901 init_table.r_sym = elf64_r_sym;
4902 }
4903 else
4904 {
4905 init_table.r_info = elf32_r_info;
4906 init_table.r_sym = elf32_r_sym;
4907 }
4908
4909 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4910 }
4911
4912 static const struct bfd_elf_special_section
4913 elf_x86_64_special_sections[]=
4914 {
4915 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4916 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4917 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4918 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4919 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4920 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4921 { NULL, 0, 0, 0, 0 }
4922 };
4923
4924 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4925 #define TARGET_LITTLE_NAME "elf64-x86-64"
4926 #define ELF_ARCH bfd_arch_i386
4927 #define ELF_TARGET_ID X86_64_ELF_DATA
4928 #define ELF_MACHINE_CODE EM_X86_64
4929 #define ELF_MAXPAGESIZE 0x200000
4930 #define ELF_MINPAGESIZE 0x1000
4931 #define ELF_COMMONPAGESIZE 0x1000
4932
4933 #define elf_backend_can_gc_sections 1
4934 #define elf_backend_can_refcount 1
4935 #define elf_backend_want_got_plt 1
4936 #define elf_backend_plt_readonly 1
4937 #define elf_backend_want_plt_sym 0
4938 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
4939 #define elf_backend_rela_normal 1
4940 #define elf_backend_plt_alignment 4
4941 #define elf_backend_extern_protected_data 1
4942 #define elf_backend_caches_rawsize 1
4943 #define elf_backend_dtrel_excludes_plt 1
4944 #define elf_backend_want_dynrelro 1
4945
4946 #define elf_info_to_howto elf_x86_64_info_to_howto
4947
4948 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4949 #define bfd_elf64_bfd_reloc_name_lookup \
4950 elf_x86_64_reloc_name_lookup
4951
4952 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4953 #define elf_backend_check_relocs elf_x86_64_check_relocs
4954 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4955 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4956 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4957 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4958 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4959 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4960 #ifdef CORE_HEADER
4961 #define elf_backend_write_core_note elf_x86_64_write_core_note
4962 #endif
4963 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4964 #define elf_backend_relocate_section elf_x86_64_relocate_section
4965 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
4966 #define elf_backend_object_p elf64_x86_64_elf_object_p
4967 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
4968
4969 #define elf_backend_section_from_shdr \
4970 elf_x86_64_section_from_shdr
4971
4972 #define elf_backend_section_from_bfd_section \
4973 elf_x86_64_elf_section_from_bfd_section
4974 #define elf_backend_add_symbol_hook \
4975 elf_x86_64_add_symbol_hook
4976 #define elf_backend_symbol_processing \
4977 elf_x86_64_symbol_processing
4978 #define elf_backend_common_section_index \
4979 elf_x86_64_common_section_index
4980 #define elf_backend_common_section \
4981 elf_x86_64_common_section
4982 #define elf_backend_common_definition \
4983 elf_x86_64_common_definition
4984 #define elf_backend_merge_symbol \
4985 elf_x86_64_merge_symbol
4986 #define elf_backend_special_sections \
4987 elf_x86_64_special_sections
4988 #define elf_backend_additional_program_headers \
4989 elf_x86_64_additional_program_headers
4990 #define elf_backend_setup_gnu_properties \
4991 elf_x86_64_link_setup_gnu_properties
4992 #define elf_backend_hide_symbol \
4993 _bfd_x86_elf_hide_symbol
4994
4995 #include "elf64-target.h"
4996
4997 /* CloudABI support. */
4998
4999 #undef TARGET_LITTLE_SYM
5000 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5001 #undef TARGET_LITTLE_NAME
5002 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5003
5004 #undef ELF_OSABI
5005 #define ELF_OSABI ELFOSABI_CLOUDABI
5006
5007 #undef elf64_bed
5008 #define elf64_bed elf64_x86_64_cloudabi_bed
5009
5010 #include "elf64-target.h"
5011
5012 /* FreeBSD support. */
5013
5014 #undef TARGET_LITTLE_SYM
5015 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5016 #undef TARGET_LITTLE_NAME
5017 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5018
5019 #undef ELF_OSABI
5020 #define ELF_OSABI ELFOSABI_FREEBSD
5021
5022 #undef elf64_bed
5023 #define elf64_bed elf64_x86_64_fbsd_bed
5024
5025 #include "elf64-target.h"
5026
5027 /* Solaris 2 support. */
5028
5029 #undef TARGET_LITTLE_SYM
5030 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5031 #undef TARGET_LITTLE_NAME
5032 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5033
5034 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5035 objects won't be recognized. */
5036 #undef ELF_OSABI
5037
5038 #undef elf64_bed
5039 #define elf64_bed elf64_x86_64_sol2_bed
5040
5041 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5042 boundary. */
5043 #undef elf_backend_static_tls_alignment
5044 #define elf_backend_static_tls_alignment 16
5045
5046 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5047
5048 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5049 File, p.63. */
5050 #undef elf_backend_want_plt_sym
5051 #define elf_backend_want_plt_sym 1
5052
5053 #undef elf_backend_strtab_flags
5054 #define elf_backend_strtab_flags SHF_STRINGS
5055
5056 static bfd_boolean
5057 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5058 bfd *obfd ATTRIBUTE_UNUSED,
5059 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5060 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5061 {
5062 /* PR 19938: FIXME: Need to add code for setting the sh_info
5063 and sh_link fields of Solaris specific section types. */
5064 return FALSE;
5065 }
5066
5067 #undef elf_backend_copy_special_section_fields
5068 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5069
5070 #include "elf64-target.h"
5071
5072 /* Native Client support. */
5073
5074 static bfd_boolean
5075 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5076 {
5077 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5078 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5079 return TRUE;
5080 }
5081
5082 #undef TARGET_LITTLE_SYM
5083 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5084 #undef TARGET_LITTLE_NAME
5085 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5086 #undef elf64_bed
5087 #define elf64_bed elf64_x86_64_nacl_bed
5088
5089 #undef ELF_MAXPAGESIZE
5090 #undef ELF_MINPAGESIZE
5091 #undef ELF_COMMONPAGESIZE
5092 #define ELF_MAXPAGESIZE 0x10000
5093 #define ELF_MINPAGESIZE 0x10000
5094 #define ELF_COMMONPAGESIZE 0x10000
5095
5096 /* Restore defaults. */
5097 #undef ELF_OSABI
5098 #undef elf_backend_static_tls_alignment
5099 #undef elf_backend_want_plt_sym
5100 #define elf_backend_want_plt_sym 0
5101 #undef elf_backend_strtab_flags
5102 #undef elf_backend_copy_special_section_fields
5103
5104 /* NaCl uses substantially different PLT entries for the same effects. */
5105
5106 #undef elf_backend_plt_alignment
5107 #define elf_backend_plt_alignment 5
5108 #define NACL_PLT_ENTRY_SIZE 64
5109 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5110
5111 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5112 {
5113 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5114 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5115 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5116 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5117 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5118
5119 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5120 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5121
5122 /* 32 bytes of nop to pad out to the standard size. */
5123 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5124 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5125 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5126 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5127 0x66, /* excess data16 prefix */
5128 0x90 /* nop */
5129 };
5130
5131 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5132 {
5133 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5134 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5135 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5136 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5137
5138 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5139 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5140 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5141
5142 /* Lazy GOT entries point here (32-byte aligned). */
5143 0x68, /* pushq immediate */
5144 0, 0, 0, 0, /* replaced with index into relocation table. */
5145 0xe9, /* jmp relative */
5146 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5147
5148 /* 22 bytes of nop to pad out to the standard size. */
5149 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5150 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5151 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5152 };
5153
5154 /* .eh_frame covering the .plt section. */
5155
5156 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5157 {
5158 #if (PLT_CIE_LENGTH != 20 \
5159 || PLT_FDE_LENGTH != 36 \
5160 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5161 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5162 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5163 #endif
5164 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5165 0, 0, 0, 0, /* CIE ID */
5166 1, /* CIE version */
5167 'z', 'R', 0, /* Augmentation string */
5168 1, /* Code alignment factor */
5169 0x78, /* Data alignment factor */
5170 16, /* Return address column */
5171 1, /* Augmentation size */
5172 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5173 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5174 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5175 DW_CFA_nop, DW_CFA_nop,
5176
5177 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5178 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5179 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5180 0, 0, 0, 0, /* .plt size goes here */
5181 0, /* Augmentation size */
5182 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5183 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5184 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5185 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5186 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5187 13, /* Block length */
5188 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5189 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5190 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5191 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5192 DW_CFA_nop, DW_CFA_nop
5193 };
5194
5195 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5196 {
5197 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5198 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5199 elf_x86_64_nacl_plt_entry, /* plt_entry */
5200 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5201 2, /* plt0_got1_offset */
5202 9, /* plt0_got2_offset */
5203 13, /* plt0_got2_insn_end */
5204 3, /* plt_got_offset */
5205 33, /* plt_reloc_offset */
5206 38, /* plt_plt_offset */
5207 7, /* plt_got_insn_size */
5208 42, /* plt_plt_insn_end */
5209 32, /* plt_lazy_offset */
5210 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5211 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5212 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5213 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5214 };
5215
5216 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5217 {
5218 is_nacl /* os */
5219 };
5220
5221 #undef elf_backend_arch_data
5222 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5223
5224 #undef elf_backend_object_p
5225 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5226 #undef elf_backend_modify_segment_map
5227 #define elf_backend_modify_segment_map nacl_modify_segment_map
5228 #undef elf_backend_modify_program_headers
5229 #define elf_backend_modify_program_headers nacl_modify_program_headers
5230 #undef elf_backend_final_write_processing
5231 #define elf_backend_final_write_processing nacl_final_write_processing
5232
5233 #include "elf64-target.h"
5234
5235 /* Native Client x32 support. */
5236
5237 static bfd_boolean
5238 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5239 {
5240 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5241 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5242 return TRUE;
5243 }
5244
5245 #undef TARGET_LITTLE_SYM
5246 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5247 #undef TARGET_LITTLE_NAME
5248 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5249 #undef elf32_bed
5250 #define elf32_bed elf32_x86_64_nacl_bed
5251
5252 #define bfd_elf32_bfd_reloc_type_lookup \
5253 elf_x86_64_reloc_type_lookup
5254 #define bfd_elf32_bfd_reloc_name_lookup \
5255 elf_x86_64_reloc_name_lookup
5256 #define bfd_elf32_get_synthetic_symtab \
5257 elf_x86_64_get_synthetic_symtab
5258
5259 #undef elf_backend_object_p
5260 #define elf_backend_object_p \
5261 elf32_x86_64_nacl_elf_object_p
5262
5263 #undef elf_backend_bfd_from_remote_memory
5264 #define elf_backend_bfd_from_remote_memory \
5265 _bfd_elf32_bfd_from_remote_memory
5266
5267 #undef elf_backend_size_info
5268 #define elf_backend_size_info \
5269 _bfd_elf32_size_info
5270
5271 #include "elf32-target.h"
5272
5273 /* Restore defaults. */
5274 #undef elf_backend_object_p
5275 #define elf_backend_object_p elf64_x86_64_elf_object_p
5276 #undef elf_backend_bfd_from_remote_memory
5277 #undef elf_backend_size_info
5278 #undef elf_backend_modify_segment_map
5279 #undef elf_backend_modify_program_headers
5280 #undef elf_backend_final_write_processing
5281
5282 /* Intel L1OM support. */
5283
5284 static bfd_boolean
5285 elf64_l1om_elf_object_p (bfd *abfd)
5286 {
5287 /* Set the right machine number for an L1OM elf64 file. */
5288 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5289 return TRUE;
5290 }
5291
5292 #undef TARGET_LITTLE_SYM
5293 #define TARGET_LITTLE_SYM l1om_elf64_vec
5294 #undef TARGET_LITTLE_NAME
5295 #define TARGET_LITTLE_NAME "elf64-l1om"
5296 #undef ELF_ARCH
5297 #define ELF_ARCH bfd_arch_l1om
5298
5299 #undef ELF_MACHINE_CODE
5300 #define ELF_MACHINE_CODE EM_L1OM
5301
5302 #undef ELF_OSABI
5303
5304 #undef elf64_bed
5305 #define elf64_bed elf64_l1om_bed
5306
5307 #undef elf_backend_object_p
5308 #define elf_backend_object_p elf64_l1om_elf_object_p
5309
5310 /* Restore defaults. */
5311 #undef ELF_MAXPAGESIZE
5312 #undef ELF_MINPAGESIZE
5313 #undef ELF_COMMONPAGESIZE
5314 #define ELF_MAXPAGESIZE 0x200000
5315 #define ELF_MINPAGESIZE 0x1000
5316 #define ELF_COMMONPAGESIZE 0x1000
5317 #undef elf_backend_plt_alignment
5318 #define elf_backend_plt_alignment 4
5319 #undef elf_backend_arch_data
5320 #define elf_backend_arch_data &elf_x86_64_arch_bed
5321
5322 #include "elf64-target.h"
5323
5324 /* FreeBSD L1OM support. */
5325
5326 #undef TARGET_LITTLE_SYM
5327 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5328 #undef TARGET_LITTLE_NAME
5329 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5330
5331 #undef ELF_OSABI
5332 #define ELF_OSABI ELFOSABI_FREEBSD
5333
5334 #undef elf64_bed
5335 #define elf64_bed elf64_l1om_fbsd_bed
5336
5337 #include "elf64-target.h"
5338
5339 /* Intel K1OM support. */
5340
5341 static bfd_boolean
5342 elf64_k1om_elf_object_p (bfd *abfd)
5343 {
5344 /* Set the right machine number for a K1OM elf64 file. */
5345 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5346 return TRUE;
5347 }
5348
5349 #undef TARGET_LITTLE_SYM
5350 #define TARGET_LITTLE_SYM k1om_elf64_vec
5351 #undef TARGET_LITTLE_NAME
5352 #define TARGET_LITTLE_NAME "elf64-k1om"
5353 #undef ELF_ARCH
5354 #define ELF_ARCH bfd_arch_k1om
5355
5356 #undef ELF_MACHINE_CODE
5357 #define ELF_MACHINE_CODE EM_K1OM
5358
5359 #undef ELF_OSABI
5360
5361 #undef elf64_bed
5362 #define elf64_bed elf64_k1om_bed
5363
5364 #undef elf_backend_object_p
5365 #define elf_backend_object_p elf64_k1om_elf_object_p
5366
5367 #undef elf_backend_static_tls_alignment
5368
5369 #undef elf_backend_want_plt_sym
5370 #define elf_backend_want_plt_sym 0
5371
5372 #include "elf64-target.h"
5373
5374 /* FreeBSD K1OM support. */
5375
5376 #undef TARGET_LITTLE_SYM
5377 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5378 #undef TARGET_LITTLE_NAME
5379 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5380
5381 #undef ELF_OSABI
5382 #define ELF_OSABI ELFOSABI_FREEBSD
5383
5384 #undef elf64_bed
5385 #define elf64_bed elf64_k1om_fbsd_bed
5386
5387 #include "elf64-target.h"
5388
5389 /* 32bit x86-64 support. */
5390
5391 #undef TARGET_LITTLE_SYM
5392 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5393 #undef TARGET_LITTLE_NAME
5394 #define TARGET_LITTLE_NAME "elf32-x86-64"
5395 #undef elf32_bed
5396
5397 #undef ELF_ARCH
5398 #define ELF_ARCH bfd_arch_i386
5399
5400 #undef ELF_MACHINE_CODE
5401 #define ELF_MACHINE_CODE EM_X86_64
5402
5403 #undef ELF_OSABI
5404
5405 #undef elf_backend_object_p
5406 #define elf_backend_object_p \
5407 elf32_x86_64_elf_object_p
5408
5409 #undef elf_backend_bfd_from_remote_memory
5410 #define elf_backend_bfd_from_remote_memory \
5411 _bfd_elf32_bfd_from_remote_memory
5412
5413 #undef elf_backend_size_info
5414 #define elf_backend_size_info \
5415 _bfd_elf32_size_info
5416
5417 #include "elf32-target.h"