1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 /* Notes on implementation:
22
23 Thread Local Storage (TLS)
24
25 Overview:
26
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
29
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
32
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, x0, #:tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
38 nop
39
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
42
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
46 .tlsdesccall foo
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
48
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
52
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
56
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation, which is currently not implemented.
59
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
64
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
70
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset itself.
75
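   As a point of reference, the two GOT words form the usual tls_index
   argument handed to __tls_get_addr.  A minimal sketch (the field names
   are an assumption; the structure is not defined in this file):

      typedef struct
      {
        unsigned long ti_module;
        unsigned long ti_offset;
      } tls_index;

   where ti_module receives R_AARCH64_TLS_DTPMOD and ti_offset receives
   R_AARCH64_TLS_DTPREL, as described above.
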
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
80
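   Conceptually the descriptor is again a two-word object.  The layout
   below is the generic TLSDESC scheme, sketched here for orientation
   rather than taken from this file:

      typedef struct
      {
        ptrdiff_t (*entry) (void *);
        void *arg;
      } tlsdesc;

   The loader installs a resolver function in 'entry' and a private
   argument in 'arg'; the blr annotated with R_AARCH64_TLSDESC_CALL
   above calls the resolver with x0 pointing at the descriptor and
   receives the TP-relative offset of 'foo' back in x0.
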
81 Implementation:
82
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
86
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
92
93 The flow:
94
95 elfNN_aarch64_check_relocs()
96
97 This function is invoked for each relocation.
98
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when the
102 first local symbol is seen.
103
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
106
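   Illustrative sketch of the bookkeeping (the identifiers below are
   assumptions following the usual got_type convention of ELF backends,
   not necessarily the exact names used later in this file):

      got_type = is_tlsdesc_reloc ? GOT_TLSDESC_GD : GOT_TLS_GD;
      if (h != NULL)
        elf_aarch64_hash_entry (h)->got_type |= got_type;
      else
        local_got_type[r_symndx] |= got_type;
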
107 elfNN_aarch64_allocate_dynrelocs ()
108
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for
111 two relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
113 for this symbol.
114
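   For a general dynamic symbol the effect is roughly the following
   (a sketch only; h and htab are assumed to be in scope, and the real
   code also has to cope with other access models):

      h->got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 2 * GOT_ENTRY_SIZE;
      htab->root.srelgot->size += 2 * RELOC_SIZE (htab);
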
115 elfNN_aarch64_size_dynamic_sections ()
116
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the allocated GOT offset.
121
122 elfNN_aarch64_relocate_section ()
123
124 Calls elfNN_aarch64_final_link_relocate ()
125
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
133
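   Sketch of the flag convention (illustrative only; symbol_got_offset
   stands for wherever the GOT offset of the symbol at hand is kept):

      off = symbol_got_offset;
      if ((off & 1) != 0)
        off &= ~(bfd_vma) 1;         relocations already emitted
      else
        {
          ...emit the GOT relocation(s) for the symbol here...
          symbol_got_offset |= 1;    mark them as emitted
        }
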
134 elfNN_aarch64_final_link_relocate ()
135
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
137
138 #include "sysdep.h"
139 #include "bfd.h"
140 #include "libiberty.h"
141 #include "libbfd.h"
142 #include "bfd_stdint.h"
143 #include "elf-bfd.h"
144 #include "bfdlink.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
148
149 #define ARCH_SIZE NN
150
151 #if ARCH_SIZE == 64
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
157 #endif
158
159 #if ARCH_SIZE == 32
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
165 #endif
166
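/* For example, AARCH64_R (ABS32) expands to R_AARCH64_ABS32 when this
   file is compiled with ARCH_SIZE == 64 and to R_AARCH64_P32_ABS32 for
   the ILP32 (ARCH_SIZE == 32) backend, while a HOWTO64 table entry
   collapses to EMPTY_HOWTO (0) in the ILP32 case and a HOWTO32 entry
   collapses likewise in the LP64 case.  */
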
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12 \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12 \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1 \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
207 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
208 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
209 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
210
211 #define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
212 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
213 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
214 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
215 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
216 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
217 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
218 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
219 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
220 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
221 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
223 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
224 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
225 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
226 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC \
227 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_MOVW_G1 \
228 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
229 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
230 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC \
231 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
232 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
233 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
234
235 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
236 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
237 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
238 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
239 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
240 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
241 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
242 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
243 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
244 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
245 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
246 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
247 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
248
249 #define ELIMINATE_COPY_RELOCS 0
250
251 /* Return size of a relocation entry. HTAB is the bfd's
252 elf_aarch64_link_hash_table. */
253 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
254
255 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
256 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
257 #define PLT_ENTRY_SIZE (32)
258 #define PLT_SMALL_ENTRY_SIZE (16)
259 #define PLT_TLSDESC_ENTRY_SIZE (32)
260
261 /* Encoding of the nop instruction. */
262 #define INSN_NOP 0xd503201f
263
264 #define aarch64_compute_jump_table_size(htab) \
265 (((htab)->root.srelplt == NULL) ? 0 \
266 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
267
268 /* The first entry in a procedure linkage table looks like this. It
269 is used when the distance between the PLTGOT and the PLT is < 4GB.
270 Note that the dynamic linker gets &PLTGOT[2]
271 in x16 and needs to work out PLTGOT[1] by using an address of
272 [x16,#-GOT_ENTRY_SIZE]. */
273 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
274 {
275 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
276 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
277 #if ARCH_SIZE == 64
278 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
279 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
280 #else
281 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
282 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
283 #endif
284 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
285 0x1f, 0x20, 0x03, 0xd5, /* nop */
286 0x1f, 0x20, 0x03, 0xd5, /* nop */
287 0x1f, 0x20, 0x03, 0xd5, /* nop */
288 };
289
290 /* A per-function entry in a procedure linkage table looks like this.
291 It is used when the distance between the PLTGOT and the PLT
292 is < 4GB. */
293 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
294 {
295 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
296 #if ARCH_SIZE == 64
297 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
298 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
299 #else
300 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
301 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
302 #endif
303 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
304 };
305
306 static const bfd_byte
307 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
308 {
309 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
310 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
311 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
312 #if ARCH_SIZE == 64
313 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
314 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
315 #else
316 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
317 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
318 #endif
319 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
320 0x1f, 0x20, 0x03, 0xd5, /* nop */
321 0x1f, 0x20, 0x03, 0xd5, /* nop */
322 };
323
324 #define elf_info_to_howto elfNN_aarch64_info_to_howto
325 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
326
327 #define AARCH64_ELF_ABI_VERSION 0
328
329 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
330 #define ALL_ONES (~ (bfd_vma) 0)
331
332 /* Indexed by the bfd internal reloc enumerators.
333 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
334 in reloc.c. */
335
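/* A hedged illustration of the indexing rule stated above (the helper
   name and the exact bounds check are assumptions, not necessarily what
   the rest of this file uses):

      static reloc_howto_type *
      lookup_howto (bfd_reloc_code_real_type code)
      {
        unsigned int i = code - BFD_RELOC_AARCH64_RELOC_START;
        return i < ARRAY_SIZE (elfNN_aarch64_howto_table)
               ? &elfNN_aarch64_howto_table[i] : NULL;
      }
*/
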
336 static reloc_howto_type elfNN_aarch64_howto_table[] =
337 {
338 EMPTY_HOWTO (0),
339
340 /* Basic data relocations. */
341
342 #if ARCH_SIZE == 64
343 HOWTO (R_AARCH64_NULL, /* type */
344 0, /* rightshift */
345 3, /* size (0 = byte, 1 = short, 2 = long) */
346 0, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_dont, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_AARCH64_NULL", /* name */
352 FALSE, /* partial_inplace */
353 0, /* src_mask */
354 0, /* dst_mask */
355 FALSE), /* pcrel_offset */
356 #else
357 HOWTO (R_AARCH64_NONE, /* type */
358 0, /* rightshift */
359 3, /* size (0 = byte, 1 = short, 2 = long) */
360 0, /* bitsize */
361 FALSE, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_dont, /* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_AARCH64_NONE", /* name */
366 FALSE, /* partial_inplace */
367 0, /* src_mask */
368 0, /* dst_mask */
369 FALSE), /* pcrel_offset */
370 #endif
371
372 /* .xword: (S+A) */
373 HOWTO64 (AARCH64_R (ABS64), /* type */
374 0, /* rightshift */
375 4, /* size (4 = long long) */
376 64, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_unsigned, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (ABS64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 /* .word: (S+A) */
388 HOWTO (AARCH64_R (ABS32), /* type */
389 0, /* rightshift */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
391 32, /* bitsize */
392 FALSE, /* pc_relative */
393 0, /* bitpos */
394 complain_overflow_unsigned, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (ABS32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 FALSE), /* pcrel_offset */
401
402 /* .half: (S+A) */
403 HOWTO (AARCH64_R (ABS16), /* type */
404 0, /* rightshift */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
406 16, /* bitsize */
407 FALSE, /* pc_relative */
408 0, /* bitpos */
409 complain_overflow_unsigned, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (ABS16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 FALSE), /* pcrel_offset */
416
417 /* .xword: (S+A-P) */
418 HOWTO64 (AARCH64_R (PREL64), /* type */
419 0, /* rightshift */
420 4, /* size (4 = long long) */
421 64, /* bitsize */
422 TRUE, /* pc_relative */
423 0, /* bitpos */
424 complain_overflow_signed, /* complain_on_overflow */
425 bfd_elf_generic_reloc, /* special_function */
426 AARCH64_R_STR (PREL64), /* name */
427 FALSE, /* partial_inplace */
428 ALL_ONES, /* src_mask */
429 ALL_ONES, /* dst_mask */
430 TRUE), /* pcrel_offset */
431
432 /* .word: (S+A-P) */
433 HOWTO (AARCH64_R (PREL32), /* type */
434 0, /* rightshift */
435 2, /* size (0 = byte, 1 = short, 2 = long) */
436 32, /* bitsize */
437 TRUE, /* pc_relative */
438 0, /* bitpos */
439 complain_overflow_signed, /* complain_on_overflow */
440 bfd_elf_generic_reloc, /* special_function */
441 AARCH64_R_STR (PREL32), /* name */
442 FALSE, /* partial_inplace */
443 0xffffffff, /* src_mask */
444 0xffffffff, /* dst_mask */
445 TRUE), /* pcrel_offset */
446
447 /* .half: (S+A-P) */
448 HOWTO (AARCH64_R (PREL16), /* type */
449 0, /* rightshift */
450 1, /* size (0 = byte, 1 = short, 2 = long) */
451 16, /* bitsize */
452 TRUE, /* pc_relative */
453 0, /* bitpos */
454 complain_overflow_signed, /* complain_on_overflow */
455 bfd_elf_generic_reloc, /* special_function */
456 AARCH64_R_STR (PREL16), /* name */
457 FALSE, /* partial_inplace */
458 0xffff, /* src_mask */
459 0xffff, /* dst_mask */
460 TRUE), /* pcrel_offset */
461
462 /* Group relocations to create a 16, 32, 48 or 64 bit
463 unsigned data or abs address inline. */
464
465 /* MOVZ: ((S+A) >> 0) & 0xffff */
466 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
467 0, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 16, /* bitsize */
470 FALSE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_unsigned, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G0), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
479
480 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
481 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
482 0, /* rightshift */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
484 16, /* bitsize */
485 FALSE, /* pc_relative */
486 0, /* bitpos */
487 complain_overflow_dont, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
494
495 /* MOVZ: ((S+A) >> 16) & 0xffff */
496 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
497 16, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 16, /* bitsize */
500 FALSE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_unsigned, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G1), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
509
510 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
512 16, /* rightshift */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
514 16, /* bitsize */
515 FALSE, /* pc_relative */
516 0, /* bitpos */
517 complain_overflow_dont, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
524
525 /* MOVZ: ((S+A) >> 32) & 0xffff */
526 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
527 32, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 16, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_unsigned, /* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 AARCH64_R_STR (MOVW_UABS_G2), /* name */
535 FALSE, /* partial_inplace */
536 0xffff, /* src_mask */
537 0xffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
541 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
542 32, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 16, /* bitsize */
545 FALSE, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont, /* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
550 FALSE, /* partial_inplace */
551 0xffff, /* src_mask */
552 0xffff, /* dst_mask */
553 FALSE), /* pcrel_offset */
554
555 /* MOVZ: ((S+A) >> 48) & 0xffff */
556 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
557 48, /* rightshift */
558 2, /* size (0 = byte, 1 = short, 2 = long) */
559 16, /* bitsize */
560 FALSE, /* pc_relative */
561 0, /* bitpos */
562 complain_overflow_unsigned, /* complain_on_overflow */
563 bfd_elf_generic_reloc, /* special_function */
564 AARCH64_R_STR (MOVW_UABS_G3), /* name */
565 FALSE, /* partial_inplace */
566 0xffff, /* src_mask */
567 0xffff, /* dst_mask */
568 FALSE), /* pcrel_offset */
569
570 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
571 signed data or abs address inline. Will change instruction
572 to MOVN or MOVZ depending on sign of calculated value. */
573
574 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
575 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
576 0, /* rightshift */
577 2, /* size (0 = byte, 1 = short, 2 = long) */
578 16, /* bitsize */
579 FALSE, /* pc_relative */
580 0, /* bitpos */
581 complain_overflow_signed, /* complain_on_overflow */
582 bfd_elf_generic_reloc, /* special_function */
583 AARCH64_R_STR (MOVW_SABS_G0), /* name */
584 FALSE, /* partial_inplace */
585 0xffff, /* src_mask */
586 0xffff, /* dst_mask */
587 FALSE), /* pcrel_offset */
588
589 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
590 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
591 16, /* rightshift */
592 2, /* size (0 = byte, 1 = short, 2 = long) */
593 16, /* bitsize */
594 FALSE, /* pc_relative */
595 0, /* bitpos */
596 complain_overflow_signed, /* complain_on_overflow */
597 bfd_elf_generic_reloc, /* special_function */
598 AARCH64_R_STR (MOVW_SABS_G1), /* name */
599 FALSE, /* partial_inplace */
600 0xffff, /* src_mask */
601 0xffff, /* dst_mask */
602 FALSE), /* pcrel_offset */
603
604 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
605 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
606 32, /* rightshift */
607 2, /* size (0 = byte, 1 = short, 2 = long) */
608 16, /* bitsize */
609 FALSE, /* pc_relative */
610 0, /* bitpos */
611 complain_overflow_signed, /* complain_on_overflow */
612 bfd_elf_generic_reloc, /* special_function */
613 AARCH64_R_STR (MOVW_SABS_G2), /* name */
614 FALSE, /* partial_inplace */
615 0xffff, /* src_mask */
616 0xffff, /* dst_mask */
617 FALSE), /* pcrel_offset */
618
619 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
620 addresses: PG(x) is (x & ~0xfff). */
621
622 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
623 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
624 2, /* rightshift */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
626 19, /* bitsize */
627 TRUE, /* pc_relative */
628 0, /* bitpos */
629 complain_overflow_signed, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (LD_PREL_LO19), /* name */
632 FALSE, /* partial_inplace */
633 0x7ffff, /* src_mask */
634 0x7ffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
636
637 /* ADR: (S+A-P) & 0x1fffff */
638 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 21, /* bitsize */
642 TRUE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_signed, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADR_PREL_LO21), /* name */
647 FALSE, /* partial_inplace */
648 0x1fffff, /* src_mask */
649 0x1fffff, /* dst_mask */
650 TRUE), /* pcrel_offset */
651
652 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
653 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
654 12, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 21, /* bitsize */
657 TRUE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_signed, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
662 FALSE, /* partial_inplace */
663 0x1fffff, /* src_mask */
664 0x1fffff, /* dst_mask */
665 TRUE), /* pcrel_offset */
666
667 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
668 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
669 12, /* rightshift */
670 2, /* size (0 = byte, 1 = short, 2 = long) */
671 21, /* bitsize */
672 TRUE, /* pc_relative */
673 0, /* bitpos */
674 complain_overflow_dont, /* complain_on_overflow */
675 bfd_elf_generic_reloc, /* special_function */
676 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
677 FALSE, /* partial_inplace */
678 0x1fffff, /* src_mask */
679 0x1fffff, /* dst_mask */
680 TRUE), /* pcrel_offset */
681
682 /* ADD: (S+A) & 0xfff [no overflow check] */
683 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
684 0, /* rightshift */
685 2, /* size (0 = byte, 1 = short, 2 = long) */
686 12, /* bitsize */
687 FALSE, /* pc_relative */
688 10, /* bitpos */
689 complain_overflow_dont, /* complain_on_overflow */
690 bfd_elf_generic_reloc, /* special_function */
691 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
692 FALSE, /* partial_inplace */
693 0x3ffc00, /* src_mask */
694 0x3ffc00, /* dst_mask */
695 FALSE), /* pcrel_offset */
696
697 /* LD/ST8: (S+A) & 0xfff */
698 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
699 0, /* rightshift */
700 2, /* size (0 = byte, 1 = short, 2 = long) */
701 12, /* bitsize */
702 FALSE, /* pc_relative */
703 0, /* bitpos */
704 complain_overflow_dont, /* complain_on_overflow */
705 bfd_elf_generic_reloc, /* special_function */
706 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
707 FALSE, /* partial_inplace */
708 0xfff, /* src_mask */
709 0xfff, /* dst_mask */
710 FALSE), /* pcrel_offset */
711
712 /* Relocations for control-flow instructions. */
713
714 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
715 HOWTO (AARCH64_R (TSTBR14), /* type */
716 2, /* rightshift */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
718 14, /* bitsize */
719 TRUE, /* pc_relative */
720 0, /* bitpos */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (TSTBR14), /* name */
724 FALSE, /* partial_inplace */
725 0x3fff, /* src_mask */
726 0x3fff, /* dst_mask */
727 TRUE), /* pcrel_offset */
728
729 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
730 HOWTO (AARCH64_R (CONDBR19), /* type */
731 2, /* rightshift */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
733 19, /* bitsize */
734 TRUE, /* pc_relative */
735 0, /* bitpos */
736 complain_overflow_signed, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (CONDBR19), /* name */
739 FALSE, /* partial_inplace */
740 0x7ffff, /* src_mask */
741 0x7ffff, /* dst_mask */
742 TRUE), /* pcrel_offset */
743
744 /* B: ((S+A-P) >> 2) & 0x3ffffff */
745 HOWTO (AARCH64_R (JUMP26), /* type */
746 2, /* rightshift */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
748 26, /* bitsize */
749 TRUE, /* pc_relative */
750 0, /* bitpos */
751 complain_overflow_signed, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (JUMP26), /* name */
754 FALSE, /* partial_inplace */
755 0x3ffffff, /* src_mask */
756 0x3ffffff, /* dst_mask */
757 TRUE), /* pcrel_offset */
758
759 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
760 HOWTO (AARCH64_R (CALL26), /* type */
761 2, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 26, /* bitsize */
764 TRUE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_signed, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (CALL26), /* name */
769 FALSE, /* partial_inplace */
770 0x3ffffff, /* src_mask */
771 0x3ffffff, /* dst_mask */
772 TRUE), /* pcrel_offset */
773
774 /* LD/ST16: (S+A) & 0xffe */
775 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
776 1, /* rightshift */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
778 12, /* bitsize */
779 FALSE, /* pc_relative */
780 0, /* bitpos */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xffe, /* src_mask */
786 0xffe, /* dst_mask */
787 FALSE), /* pcrel_offset */
788
789 /* LD/ST32: (S+A) & 0xffc */
790 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
791 2, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 12, /* bitsize */
794 FALSE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_dont, /* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
799 FALSE, /* partial_inplace */
800 0xffc, /* src_mask */
801 0xffc, /* dst_mask */
802 FALSE), /* pcrel_offset */
803
804 /* LD/ST64: (S+A) & 0xff8 */
805 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
806 3, /* rightshift */
807 2, /* size (0 = byte, 1 = short, 2 = long) */
808 12, /* bitsize */
809 FALSE, /* pc_relative */
810 0, /* bitpos */
811 complain_overflow_dont, /* complain_on_overflow */
812 bfd_elf_generic_reloc, /* special_function */
813 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
814 FALSE, /* partial_inplace */
815 0xff8, /* src_mask */
816 0xff8, /* dst_mask */
817 FALSE), /* pcrel_offset */
818
819 /* LD/ST128: (S+A) & 0xff0 */
820 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
821 4, /* rightshift */
822 2, /* size (0 = byte, 1 = short, 2 = long) */
823 12, /* bitsize */
824 FALSE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_dont, /* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
829 FALSE, /* partial_inplace */
830 0xff0, /* src_mask */
831 0xff0, /* dst_mask */
832 FALSE), /* pcrel_offset */
833
834 /* Set a load-literal immediate field to bits
835 0x1FFFFC of G(S)-P */
836 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
837 2, /* rightshift */
838 2, /* size (0 = byte,1 = short,2 = long) */
839 19, /* bitsize */
840 TRUE, /* pc_relative */
841 0, /* bitpos */
842 complain_overflow_signed, /* complain_on_overflow */
843 bfd_elf_generic_reloc, /* special_function */
844 AARCH64_R_STR (GOT_LD_PREL19), /* name */
845 FALSE, /* partial_inplace */
846 0xffffe0, /* src_mask */
847 0xffffe0, /* dst_mask */
848 TRUE), /* pcrel_offset */
849
850 /* Get to the page for the GOT entry for the symbol
851 (G(S) - P) using an ADRP instruction. */
852 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
853 12, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 21, /* bitsize */
856 TRUE, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont, /* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
861 FALSE, /* partial_inplace */
862 0x1fffff, /* src_mask */
863 0x1fffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
865
866 /* LD64: GOT offset G(S) & 0xff8 */
867 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
868 3, /* rightshift */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
870 12, /* bitsize */
871 FALSE, /* pc_relative */
872 0, /* bitpos */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
876 FALSE, /* partial_inplace */
877 0xff8, /* src_mask */
878 0xff8, /* dst_mask */
879 FALSE), /* pcrel_offset */
880
881 /* LD32: GOT offset G(S) & 0xffc */
882 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
883 2, /* rightshift */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
885 12, /* bitsize */
886 FALSE, /* pc_relative */
887 0, /* bitpos */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xffc, /* src_mask */
893 0xffc, /* dst_mask */
894 FALSE), /* pcrel_offset */
895
896 /* Lower 16 bits of GOT offset for the symbol. */
897 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G0_NC), /* type */
898 0, /* rightshift */
899 2, /* size (0 = byte, 1 = short, 2 = long) */
900 16, /* bitsize */
901 FALSE, /* pc_relative */
902 0, /* bitpos */
903 complain_overflow_dont, /* complain_on_overflow */
904 bfd_elf_generic_reloc, /* special_function */
905 AARCH64_R_STR (MOVW_GOTOFF_G0_NC), /* name */
906 FALSE, /* partial_inplace */
907 0xffff, /* src_mask */
908 0xffff, /* dst_mask */
909 FALSE), /* pcrel_offset */
910
911 /* Higher 16 bits of GOT offset for the symbol. */
912 HOWTO64 (AARCH64_R (MOVW_GOTOFF_G1), /* type */
913 16, /* rightshift */
914 2, /* size (0 = byte, 1 = short, 2 = long) */
915 16, /* bitsize */
916 FALSE, /* pc_relative */
917 0, /* bitpos */
918 complain_overflow_unsigned, /* complain_on_overflow */
919 bfd_elf_generic_reloc, /* special_function */
920 AARCH64_R_STR (MOVW_GOTOFF_G1), /* name */
921 FALSE, /* partial_inplace */
922 0xffff, /* src_mask */
923 0xffff, /* dst_mask */
924 FALSE), /* pcrel_offset */
925
926 /* LD64: GOT offset for the symbol. */
927 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
928 3, /* rightshift */
929 2, /* size (0 = byte, 1 = short, 2 = long) */
930 12, /* bitsize */
931 FALSE, /* pc_relative */
932 0, /* bitpos */
933 complain_overflow_unsigned, /* complain_on_overflow */
934 bfd_elf_generic_reloc, /* special_function */
935 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
936 FALSE, /* partial_inplace */
937 0x7ff8, /* src_mask */
938 0x7ff8, /* dst_mask */
939 FALSE), /* pcrel_offset */
940
941 /* LD32: GOT offset to the page address of GOT table.
942 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
943 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
944 2, /* rightshift */
945 2, /* size (0 = byte, 1 = short, 2 = long) */
946 12, /* bitsize */
947 FALSE, /* pc_relative */
948 0, /* bitpos */
949 complain_overflow_unsigned, /* complain_on_overflow */
950 bfd_elf_generic_reloc, /* special_function */
951 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
952 FALSE, /* partial_inplace */
953 0x5ffc, /* src_mask */
954 0x5ffc, /* dst_mask */
955 FALSE), /* pcrel_offset */
956
957 /* LD64: GOT offset to the page address of GOT table.
958 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
959 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
960 3, /* rightshift */
961 2, /* size (0 = byte, 1 = short, 2 = long) */
962 12, /* bitsize */
963 FALSE, /* pc_relative */
964 0, /* bitpos */
965 complain_overflow_unsigned, /* complain_on_overflow */
966 bfd_elf_generic_reloc, /* special_function */
967 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
968 FALSE, /* partial_inplace */
969 0x7ff8, /* src_mask */
970 0x7ff8, /* dst_mask */
971 FALSE), /* pcrel_offset */
972
973 /* Get to the page for the GOT entry for the symbol
974 (G(S) - P) using an ADRP instruction. */
975 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
976 12, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 21, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont, /* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
984 FALSE, /* partial_inplace */
985 0x1fffff, /* src_mask */
986 0x1fffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 21, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont, /* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
998 FALSE, /* partial_inplace */
999 0x1fffff, /* src_mask */
1000 0x1fffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1004 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
1005 0, /* rightshift */
1006 2, /* size (0 = byte, 1 = short, 2 = long) */
1007 12, /* bitsize */
1008 FALSE, /* pc_relative */
1009 0, /* bitpos */
1010 complain_overflow_dont, /* complain_on_overflow */
1011 bfd_elf_generic_reloc, /* special_function */
1012 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
1013 FALSE, /* partial_inplace */
1014 0xfff, /* src_mask */
1015 0xfff, /* dst_mask */
1016 FALSE), /* pcrel_offset */
1017
1018 /* Lower 16 bits of GOT offset to tls_index. */
1019 HOWTO64 (AARCH64_R (TLSGD_MOVW_G0_NC), /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 16, /* bitsize */
1023 FALSE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont, /* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 AARCH64_R_STR (TLSGD_MOVW_G0_NC), /* name */
1028 FALSE, /* partial_inplace */
1029 0xffff, /* src_mask */
1030 0xffff, /* dst_mask */
1031 FALSE), /* pcrel_offset */
1032
1033 /* Higher 16 bits of GOT offset to tls_index. */
1034 HOWTO64 (AARCH64_R (TLSGD_MOVW_G1), /* type */
1035 16, /* rightshift */
1036 2, /* size (0 = byte, 1 = short, 2 = long) */
1037 16, /* bitsize */
1038 FALSE, /* pc_relative */
1039 0, /* bitpos */
1040 complain_overflow_unsigned, /* complain_on_overflow */
1041 bfd_elf_generic_reloc, /* special_function */
1042 AARCH64_R_STR (TLSGD_MOVW_G1), /* name */
1043 FALSE, /* partial_inplace */
1044 0xffff, /* src_mask */
1045 0xffff, /* dst_mask */
1046 FALSE), /* pcrel_offset */
1047
1048 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
1049 12, /* rightshift */
1050 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 21, /* bitsize */
1052 FALSE, /* pc_relative */
1053 0, /* bitpos */
1054 complain_overflow_dont, /* complain_on_overflow */
1055 bfd_elf_generic_reloc, /* special_function */
1056 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
1057 FALSE, /* partial_inplace */
1058 0x1fffff, /* src_mask */
1059 0x1fffff, /* dst_mask */
1060 FALSE), /* pcrel_offset */
1061
1062 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
1063 3, /* rightshift */
1064 2, /* size (0 = byte, 1 = short, 2 = long) */
1065 12, /* bitsize */
1066 FALSE, /* pc_relative */
1067 0, /* bitpos */
1068 complain_overflow_dont, /* complain_on_overflow */
1069 bfd_elf_generic_reloc, /* special_function */
1070 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
1071 FALSE, /* partial_inplace */
1072 0xff8, /* src_mask */
1073 0xff8, /* dst_mask */
1074 FALSE), /* pcrel_offset */
1075
1076 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1077 2, /* rightshift */
1078 2, /* size (0 = byte, 1 = short, 2 = long) */
1079 12, /* bitsize */
1080 FALSE, /* pc_relative */
1081 0, /* bitpos */
1082 complain_overflow_dont, /* complain_on_overflow */
1083 bfd_elf_generic_reloc, /* special_function */
1084 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1085 FALSE, /* partial_inplace */
1086 0xffc, /* src_mask */
1087 0xffc, /* dst_mask */
1088 FALSE), /* pcrel_offset */
1089
1090 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1091 2, /* rightshift */
1092 2, /* size (0 = byte, 1 = short, 2 = long) */
1093 19, /* bitsize */
1094 FALSE, /* pc_relative */
1095 0, /* bitpos */
1096 complain_overflow_dont, /* complain_on_overflow */
1097 bfd_elf_generic_reloc, /* special_function */
1098 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1099 FALSE, /* partial_inplace */
1100 0x1ffffc, /* src_mask */
1101 0x1ffffc, /* dst_mask */
1102 FALSE), /* pcrel_offset */
1103
1104 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
1105 0, /* rightshift */
1106 2, /* size (0 = byte, 1 = short, 2 = long) */
1107 16, /* bitsize */
1108 FALSE, /* pc_relative */
1109 0, /* bitpos */
1110 complain_overflow_dont, /* complain_on_overflow */
1111 bfd_elf_generic_reloc, /* special_function */
1112 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
1113 FALSE, /* partial_inplace */
1114 0xffff, /* src_mask */
1115 0xffff, /* dst_mask */
1116 FALSE), /* pcrel_offset */
1117
1118 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
1119 16, /* rightshift */
1120 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 16, /* bitsize */
1122 FALSE, /* pc_relative */
1123 0, /* bitpos */
1124 complain_overflow_unsigned, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
1127 FALSE, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 FALSE), /* pcrel_offset */
1131
1132 /* ADD: bit[23:12] of byte offset to module TLS base address. */
1133 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_HI12), /* type */
1134 12, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 12, /* bitsize */
1137 FALSE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_unsigned, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (TLSLD_ADD_DTPREL_HI12), /* name */
1142 FALSE, /* partial_inplace */
1143 0xfff, /* src_mask */
1144 0xfff, /* dst_mask */
1145 FALSE), /* pcrel_offset */
1146
1147 /* Unsigned 12 bit byte offset to module TLS base address. */
1148 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 12, /* bitsize */
1152 FALSE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_unsigned, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1157 FALSE, /* partial_inplace */
1158 0xfff, /* src_mask */
1159 0xfff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1161
1162 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12. */
1163 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12_NC), /* type */
1164 0, /* rightshift */
1165 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 12, /* bitsize */
1167 FALSE, /* pc_relative */
1168 0, /* bitpos */
1169 complain_overflow_dont, /* complain_on_overflow */
1170 bfd_elf_generic_reloc, /* special_function */
1171 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12_NC), /* name */
1172 FALSE, /* partial_inplace */
1173 0xfff, /* src_mask */
1174 0xfff, /* dst_mask */
1175 FALSE), /* pcrel_offset */
1176
1177 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1178 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1179 0, /* rightshift */
1180 2, /* size (0 = byte, 1 = short, 2 = long) */
1181 12, /* bitsize */
1182 FALSE, /* pc_relative */
1183 0, /* bitpos */
1184 complain_overflow_dont, /* complain_on_overflow */
1185 bfd_elf_generic_reloc, /* special_function */
1186 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1187 FALSE, /* partial_inplace */
1188 0xfff, /* src_mask */
1189 0xfff, /* dst_mask */
1190 FALSE), /* pcrel_offset */
1191
1192 /* Get to the page for the GOT entry for the symbol
1193 (G(S) - P) using an ADRP instruction. */
1194 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1195 12, /* rightshift */
1196 2, /* size (0 = byte, 1 = short, 2 = long) */
1197 21, /* bitsize */
1198 TRUE, /* pc_relative */
1199 0, /* bitpos */
1200 complain_overflow_signed, /* complain_on_overflow */
1201 bfd_elf_generic_reloc, /* special_function */
1202 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1203 FALSE, /* partial_inplace */
1204 0x1fffff, /* src_mask */
1205 0x1fffff, /* dst_mask */
1206 TRUE), /* pcrel_offset */
1207
1208 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1209 0, /* rightshift */
1210 2, /* size (0 = byte, 1 = short, 2 = long) */
1211 21, /* bitsize */
1212 TRUE, /* pc_relative */
1213 0, /* bitpos */
1214 complain_overflow_signed, /* complain_on_overflow */
1215 bfd_elf_generic_reloc, /* special_function */
1216 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1217 FALSE, /* partial_inplace */
1218 0x1fffff, /* src_mask */
1219 0x1fffff, /* dst_mask */
1220 TRUE), /* pcrel_offset */
1221
1222 /* LD/ST16: bit[11:1] of byte offset to module TLS base address. */
1223 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12), /* type */
1224 1, /* rightshift */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1226 11, /* bitsize */
1227 FALSE, /* pc_relative */
1228 10, /* bitpos */
1229 complain_overflow_unsigned, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12), /* name */
1232 FALSE, /* partial_inplace */
1233 0x1ffc00, /* src_mask */
1234 0x1ffc00, /* dst_mask */
1235 FALSE), /* pcrel_offset */
1236
1237 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12, but no overflow check. */
1238 HOWTO64 (AARCH64_R (TLSLD_LDST16_DTPREL_LO12_NC), /* type */
1239 1, /* rightshift */
1240 2, /* size (0 = byte, 1 = short, 2 = long) */
1241 11, /* bitsize */
1242 FALSE, /* pc_relative */
1243 10, /* bitpos */
1244 complain_overflow_dont, /* complain_on_overflow */
1245 bfd_elf_generic_reloc, /* special_function */
1246 AARCH64_R_STR (TLSLD_LDST16_DTPREL_LO12_NC), /* name */
1247 FALSE, /* partial_inplace */
1248 0x1ffc00, /* src_mask */
1249 0x1ffc00, /* dst_mask */
1250 FALSE), /* pcrel_offset */
1251
1252 /* LD/ST32: bit[11:2] of byte offset to module TLS base address. */
1253 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12), /* type */
1254 2, /* rightshift */
1255 2, /* size (0 = byte, 1 = short, 2 = long) */
1256 10, /* bitsize */
1257 FALSE, /* pc_relative */
1258 10, /* bitpos */
1259 complain_overflow_unsigned, /* complain_on_overflow */
1260 bfd_elf_generic_reloc, /* special_function */
1261 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12), /* name */
1262 FALSE, /* partial_inplace */
1263 0x3ffc00, /* src_mask */
1264 0x3ffc00, /* dst_mask */
1265 FALSE), /* pcrel_offset */
1266
1267 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12, but no overflow check. */
1268 HOWTO64 (AARCH64_R (TLSLD_LDST32_DTPREL_LO12_NC), /* type */
1269 2, /* rightshift */
1270 2, /* size (0 = byte, 1 = short, 2 = long) */
1271 10, /* bitsize */
1272 FALSE, /* pc_relative */
1273 10, /* bitpos */
1274 complain_overflow_dont, /* complain_on_overflow */
1275 bfd_elf_generic_reloc, /* special_function */
1276 AARCH64_R_STR (TLSLD_LDST32_DTPREL_LO12_NC), /* name */
1277 FALSE, /* partial_inplace */
1278 0xffc00, /* src_mask */
1279 0xffc00, /* dst_mask */
1280 FALSE), /* pcrel_offset */
1281
1282 /* LD/ST64: bit[11:3] of byte offset to module TLS base address. */
1283 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12), /* type */
1284 3, /* rightshift */
1285 2, /* size (0 = byte, 1 = short, 2 = long) */
1286 9, /* bitsize */
1287 FALSE, /* pc_relative */
1288 10, /* bitpos */
1289 complain_overflow_unsigned, /* complain_on_overflow */
1290 bfd_elf_generic_reloc, /* special_function */
1291 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12), /* name */
1292 FALSE, /* partial_inplace */
1293 0x3ffc00, /* src_mask */
1294 0x3ffc00, /* dst_mask */
1295 FALSE), /* pcrel_offset */
1296
1297 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12, but no overflow check. */
1298 HOWTO64 (AARCH64_R (TLSLD_LDST64_DTPREL_LO12_NC), /* type */
1299 3, /* rightshift */
1300 2, /* size (0 = byte, 1 = short, 2 = long) */
1301 9, /* bitsize */
1302 FALSE, /* pc_relative */
1303 10, /* bitpos */
1304 complain_overflow_dont, /* complain_on_overflow */
1305 bfd_elf_generic_reloc, /* special_function */
1306 AARCH64_R_STR (TLSLD_LDST64_DTPREL_LO12_NC), /* name */
1307 FALSE, /* partial_inplace */
1308 0x7fc00, /* src_mask */
1309 0x7fc00, /* dst_mask */
1310 FALSE), /* pcrel_offset */
1311
1312 /* LD/ST8: bit[11:0] of byte offset to module TLS base address. */
1313 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12), /* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 12, /* bitsize */
1317 FALSE, /* pc_relative */
1318 10, /* bitpos */
1319 complain_overflow_unsigned, /* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12), /* name */
1322 FALSE, /* partial_inplace */
1323 0x3ffc00, /* src_mask */
1324 0x3ffc00, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 /* Same as BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12, but no overflow check. */
1328 HOWTO64 (AARCH64_R (TLSLD_LDST8_DTPREL_LO12_NC), /* type */
1329 0, /* rightshift */
1330 2, /* size (0 = byte, 1 = short, 2 = long) */
1331 12, /* bitsize */
1332 FALSE, /* pc_relative */
1333 10, /* bitpos */
1334 complain_overflow_dont, /* complain_on_overflow */
1335 bfd_elf_generic_reloc, /* special_function */
1336 AARCH64_R_STR (TLSLD_LDST8_DTPREL_LO12_NC), /* name */
1337 FALSE, /* partial_inplace */
1338 0x3ffc00, /* src_mask */
1339 0x3ffc00, /* dst_mask */
1340 FALSE), /* pcrel_offset */
1341
1342 /* MOVZ: bit[15:0] of byte offset to module TLS base address. */
1343 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0), /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_unsigned, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0), /* name */
1352 FALSE, /* partial_inplace */
1353 0xffff, /* src_mask */
1354 0xffff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
1358 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G0_NC), /* type */
1359 0, /* rightshift */
1360 2, /* size (0 = byte, 1 = short, 2 = long) */
1361 16, /* bitsize */
1362 FALSE, /* pc_relative */
1363 0, /* bitpos */
1364 complain_overflow_dont, /* complain_on_overflow */
1365 bfd_elf_generic_reloc, /* special_function */
1366 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G0_NC), /* name */
1367 FALSE, /* partial_inplace */
1368 0xffff, /* src_mask */
1369 0xffff, /* dst_mask */
1370 FALSE), /* pcrel_offset */
1371
1372 /* MOVZ: bit[31:16] of byte offset to module TLS base address. */
1373 HOWTO (AARCH64_R (TLSLD_MOVW_DTPREL_G1), /* type */
1374 16, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 16, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_unsigned, /* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1), /* name */
1382 FALSE, /* partial_inplace */
1383 0xffff, /* src_mask */
1384 0xffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
1388 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G1_NC), /* type */
1389 16, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 16, /* bitsize */
1392 FALSE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G1_NC), /* name */
1397 FALSE, /* partial_inplace */
1398 0xffff, /* src_mask */
1399 0xffff, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1401
1402 /* MOVZ: bit[47:32] of byte offset to module TLS base address. */
1403 HOWTO64 (AARCH64_R (TLSLD_MOVW_DTPREL_G2), /* type */
1404 32, /* rightshift */
1405 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 16, /* bitsize */
1407 FALSE, /* pc_relative */
1408 0, /* bitpos */
1409 complain_overflow_unsigned, /* complain_on_overflow */
1410 bfd_elf_generic_reloc, /* special_function */
1411 AARCH64_R_STR (TLSLD_MOVW_DTPREL_G2), /* name */
1412 FALSE, /* partial_inplace */
1413 0xffff, /* src_mask */
1414 0xffff, /* dst_mask */
1415 FALSE), /* pcrel_offset */
1416
1417 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1418 32, /* rightshift */
1419 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 16, /* bitsize */
1421 FALSE, /* pc_relative */
1422 0, /* bitpos */
1423 complain_overflow_unsigned, /* complain_on_overflow */
1424 bfd_elf_generic_reloc, /* special_function */
1425 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1426 FALSE, /* partial_inplace */
1427 0xffff, /* src_mask */
1428 0xffff, /* dst_mask */
1429 FALSE), /* pcrel_offset */
1430
1431 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1432 16, /* rightshift */
1433 2, /* size (0 = byte, 1 = short, 2 = long) */
1434 16, /* bitsize */
1435 FALSE, /* pc_relative */
1436 0, /* bitpos */
1437 complain_overflow_dont, /* complain_on_overflow */
1438 bfd_elf_generic_reloc, /* special_function */
1439 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1440 FALSE, /* partial_inplace */
1441 0xffff, /* src_mask */
1442 0xffff, /* dst_mask */
1443 FALSE), /* pcrel_offset */
1444
1445 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1446 16, /* rightshift */
1447 2, /* size (0 = byte, 1 = short, 2 = long) */
1448 16, /* bitsize */
1449 FALSE, /* pc_relative */
1450 0, /* bitpos */
1451 complain_overflow_dont, /* complain_on_overflow */
1452 bfd_elf_generic_reloc, /* special_function */
1453 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1454 FALSE, /* partial_inplace */
1455 0xffff, /* src_mask */
1456 0xffff, /* dst_mask */
1457 FALSE), /* pcrel_offset */
1458
1459 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1460 0, /* rightshift */
1461 2, /* size (0 = byte, 1 = short, 2 = long) */
1462 16, /* bitsize */
1463 FALSE, /* pc_relative */
1464 0, /* bitpos */
1465 complain_overflow_dont, /* complain_on_overflow */
1466 bfd_elf_generic_reloc, /* special_function */
1467 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1468 FALSE, /* partial_inplace */
1469 0xffff, /* src_mask */
1470 0xffff, /* dst_mask */
1471 FALSE), /* pcrel_offset */
1472
1473 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1474 0, /* rightshift */
1475 2, /* size (0 = byte, 1 = short, 2 = long) */
1476 16, /* bitsize */
1477 FALSE, /* pc_relative */
1478 0, /* bitpos */
1479 complain_overflow_dont, /* complain_on_overflow */
1480 bfd_elf_generic_reloc, /* special_function */
1481 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1482 FALSE, /* partial_inplace */
1483 0xffff, /* src_mask */
1484 0xffff, /* dst_mask */
1485 FALSE), /* pcrel_offset */
1486
1487 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1488 12, /* rightshift */
1489 2, /* size (0 = byte, 1 = short, 2 = long) */
1490 12, /* bitsize */
1491 FALSE, /* pc_relative */
1492 0, /* bitpos */
1493 complain_overflow_unsigned, /* complain_on_overflow */
1494 bfd_elf_generic_reloc, /* special_function */
1495 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1496 FALSE, /* partial_inplace */
1497 0xfff, /* src_mask */
1498 0xfff, /* dst_mask */
1499 FALSE), /* pcrel_offset */
1500
1501 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1502 0, /* rightshift */
1503 2, /* size (0 = byte, 1 = short, 2 = long) */
1504 12, /* bitsize */
1505 FALSE, /* pc_relative */
1506 0, /* bitpos */
1507 complain_overflow_unsigned, /* complain_on_overflow */
1508 bfd_elf_generic_reloc, /* special_function */
1509 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1510 FALSE, /* partial_inplace */
1511 0xfff, /* src_mask */
1512 0xfff, /* dst_mask */
1513 FALSE), /* pcrel_offset */
1514
1515 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1516 0, /* rightshift */
1517 2, /* size (0 = byte, 1 = short, 2 = long) */
1518 12, /* bitsize */
1519 FALSE, /* pc_relative */
1520 0, /* bitpos */
1521 complain_overflow_dont, /* complain_on_overflow */
1522 bfd_elf_generic_reloc, /* special_function */
1523 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1524 FALSE, /* partial_inplace */
1525 0xfff, /* src_mask */
1526 0xfff, /* dst_mask */
1527 FALSE), /* pcrel_offset */
1528
1529 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1530 2, /* rightshift */
1531 2, /* size (0 = byte, 1 = short, 2 = long) */
1532 19, /* bitsize */
1533 TRUE, /* pc_relative */
1534 0, /* bitpos */
1535 complain_overflow_dont, /* complain_on_overflow */
1536 bfd_elf_generic_reloc, /* special_function */
1537 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1538 FALSE, /* partial_inplace */
1539 0x0ffffe0, /* src_mask */
1540 0x0ffffe0, /* dst_mask */
1541 TRUE), /* pcrel_offset */
1542
1543 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1544 0, /* rightshift */
1545 2, /* size (0 = byte, 1 = short, 2 = long) */
1546 21, /* bitsize */
1547 TRUE, /* pc_relative */
1548 0, /* bitpos */
1549 complain_overflow_dont, /* complain_on_overflow */
1550 bfd_elf_generic_reloc, /* special_function */
1551 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1552 FALSE, /* partial_inplace */
1553 0x1fffff, /* src_mask */
1554 0x1fffff, /* dst_mask */
1555 TRUE), /* pcrel_offset */
1556
1557 /* Get to the page for the GOT entry for the symbol
1558 (G(S) - P) using an ADRP instruction. */
1559 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1560 12, /* rightshift */
1561 2, /* size (0 = byte, 1 = short, 2 = long) */
1562 21, /* bitsize */
1563 TRUE, /* pc_relative */
1564 0, /* bitpos */
1565 complain_overflow_dont, /* complain_on_overflow */
1566 bfd_elf_generic_reloc, /* special_function */
1567 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1568 FALSE, /* partial_inplace */
1569 0x1fffff, /* src_mask */
1570 0x1fffff, /* dst_mask */
1571 TRUE), /* pcrel_offset */
1572
1573 /* LD64: GOT offset G(S) & 0xff8. */
1574 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1575 3, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 12, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_dont, /* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1583 FALSE, /* partial_inplace */
1584 0xff8, /* src_mask */
1585 0xff8, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 /* LD32: GOT offset G(S) & 0xffc. */
1589 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1590 2, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_dont, /* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1598 FALSE, /* partial_inplace */
1599 0xffc, /* src_mask */
1600 0xffc, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602
1603 /* ADD: GOT offset G(S) & 0xfff. */
1604 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1605 0, /* rightshift */
1606 2, /* size (0 = byte, 1 = short, 2 = long) */
1607 12, /* bitsize */
1608 FALSE, /* pc_relative */
1609 0, /* bitpos */
1610 complain_overflow_dont, /* complain_on_overflow */
1611 bfd_elf_generic_reloc, /* special_function */
1612 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1613 FALSE, /* partial_inplace */
1614 0xfff, /* src_mask */
1615 0xfff, /* dst_mask */
1616 FALSE), /* pcrel_offset */
1617
1618 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1619 16, /* rightshift */
1620 2, /* size (0 = byte, 1 = short, 2 = long) */
1621 16, /* bitsize */
1622 FALSE, /* pc_relative */
1623 0, /* bitpos */
1624 complain_overflow_unsigned, /* complain_on_overflow */
1625 bfd_elf_generic_reloc, /* special_function */
1626 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1627 FALSE, /* partial_inplace */
1628 0xffff, /* src_mask */
1629 0xffff, /* dst_mask */
1630 FALSE), /* pcrel_offset */
1631
1632 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1633 0, /* rightshift */
1634 2, /* size (0 = byte, 1 = short, 2 = long) */
1635 16, /* bitsize */
1636 FALSE, /* pc_relative */
1637 0, /* bitpos */
1638 complain_overflow_dont, /* complain_on_overflow */
1639 bfd_elf_generic_reloc, /* special_function */
1640 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1641 FALSE, /* partial_inplace */
1642 0xffff, /* src_mask */
1643 0xffff, /* dst_mask */
1644 FALSE), /* pcrel_offset */
1645
1646 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1647 0, /* rightshift */
1648 2, /* size (0 = byte, 1 = short, 2 = long) */
1649 12, /* bitsize */
1650 FALSE, /* pc_relative */
1651 0, /* bitpos */
1652 complain_overflow_dont, /* complain_on_overflow */
1653 bfd_elf_generic_reloc, /* special_function */
1654 AARCH64_R_STR (TLSDESC_LDR), /* name */
1655 FALSE, /* partial_inplace */
1656 0x0, /* src_mask */
1657 0x0, /* dst_mask */
1658 FALSE), /* pcrel_offset */
1659
1660 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1661 0, /* rightshift */
1662 2, /* size (0 = byte, 1 = short, 2 = long) */
1663 12, /* bitsize */
1664 FALSE, /* pc_relative */
1665 0, /* bitpos */
1666 complain_overflow_dont, /* complain_on_overflow */
1667 bfd_elf_generic_reloc, /* special_function */
1668 AARCH64_R_STR (TLSDESC_ADD), /* name */
1669 FALSE, /* partial_inplace */
1670 0x0, /* src_mask */
1671 0x0, /* dst_mask */
1672 FALSE), /* pcrel_offset */
1673
1674 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1675 0, /* rightshift */
1676 2, /* size (0 = byte, 1 = short, 2 = long) */
1677 0, /* bitsize */
1678 FALSE, /* pc_relative */
1679 0, /* bitpos */
1680 complain_overflow_dont, /* complain_on_overflow */
1681 bfd_elf_generic_reloc, /* special_function */
1682 AARCH64_R_STR (TLSDESC_CALL), /* name */
1683 FALSE, /* partial_inplace */
1684 0x0, /* src_mask */
1685 0x0, /* dst_mask */
1686 FALSE), /* pcrel_offset */
1687
1688 HOWTO (AARCH64_R (COPY), /* type */
1689 0, /* rightshift */
1690 2, /* size (0 = byte, 1 = short, 2 = long) */
1691 64, /* bitsize */
1692 FALSE, /* pc_relative */
1693 0, /* bitpos */
1694 complain_overflow_bitfield, /* complain_on_overflow */
1695 bfd_elf_generic_reloc, /* special_function */
1696 AARCH64_R_STR (COPY), /* name */
1697 TRUE, /* partial_inplace */
1698 0xffffffff, /* src_mask */
1699 0xffffffff, /* dst_mask */
1700 FALSE), /* pcrel_offset */
1701
1702 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1703 0, /* rightshift */
1704 2, /* size (0 = byte, 1 = short, 2 = long) */
1705 64, /* bitsize */
1706 FALSE, /* pc_relative */
1707 0, /* bitpos */
1708 complain_overflow_bitfield, /* complain_on_overflow */
1709 bfd_elf_generic_reloc, /* special_function */
1710 AARCH64_R_STR (GLOB_DAT), /* name */
1711 TRUE, /* partial_inplace */
1712 0xffffffff, /* src_mask */
1713 0xffffffff, /* dst_mask */
1714 FALSE), /* pcrel_offset */
1715
1716 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1717 0, /* rightshift */
1718 2, /* size (0 = byte, 1 = short, 2 = long) */
1719 64, /* bitsize */
1720 FALSE, /* pc_relative */
1721 0, /* bitpos */
1722 complain_overflow_bitfield, /* complain_on_overflow */
1723 bfd_elf_generic_reloc, /* special_function */
1724 AARCH64_R_STR (JUMP_SLOT), /* name */
1725 TRUE, /* partial_inplace */
1726 0xffffffff, /* src_mask */
1727 0xffffffff, /* dst_mask */
1728 FALSE), /* pcrel_offset */
1729
1730 HOWTO (AARCH64_R (RELATIVE), /* type */
1731 0, /* rightshift */
1732 2, /* size (0 = byte, 1 = short, 2 = long) */
1733 64, /* bitsize */
1734 FALSE, /* pc_relative */
1735 0, /* bitpos */
1736 complain_overflow_bitfield, /* complain_on_overflow */
1737 bfd_elf_generic_reloc, /* special_function */
1738 AARCH64_R_STR (RELATIVE), /* name */
1739 TRUE, /* partial_inplace */
1740 ALL_ONES, /* src_mask */
1741 ALL_ONES, /* dst_mask */
1742 FALSE), /* pcrel_offset */
1743
1744 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1745 0, /* rightshift */
1746 2, /* size (0 = byte, 1 = short, 2 = long) */
1747 64, /* bitsize */
1748 FALSE, /* pc_relative */
1749 0, /* bitpos */
1750 complain_overflow_dont, /* complain_on_overflow */
1751 bfd_elf_generic_reloc, /* special_function */
1752 #if ARCH_SIZE == 64
1753 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1754 #else
1755 AARCH64_R_STR (TLS_DTPMOD), /* name */
1756 #endif
1757 FALSE, /* partial_inplace */
1758 0, /* src_mask */
1759 ALL_ONES, /* dst_mask */
1760 FALSE), /* pcrel_offset */
1761
1762 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1763 0, /* rightshift */
1764 2, /* size (0 = byte, 1 = short, 2 = long) */
1765 64, /* bitsize */
1766 FALSE, /* pc_relative */
1767 0, /* bitpos */
1768 complain_overflow_dont, /* complain_on_overflow */
1769 bfd_elf_generic_reloc, /* special_function */
1770 #if ARCH_SIZE == 64
1771 AARCH64_R_STR (TLS_DTPREL64), /* name */
1772 #else
1773 AARCH64_R_STR (TLS_DTPREL), /* name */
1774 #endif
1775 FALSE, /* partial_inplace */
1776 0, /* src_mask */
1777 ALL_ONES, /* dst_mask */
1778 FALSE), /* pcrel_offset */
1779
1780 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1781 0, /* rightshift */
1782 2, /* size (0 = byte, 1 = short, 2 = long) */
1783 64, /* bitsize */
1784 FALSE, /* pc_relative */
1785 0, /* bitpos */
1786 complain_overflow_dont, /* complain_on_overflow */
1787 bfd_elf_generic_reloc, /* special_function */
1788 #if ARCH_SIZE == 64
1789 AARCH64_R_STR (TLS_TPREL64), /* name */
1790 #else
1791 AARCH64_R_STR (TLS_TPREL), /* name */
1792 #endif
1793 FALSE, /* partial_inplace */
1794 0, /* src_mask */
1795 ALL_ONES, /* dst_mask */
1796 FALSE), /* pcrel_offset */
1797
1798 HOWTO (AARCH64_R (TLSDESC), /* type */
1799 0, /* rightshift */
1800 2, /* size (0 = byte, 1 = short, 2 = long) */
1801 64, /* bitsize */
1802 FALSE, /* pc_relative */
1803 0, /* bitpos */
1804 complain_overflow_dont, /* complain_on_overflow */
1805 bfd_elf_generic_reloc, /* special_function */
1806 AARCH64_R_STR (TLSDESC), /* name */
1807 FALSE, /* partial_inplace */
1808 0, /* src_mask */
1809 ALL_ONES, /* dst_mask */
1810 FALSE), /* pcrel_offset */
1811
1812 HOWTO (AARCH64_R (IRELATIVE), /* type */
1813 0, /* rightshift */
1814 2, /* size (0 = byte, 1 = short, 2 = long) */
1815 64, /* bitsize */
1816 FALSE, /* pc_relative */
1817 0, /* bitpos */
1818 complain_overflow_bitfield, /* complain_on_overflow */
1819 bfd_elf_generic_reloc, /* special_function */
1820 AARCH64_R_STR (IRELATIVE), /* name */
1821 FALSE, /* partial_inplace */
1822 0, /* src_mask */
1823 ALL_ONES, /* dst_mask */
1824 FALSE), /* pcrel_offset */
1825
1826 EMPTY_HOWTO (0),
1827 };
1828
1829 static reloc_howto_type elfNN_aarch64_howto_none =
1830 HOWTO (R_AARCH64_NONE, /* type */
1831 0, /* rightshift */
1832 3, /* size (0 = byte, 1 = short, 2 = long) */
1833 0, /* bitsize */
1834 FALSE, /* pc_relative */
1835 0, /* bitpos */
1836 complain_overflow_dont,/* complain_on_overflow */
1837 bfd_elf_generic_reloc, /* special_function */
1838 "R_AARCH64_NONE", /* name */
1839 FALSE, /* partial_inplace */
1840 0, /* src_mask */
1841 0, /* dst_mask */
1842 FALSE); /* pcrel_offset */
1843
1844 /* Given HOWTO, return the bfd internal relocation enumerator. */
1845
1846 static bfd_reloc_code_real_type
1847 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1848 {
1849 const int size
1850 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1851 const ptrdiff_t offset
1852 = howto - elfNN_aarch64_howto_table;
1853
1854 if (offset > 0 && offset < size - 1)
1855 return BFD_RELOC_AARCH64_RELOC_START + offset;
1856
1857 if (howto == &elfNN_aarch64_howto_none)
1858 return BFD_RELOC_AARCH64_NONE;
1859
1860 return BFD_RELOC_AARCH64_RELOC_START;
1861 }
1862
1863 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1864
1865 static bfd_reloc_code_real_type
1866 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1867 {
1868 static bfd_boolean initialized_p = FALSE;
1869 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1870 static unsigned int offsets[R_AARCH64_end];
1871
1872 if (initialized_p == FALSE)
1873 {
1874 unsigned int i;
1875
1876 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1877 if (elfNN_aarch64_howto_table[i].type != 0)
1878 offsets[elfNN_aarch64_howto_table[i].type] = i;
1879
1880 initialized_p = TRUE;
1881 }
1882
1883 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1884 return BFD_RELOC_AARCH64_NONE;
1885
1886 /* PR 17512: file: b371e70a. */
1887 if (r_type >= R_AARCH64_end)
1888 {
1889 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1890 bfd_set_error (bfd_error_bad_value);
1891 return BFD_RELOC_AARCH64_NONE;
1892 }
1893
1894 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1895 }
1896
1897 struct elf_aarch64_reloc_map
1898 {
1899 bfd_reloc_code_real_type from;
1900 bfd_reloc_code_real_type to;
1901 };
1902
1903 /* Map bfd generic reloc to AArch64-specific reloc. */
1904 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1905 {
1906 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1907
1908 /* Basic data relocations. */
1909 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1910 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1911 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1912 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1913 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1914 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1915 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1916 };
1917
1918 /* Given the bfd internal relocation enumerator in CODE, return the
1919 corresponding howto entry. */
1920
1921 static reloc_howto_type *
1922 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1923 {
1924 unsigned int i;
1925
1926 /* Convert bfd generic reloc to AArch64-specific reloc. */
1927 if (code < BFD_RELOC_AARCH64_RELOC_START
1928 || code > BFD_RELOC_AARCH64_RELOC_END)
1929 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1930 if (elf_aarch64_reloc_map[i].from == code)
1931 {
1932 code = elf_aarch64_reloc_map[i].to;
1933 break;
1934 }
1935
1936 if (code > BFD_RELOC_AARCH64_RELOC_START
1937 && code < BFD_RELOC_AARCH64_RELOC_END)
1938 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1939 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1940
1941 if (code == BFD_RELOC_AARCH64_NONE)
1942 return &elfNN_aarch64_howto_none;
1943
1944 return NULL;
1945 }
1946
1947 static reloc_howto_type *
1948 elfNN_aarch64_howto_from_type (unsigned int r_type)
1949 {
1950 bfd_reloc_code_real_type val;
1951 reloc_howto_type *howto;
1952
1953 #if ARCH_SIZE == 32
1954 if (r_type > 256)
1955 {
1956 bfd_set_error (bfd_error_bad_value);
1957 return NULL;
1958 }
1959 #endif
1960
1961 if (r_type == R_AARCH64_NONE)
1962 return &elfNN_aarch64_howto_none;
1963
1964 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1965 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1966
1967 if (howto != NULL)
1968 return howto;
1969
1970 bfd_set_error (bfd_error_bad_value);
1971 return NULL;
1972 }
1973
1974 static void
1975 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1976 Elf_Internal_Rela *elf_reloc)
1977 {
1978 unsigned int r_type;
1979
1980 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1981 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1982 }
1983
1984 static reloc_howto_type *
1985 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1986 bfd_reloc_code_real_type code)
1987 {
1988 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1989
1990 if (howto != NULL)
1991 return howto;
1992
1993 bfd_set_error (bfd_error_bad_value);
1994 return NULL;
1995 }
1996
1997 static reloc_howto_type *
1998 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1999 const char *r_name)
2000 {
2001 unsigned int i;
2002
2003 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
2004 if (elfNN_aarch64_howto_table[i].name != NULL
2005 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
2006 return &elfNN_aarch64_howto_table[i];
2007
2008 return NULL;
2009 }
2010
2011 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
2012 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
2013 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
2014 #define TARGET_BIG_NAME "elfNN-bigaarch64"
2015
2016 /* The linker script knows the section names for placement.
2017 The entry_names are used to do simple name mangling on the stubs.
2018 Given a function name, and its type, the stub can be found. The
2019 name can be changed. The only requirement is that the %s be present. */
2020 #define STUB_ENTRY_NAME "__%s_veneer"
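/* For example, with the default STUB_ENTRY_NAME a long-branch veneer
   created for a function named "foo" gets the local symbol name
   "__foo_veneer" ("foo" is only an illustrative symbol name). */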
2021
2022 /* The name of the dynamic interpreter. This is put in the .interp
2023 section. */
2024 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
2025
2026 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
2027 (((1 << 25) - 1) << 2)
2028 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
2029 (-((1 << 25) << 2))
2030
2031 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
2032 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
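/* The two branch limits above evaluate to +0x7fffffc and -0x8000000 bytes,
   i.e. the +/-128MiB reach of a 26-bit B/BL immediate scaled by 4.  The
   ADRP limits are expressed in 4KiB pages: a signed 21-bit page count,
   giving a reach of roughly +/-4GiB from the page of the ADRP itself. */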
2033
2034 static int
2035 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
2036 {
2037 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
2038 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
2039 }
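/* Worked example (hypothetical addresses): with value = 0x12345678 and
   place = 0x00400000, PG (value) - PG (place) = 0x11f45000, which is
   0x11f45 pages after the shift by 12 -- comfortably inside the
   [-0x100000, 0xfffff] page range, so the ADRP form is usable. */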
2040
2041 static int
2042 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
2043 {
2044 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
2045 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
2046 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
2047 }
2048
2049 static const uint32_t aarch64_adrp_branch_stub [] =
2050 {
2051 0x90000010, /* adrp ip0, X */
2052 /* R_AARCH64_ADR_HI21_PCREL(X) */
2053 0x91000210, /* add ip0, ip0, :lo12:X */
2054 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
2055 0xd61f0200, /* br ip0 */
2056 };
2057
2058 static const uint32_t aarch64_long_branch_stub[] =
2059 {
2060 #if ARCH_SIZE == 64
2061 0x58000090, /* ldr ip0, 1f */
2062 #else
2063 0x18000090, /* ldr wip0, 1f */
2064 #endif
2065 0x10000011, /* adr ip1, #0 */
2066 0x8b110210, /* add ip0, ip0, ip1 */
2067 0xd61f0200, /* br ip0 */
2068 0x00000000, /* 1: .xword or .word
2069 R_AARCH64_PRELNN(X) + 12
2070 */
2071 0x00000000,
2072 };
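/* The literal at 1f is given the value PRELNN(X) + 12, i.e. X minus the
   address of the "adr ip1, #0" instruction, since the literal sits 12
   bytes after that adr.  At run time the adr materialises its own address
   in ip1, the add then reconstructs the absolute address of X, and the br
   transfers control there. */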
2073
2074 static const uint32_t aarch64_erratum_835769_stub[] =
2075 {
2076 0x00000000, /* Placeholder for multiply accumulate. */
2077 0x14000000, /* b <label> */
2078 };
2079
2080 static const uint32_t aarch64_erratum_843419_stub[] =
2081 {
2082 0x00000000, /* Placeholder for LDR instruction. */
2083 0x14000000, /* b <label> */
2084 };
2085
2086 /* Section name for stubs is the associated section name plus this
2087 string. */
2088 #define STUB_SUFFIX ".stub"
2089
2090 enum elf_aarch64_stub_type
2091 {
2092 aarch64_stub_none,
2093 aarch64_stub_adrp_branch,
2094 aarch64_stub_long_branch,
2095 aarch64_stub_erratum_835769_veneer,
2096 aarch64_stub_erratum_843419_veneer,
2097 };
2098
2099 struct elf_aarch64_stub_hash_entry
2100 {
2101 /* Base hash table entry structure. */
2102 struct bfd_hash_entry root;
2103
2104 /* The stub section. */
2105 asection *stub_sec;
2106
2107 /* Offset within stub_sec of the beginning of this stub. */
2108 bfd_vma stub_offset;
2109
2110 /* Given the symbol's value and its section we can determine its final
2111 value when building the stubs (so the stub knows where to jump). */
2112 bfd_vma target_value;
2113 asection *target_section;
2114
2115 enum elf_aarch64_stub_type stub_type;
2116
2117 /* The symbol table entry, if any, that this was derived from. */
2118 struct elf_aarch64_link_hash_entry *h;
2119
2120 /* Destination symbol type */
2121 unsigned char st_type;
2122
2123 /* Where this stub is being called from, or, in the case of combined
2124 stub sections, the first input section in the group. */
2125 asection *id_sec;
2126
2127 /* The name for the local symbol at the start of this stub. The
2128 stub name in the hash table has to be unique; this does not, so
2129 it can be friendlier. */
2130 char *output_name;
2131
2132 /* The instruction which caused this stub to be generated (only valid for
2133 erratum 835769 workaround stubs at present). */
2134 uint32_t veneered_insn;
2135
2136 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
2137 bfd_vma adrp_offset;
2138 };
2139
2140 /* Used to build a map of a section. This is required for mixed-endian
2141 code/data. */
2142
2143 typedef struct elf_elf_section_map
2144 {
2145 bfd_vma vma;
2146 char type;
2147 }
2148 elf_aarch64_section_map;
2149
2150
2151 typedef struct _aarch64_elf_section_data
2152 {
2153 struct bfd_elf_section_data elf;
2154 unsigned int mapcount;
2155 unsigned int mapsize;
2156 elf_aarch64_section_map *map;
2157 }
2158 _aarch64_elf_section_data;
2159
2160 #define elf_aarch64_section_data(sec) \
2161 ((_aarch64_elf_section_data *) elf_section_data (sec))
2162
2163 /* The size of the thread control block, which is defined to be two pointers. */
2164 #define TCB_SIZE (ARCH_SIZE/8)*2
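/* This evaluates to 16 bytes when ARCH_SIZE is 64 and 8 bytes when it is
   32; under the AArch64 TLS layout the first TLS block follows this
   control block, which the thread pointer addresses. */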
2165
2166 struct elf_aarch64_local_symbol
2167 {
2168 unsigned int got_type;
2169 bfd_signed_vma got_refcount;
2170 bfd_vma got_offset;
2171
2172 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
2173 offset is from the end of the jump table and reserved entries
2174 within the PLTGOT.
2175
2176 The magic value (bfd_vma) -1 indicates that an offset has not been
2177 allocated. */
2178 bfd_vma tlsdesc_got_jump_table_offset;
2179 };
2180
2181 struct elf_aarch64_obj_tdata
2182 {
2183 struct elf_obj_tdata root;
2184
2185 /* local symbol descriptors */
2186 struct elf_aarch64_local_symbol *locals;
2187
2188 /* Zero to warn when linking objects with incompatible enum sizes. */
2189 int no_enum_size_warning;
2190
2191 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2192 int no_wchar_size_warning;
2193 };
2194
2195 #define elf_aarch64_tdata(bfd) \
2196 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
2197
2198 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
2199
2200 #define is_aarch64_elf(bfd) \
2201 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2202 && elf_tdata (bfd) != NULL \
2203 && elf_object_id (bfd) == AARCH64_ELF_DATA)
2204
2205 static bfd_boolean
2206 elfNN_aarch64_mkobject (bfd *abfd)
2207 {
2208 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
2209 AARCH64_ELF_DATA);
2210 }
2211
2212 #define elf_aarch64_hash_entry(ent) \
2213 ((struct elf_aarch64_link_hash_entry *)(ent))
2214
2215 #define GOT_UNKNOWN 0
2216 #define GOT_NORMAL 1
2217 #define GOT_TLS_GD 2
2218 #define GOT_TLS_IE 4
2219 #define GOT_TLSDESC_GD 8
2220
2221 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
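/* got_type is a bit mask, so a symbol referenced through both an IE and a
   general-dynamic TLS sequence ends up with, for example,
   got_type == (GOT_TLS_GD | GOT_TLS_IE); GOT_TLS_GD_ANY_P is true as long
   as either general-dynamic flavour (traditional or descriptor) is set. */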
2222
2223 /* AArch64 ELF linker hash entry. */
2224 struct elf_aarch64_link_hash_entry
2225 {
2226 struct elf_link_hash_entry root;
2227
2228 /* Track dynamic relocs copied for this symbol. */
2229 struct elf_dyn_relocs *dyn_relocs;
2230
2231 /* Since PLT entries have variable size, we need to record the
2232 index into .got.plt instead of recomputing it from the PLT
2233 offset. */
2234 bfd_signed_vma plt_got_offset;
2235
2236 /* Bit mask representing the type of GOT entry(s) if any required by
2237 this symbol. */
2238 unsigned int got_type;
2239
2240 /* A pointer to the most recently used stub hash entry against this
2241 symbol. */
2242 struct elf_aarch64_stub_hash_entry *stub_cache;
2243
2244 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
2245 is from the end of the jump table and reserved entries within the PLTGOT.
2246
2247 The magic value (bfd_vma) -1 indicates that an offset has not
2248 been allocated. */
2249 bfd_vma tlsdesc_got_jump_table_offset;
2250 };
2251
2252 static unsigned int
2253 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
2254 bfd *abfd,
2255 unsigned long r_symndx)
2256 {
2257 if (h)
2258 return elf_aarch64_hash_entry (h)->got_type;
2259
2260 if (! elf_aarch64_locals (abfd))
2261 return GOT_UNKNOWN;
2262
2263 return elf_aarch64_locals (abfd)[r_symndx].got_type;
2264 }
2265
2266 /* Get the AArch64 elf linker hash table from a link_info structure. */
2267 #define elf_aarch64_hash_table(info) \
2268 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
2269
2270 #define aarch64_stub_hash_lookup(table, string, create, copy) \
2271 ((struct elf_aarch64_stub_hash_entry *) \
2272 bfd_hash_lookup ((table), (string), (create), (copy)))
2273
2274 /* AArch64 ELF linker hash table. */
2275 struct elf_aarch64_link_hash_table
2276 {
2277 /* The main hash table. */
2278 struct elf_link_hash_table root;
2279
2280 /* Nonzero to force PIC branch veneers. */
2281 int pic_veneer;
2282
2283 /* Fix erratum 835769. */
2284 int fix_erratum_835769;
2285
2286 /* Fix erratum 843419. */
2287 int fix_erratum_843419;
2288
2289 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
2290 int fix_erratum_843419_adr;
2291
2292 /* The number of bytes in the initial entry in the PLT. */
2293 bfd_size_type plt_header_size;
2294
2295 /* The number of bytes in the subsequent PLT entries. */
2296 bfd_size_type plt_entry_size;
2297
2298 /* Short-cuts to get to dynamic linker sections. */
2299 asection *sdynbss;
2300 asection *srelbss;
2301
2302 /* Small local sym cache. */
2303 struct sym_cache sym_cache;
2304
2305 /* For convenience in allocate_dynrelocs. */
2306 bfd *obfd;
2307
2308 /* The amount of space used by the reserved portion of the sgotplt
2309 section, plus whatever space is used by the jump slots. */
2310 bfd_vma sgotplt_jump_table_size;
2311
2312 /* The stub hash table. */
2313 struct bfd_hash_table stub_hash_table;
2314
2315 /* Linker stub bfd. */
2316 bfd *stub_bfd;
2317
2318 /* Linker call-backs. */
2319 asection *(*add_stub_section) (const char *, asection *);
2320 void (*layout_sections_again) (void);
2321
2322 /* Array to keep track of which stub sections have been created, and
2323 information on stub grouping. */
2324 struct map_stub
2325 {
2326 /* This is the section to which stubs in the group will be
2327 attached. */
2328 asection *link_sec;
2329 /* The stub section. */
2330 asection *stub_sec;
2331 } *stub_group;
2332
2333 /* Assorted information used by elfNN_aarch64_size_stubs. */
2334 unsigned int bfd_count;
2335 unsigned int top_index;
2336 asection **input_list;
2337
2338 /* The offset into splt of the PLT entry for the TLS descriptor
2339 resolver. Special values are 0, if not necessary (or not found
2340 to be necessary yet), and -1 if needed but not determined
2341 yet. */
2342 bfd_vma tlsdesc_plt;
2343
2344 /* The GOT offset for the lazy trampoline. Communicated to the
2345 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2346 indicates an offset is not allocated. */
2347 bfd_vma dt_tlsdesc_got;
2348
2349 /* Used by local STT_GNU_IFUNC symbols. */
2350 htab_t loc_hash_table;
2351 void * loc_hash_memory;
2352 };
2353
2354 /* Create an entry in an AArch64 ELF linker hash table. */
2355
2356 static struct bfd_hash_entry *
2357 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2358 struct bfd_hash_table *table,
2359 const char *string)
2360 {
2361 struct elf_aarch64_link_hash_entry *ret =
2362 (struct elf_aarch64_link_hash_entry *) entry;
2363
2364 /* Allocate the structure if it has not already been allocated by a
2365 subclass. */
2366 if (ret == NULL)
2367 ret = bfd_hash_allocate (table,
2368 sizeof (struct elf_aarch64_link_hash_entry));
2369 if (ret == NULL)
2370 return (struct bfd_hash_entry *) ret;
2371
2372 /* Call the allocation method of the superclass. */
2373 ret = ((struct elf_aarch64_link_hash_entry *)
2374 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2375 table, string));
2376 if (ret != NULL)
2377 {
2378 ret->dyn_relocs = NULL;
2379 ret->got_type = GOT_UNKNOWN;
2380 ret->plt_got_offset = (bfd_vma) - 1;
2381 ret->stub_cache = NULL;
2382 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2383 }
2384
2385 return (struct bfd_hash_entry *) ret;
2386 }
2387
2388 /* Initialize an entry in the stub hash table. */
2389
2390 static struct bfd_hash_entry *
2391 stub_hash_newfunc (struct bfd_hash_entry *entry,
2392 struct bfd_hash_table *table, const char *string)
2393 {
2394 /* Allocate the structure if it has not already been allocated by a
2395 subclass. */
2396 if (entry == NULL)
2397 {
2398 entry = bfd_hash_allocate (table,
2399 sizeof (struct
2400 elf_aarch64_stub_hash_entry));
2401 if (entry == NULL)
2402 return entry;
2403 }
2404
2405 /* Call the allocation method of the superclass. */
2406 entry = bfd_hash_newfunc (entry, table, string);
2407 if (entry != NULL)
2408 {
2409 struct elf_aarch64_stub_hash_entry *eh;
2410
2411 /* Initialize the local fields. */
2412 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2413 eh->adrp_offset = 0;
2414 eh->stub_sec = NULL;
2415 eh->stub_offset = 0;
2416 eh->target_value = 0;
2417 eh->target_section = NULL;
2418 eh->stub_type = aarch64_stub_none;
2419 eh->h = NULL;
2420 eh->id_sec = NULL;
2421 }
2422
2423 return entry;
2424 }
2425
2426 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2427 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2428 as global symbols. We reuse indx and dynstr_index for the local symbol
2429 hash since they aren't used by global symbols in this backend. */
2430
2431 static hashval_t
2432 elfNN_aarch64_local_htab_hash (const void *ptr)
2433 {
2434 struct elf_link_hash_entry *h
2435 = (struct elf_link_hash_entry *) ptr;
2436 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2437 }
2438
2439 /* Compare local hash entries. */
2440
2441 static int
2442 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2443 {
2444 struct elf_link_hash_entry *h1
2445 = (struct elf_link_hash_entry *) ptr1;
2446 struct elf_link_hash_entry *h2
2447 = (struct elf_link_hash_entry *) ptr2;
2448
2449 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2450 }
2451
2452 /* Find and/or create a hash entry for a local symbol. */
2453
2454 static struct elf_link_hash_entry *
2455 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2456 bfd *abfd, const Elf_Internal_Rela *rel,
2457 bfd_boolean create)
2458 {
2459 struct elf_aarch64_link_hash_entry e, *ret;
2460 asection *sec = abfd->sections;
2461 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2462 ELFNN_R_SYM (rel->r_info));
2463 void **slot;
2464
2465 e.root.indx = sec->id;
2466 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2467 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2468 create ? INSERT : NO_INSERT);
2469
2470 if (!slot)
2471 return NULL;
2472
2473 if (*slot)
2474 {
2475 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2476 return &ret->root;
2477 }
2478
2479 ret = (struct elf_aarch64_link_hash_entry *)
2480 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2481 sizeof (struct elf_aarch64_link_hash_entry));
2482 if (ret)
2483 {
2484 memset (ret, 0, sizeof (*ret));
2485 ret->root.indx = sec->id;
2486 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2487 ret->root.dynindx = -1;
2488 *slot = ret;
2489 }
2490 return &ret->root;
2491 }
2492
2493 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2494
2495 static void
2496 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2497 struct elf_link_hash_entry *dir,
2498 struct elf_link_hash_entry *ind)
2499 {
2500 struct elf_aarch64_link_hash_entry *edir, *eind;
2501
2502 edir = (struct elf_aarch64_link_hash_entry *) dir;
2503 eind = (struct elf_aarch64_link_hash_entry *) ind;
2504
2505 if (eind->dyn_relocs != NULL)
2506 {
2507 if (edir->dyn_relocs != NULL)
2508 {
2509 struct elf_dyn_relocs **pp;
2510 struct elf_dyn_relocs *p;
2511
2512 /* Add reloc counts against the indirect sym to the direct sym
2513 list. Merge any entries against the same section. */
2514 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2515 {
2516 struct elf_dyn_relocs *q;
2517
2518 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2519 if (q->sec == p->sec)
2520 {
2521 q->pc_count += p->pc_count;
2522 q->count += p->count;
2523 *pp = p->next;
2524 break;
2525 }
2526 if (q == NULL)
2527 pp = &p->next;
2528 }
2529 *pp = edir->dyn_relocs;
2530 }
2531
2532 edir->dyn_relocs = eind->dyn_relocs;
2533 eind->dyn_relocs = NULL;
2534 }
2535
2536 if (ind->root.type == bfd_link_hash_indirect)
2537 {
2538 /* Copy over PLT info. */
2539 if (dir->got.refcount <= 0)
2540 {
2541 edir->got_type = eind->got_type;
2542 eind->got_type = GOT_UNKNOWN;
2543 }
2544 }
2545
2546 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2547 }
2548
2549 /* Destroy an AArch64 elf linker hash table. */
2550
2551 static void
2552 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2553 {
2554 struct elf_aarch64_link_hash_table *ret
2555 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2556
2557 if (ret->loc_hash_table)
2558 htab_delete (ret->loc_hash_table);
2559 if (ret->loc_hash_memory)
2560 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2561
2562 bfd_hash_table_free (&ret->stub_hash_table);
2563 _bfd_elf_link_hash_table_free (obfd);
2564 }
2565
2566 /* Create an AArch64 elf linker hash table. */
2567
2568 static struct bfd_link_hash_table *
2569 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2570 {
2571 struct elf_aarch64_link_hash_table *ret;
2572 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2573
2574 ret = bfd_zmalloc (amt);
2575 if (ret == NULL)
2576 return NULL;
2577
2578 if (!_bfd_elf_link_hash_table_init
2579 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2580 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2581 {
2582 free (ret);
2583 return NULL;
2584 }
2585
2586 ret->plt_header_size = PLT_ENTRY_SIZE;
2587 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2588 ret->obfd = abfd;
2589 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2590
2591 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2592 sizeof (struct elf_aarch64_stub_hash_entry)))
2593 {
2594 _bfd_elf_link_hash_table_free (abfd);
2595 return NULL;
2596 }
2597
2598 ret->loc_hash_table = htab_try_create (1024,
2599 elfNN_aarch64_local_htab_hash,
2600 elfNN_aarch64_local_htab_eq,
2601 NULL);
2602 ret->loc_hash_memory = objalloc_create ();
2603 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2604 {
2605 elfNN_aarch64_link_hash_table_free (abfd);
2606 return NULL;
2607 }
2608 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2609
2610 return &ret->root.root;
2611 }
2612
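/* Resolve and apply a relocation of type R_TYPE at OFFSET within
   INPUT_SECTION, patching the section contents with VALUE.  Used below to
   fix up the fields of freshly built branch stubs. */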
2613 static bfd_boolean
2614 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2615 bfd_vma offset, bfd_vma value)
2616 {
2617 reloc_howto_type *howto;
2618 bfd_vma place;
2619
2620 howto = elfNN_aarch64_howto_from_type (r_type);
2621 place = (input_section->output_section->vma + input_section->output_offset
2622 + offset);
2623
2624 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2625 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2626 return _bfd_aarch64_elf_put_addend (input_bfd,
2627 input_section->contents + offset, r_type,
2628 howto, value);
2629 }
2630
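/* Pick between the two branch stub flavours: the short adrp/add/br
   sequence when the destination is within ADRP range of the stub, or the
   literal-based long branch otherwise. */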
2631 static enum elf_aarch64_stub_type
2632 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2633 {
2634 if (aarch64_valid_for_adrp_p (value, place))
2635 return aarch64_stub_adrp_branch;
2636 return aarch64_stub_long_branch;
2637 }
2638
2639 /* Determine the type of stub needed, if any, for a call. */
2640
2641 static enum elf_aarch64_stub_type
2642 aarch64_type_of_stub (struct bfd_link_info *info,
2643 asection *input_sec,
2644 const Elf_Internal_Rela *rel,
2645 asection *sym_sec,
2646 unsigned char st_type,
2647 struct elf_aarch64_link_hash_entry *hash,
2648 bfd_vma destination)
2649 {
2650 bfd_vma location;
2651 bfd_signed_vma branch_offset;
2652 unsigned int r_type;
2653 struct elf_aarch64_link_hash_table *globals;
2654 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2655 bfd_boolean via_plt_p;
2656
2657 if (st_type != STT_FUNC
2658 && (sym_sec != bfd_abs_section_ptr))
2659 return stub_type;
2660
2661 globals = elf_aarch64_hash_table (info);
2662 via_plt_p = (globals->root.splt != NULL && hash != NULL
2663 && hash->root.plt.offset != (bfd_vma) - 1);
2664 /* Make sure call to plt stub can fit into the branch range. */
2665 if (via_plt_p)
2666 destination = (globals->root.splt->output_section->vma
2667 + globals->root.splt->output_offset
2668 + hash->root.plt.offset);
2669
2670 /* Determine where the call point is. */
2671 location = (input_sec->output_offset
2672 + input_sec->output_section->vma + rel->r_offset);
2673
2674 branch_offset = (bfd_signed_vma) (destination - location);
2675
2676 r_type = ELFNN_R_TYPE (rel->r_info);
2677
2678 /* We don't want to redirect any old unconditional jump in this way,
2679 only one which is being used for a sibcall, where it is
2680 acceptable for the IP0 and IP1 registers to be clobbered. */
2681 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2682 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2683 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2684 {
2685 stub_type = aarch64_stub_long_branch;
2686 }
2687
2688 return stub_type;
2689 }
2690
2691 /* Build a name for an entry in the stub hash table. */
2692
2693 static char *
2694 elfNN_aarch64_stub_name (const asection *input_section,
2695 const asection *sym_sec,
2696 const struct elf_aarch64_link_hash_entry *hash,
2697 const Elf_Internal_Rela *rel)
2698 {
2699 char *stub_name;
2700 bfd_size_type len;
2701
2702 if (hash)
2703 {
2704 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2705 stub_name = bfd_malloc (len);
2706 if (stub_name != NULL)
2707 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2708 (unsigned int) input_section->id,
2709 hash->root.root.root.string,
2710 rel->r_addend);
2711 }
2712 else
2713 {
2714 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2715 stub_name = bfd_malloc (len);
2716 if (stub_name != NULL)
2717 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2718 (unsigned int) input_section->id,
2719 (unsigned int) sym_sec->id,
2720 (unsigned int) ELFNN_R_SYM (rel->r_info),
2721 rel->r_addend);
2722 }
2723
2724 return stub_name;
2725 }
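/* As an illustration (section id and addend invented for the example), a
   call to a global symbol "bar" from a group whose first section has id
   0x2a with a zero addend yields the stub name "0000002a_bar+0", while a
   local symbol's name encodes the symbol's section id and index instead. */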
2726
2727 /* Look up an entry in the stub hash. Stub entries are cached because
2728 creating the stub name takes a bit of time. */
2729
2730 static struct elf_aarch64_stub_hash_entry *
2731 elfNN_aarch64_get_stub_entry (const asection *input_section,
2732 const asection *sym_sec,
2733 struct elf_link_hash_entry *hash,
2734 const Elf_Internal_Rela *rel,
2735 struct elf_aarch64_link_hash_table *htab)
2736 {
2737 struct elf_aarch64_stub_hash_entry *stub_entry;
2738 struct elf_aarch64_link_hash_entry *h =
2739 (struct elf_aarch64_link_hash_entry *) hash;
2740 const asection *id_sec;
2741
2742 if ((input_section->flags & SEC_CODE) == 0)
2743 return NULL;
2744
2745 /* If this input section is part of a group of sections sharing one
2746 stub section, then use the id of the first section in the group.
2747 Stub names need to include a section id, as there may well be
2748 more than one stub used to reach, say, printf, and we need to
2749 distinguish between them. */
2750 id_sec = htab->stub_group[input_section->id].link_sec;
2751
2752 if (h != NULL && h->stub_cache != NULL
2753 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2754 {
2755 stub_entry = h->stub_cache;
2756 }
2757 else
2758 {
2759 char *stub_name;
2760
2761 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2762 if (stub_name == NULL)
2763 return NULL;
2764
2765 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2766 stub_name, FALSE, FALSE);
2767 if (h != NULL)
2768 h->stub_cache = stub_entry;
2769
2770 free (stub_name);
2771 }
2772
2773 return stub_entry;
2774 }
2775
2776
2777 /* Create a stub section. */
2778
2779 static asection *
2780 _bfd_aarch64_create_stub_section (asection *section,
2781 struct elf_aarch64_link_hash_table *htab)
2782 {
2783 size_t namelen;
2784 bfd_size_type len;
2785 char *s_name;
2786
2787 namelen = strlen (section->name);
2788 len = namelen + sizeof (STUB_SUFFIX);
2789 s_name = bfd_alloc (htab->stub_bfd, len);
2790 if (s_name == NULL)
2791 return NULL;
2792
2793 memcpy (s_name, section->name, namelen);
2794 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2795 return (*htab->add_stub_section) (s_name, section);
2796 }
2797
2798
2799 /* Find or create a stub section for a link section.
2800
2801 Find or create the stub section used to collect stubs attached to
2802 the specified link section. */
2803
2804 static asection *
2805 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2806 struct elf_aarch64_link_hash_table *htab)
2807 {
2808 if (htab->stub_group[link_section->id].stub_sec == NULL)
2809 htab->stub_group[link_section->id].stub_sec
2810 = _bfd_aarch64_create_stub_section (link_section, htab);
2811 return htab->stub_group[link_section->id].stub_sec;
2812 }
2813
2814
2815 /* Find or create a stub section in the stub group for an input
2816 section. */
2817
2818 static asection *
2819 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2820 struct elf_aarch64_link_hash_table *htab)
2821 {
2822 asection *link_sec = htab->stub_group[section->id].link_sec;
2823 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2824 }
2825
2826
2827 /* Add a new stub entry in the stub group associated with an input
2828 section to the stub hash. Not all fields of the new stub entry are
2829 initialised. */
2830
2831 static struct elf_aarch64_stub_hash_entry *
2832 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2833 asection *section,
2834 struct elf_aarch64_link_hash_table *htab)
2835 {
2836 asection *link_sec;
2837 asection *stub_sec;
2838 struct elf_aarch64_stub_hash_entry *stub_entry;
2839
2840 link_sec = htab->stub_group[section->id].link_sec;
2841 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2842
2843 /* Enter this entry into the linker stub hash table. */
2844 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2845 TRUE, FALSE);
2846 if (stub_entry == NULL)
2847 {
2848 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2849 section->owner, stub_name);
2850 return NULL;
2851 }
2852
2853 stub_entry->stub_sec = stub_sec;
2854 stub_entry->stub_offset = 0;
2855 stub_entry->id_sec = link_sec;
2856
2857 return stub_entry;
2858 }
2859
2860 /* Add a new stub entry in the final stub section to the stub hash.
2861 Not all fields of the new stub entry are initialised. */
2862
2863 static struct elf_aarch64_stub_hash_entry *
2864 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2865 asection *link_section,
2866 struct elf_aarch64_link_hash_table *htab)
2867 {
2868 asection *stub_sec;
2869 struct elf_aarch64_stub_hash_entry *stub_entry;
2870
2871 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2872 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2873 TRUE, FALSE);
2874 if (stub_entry == NULL)
2875 {
2876 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2877 return NULL;
2878 }
2879
2880 stub_entry->stub_sec = stub_sec;
2881 stub_entry->stub_offset = 0;
2882 stub_entry->id_sec = link_section;
2883
2884 return stub_entry;
2885 }
2886
2887
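/* Build one stub: called via bfd_hash_traverse over the stub hash table.
   Copy the selected template into the stub section, then resolve the
   relocations that point the stub at its final destination. */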
2888 static bfd_boolean
2889 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2890 void *in_arg ATTRIBUTE_UNUSED)
2891 {
2892 struct elf_aarch64_stub_hash_entry *stub_entry;
2893 asection *stub_sec;
2894 bfd *stub_bfd;
2895 bfd_byte *loc;
2896 bfd_vma sym_value;
2897 bfd_vma veneered_insn_loc;
2898 bfd_vma veneer_entry_loc;
2899 bfd_signed_vma branch_offset = 0;
2900 unsigned int template_size;
2901 const uint32_t *template;
2902 unsigned int i;
2903
2904 /* Massage our args to the form they really have. */
2905 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2906
2907 stub_sec = stub_entry->stub_sec;
2908
2909 /* Make a note of the offset within the stubs for this entry. */
2910 stub_entry->stub_offset = stub_sec->size;
2911 loc = stub_sec->contents + stub_entry->stub_offset;
2912
2913 stub_bfd = stub_sec->owner;
2914
2915 /* This is the address of the stub destination. */
2916 sym_value = (stub_entry->target_value
2917 + stub_entry->target_section->output_offset
2918 + stub_entry->target_section->output_section->vma);
2919
2920 if (stub_entry->stub_type == aarch64_stub_long_branch)
2921 {
2922 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2923 + stub_sec->output_offset);
2924
2925 /* See if we can relax the stub. */
2926 if (aarch64_valid_for_adrp_p (sym_value, place))
2927 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2928 }
2929
2930 switch (stub_entry->stub_type)
2931 {
2932 case aarch64_stub_adrp_branch:
2933 template = aarch64_adrp_branch_stub;
2934 template_size = sizeof (aarch64_adrp_branch_stub);
2935 break;
2936 case aarch64_stub_long_branch:
2937 template = aarch64_long_branch_stub;
2938 template_size = sizeof (aarch64_long_branch_stub);
2939 break;
2940 case aarch64_stub_erratum_835769_veneer:
2941 template = aarch64_erratum_835769_stub;
2942 template_size = sizeof (aarch64_erratum_835769_stub);
2943 break;
2944 case aarch64_stub_erratum_843419_veneer:
2945 template = aarch64_erratum_843419_stub;
2946 template_size = sizeof (aarch64_erratum_843419_stub);
2947 break;
2948 default:
2949 abort ();
2950 }
2951
2952 for (i = 0; i < (template_size / sizeof template[0]); i++)
2953 {
2954 bfd_putl32 (template[i], loc);
2955 loc += 4;
2956 }
2957
2958 template_size = (template_size + 7) & ~7;
2959 stub_sec->size += template_size;
2960
2961 switch (stub_entry->stub_type)
2962 {
2963 case aarch64_stub_adrp_branch:
2964 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2965 stub_entry->stub_offset, sym_value))
2966 /* The stub would not have been relaxed if the offset was out
2967 of range. */
2968 BFD_FAIL ();
2969
2970 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2971 stub_entry->stub_offset + 4, sym_value))
2972 BFD_FAIL ();
2973 break;
2974
2975 case aarch64_stub_long_branch:
2976 /* We want the value relative to the address 12 bytes back from the
2977 value itself. */
2978 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2979 stub_entry->stub_offset + 16, sym_value + 12))
2980 BFD_FAIL ();
2981 break;
2982
2983 case aarch64_stub_erratum_835769_veneer:
2984 veneered_insn_loc = stub_entry->target_section->output_section->vma
2985 + stub_entry->target_section->output_offset
2986 + stub_entry->target_value;
2987 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2988 + stub_entry->stub_sec->output_offset
2989 + stub_entry->stub_offset;
2990 branch_offset = veneered_insn_loc - veneer_entry_loc;
2991 branch_offset >>= 2;
2992 branch_offset &= 0x3ffffff;
2993 bfd_putl32 (stub_entry->veneered_insn,
2994 stub_sec->contents + stub_entry->stub_offset);
2995 bfd_putl32 (template[1] | branch_offset,
2996 stub_sec->contents + stub_entry->stub_offset + 4);
2997 break;
2998
2999 case aarch64_stub_erratum_843419_veneer:
3000 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
3001 stub_entry->stub_offset + 4, sym_value + 4))
3002 BFD_FAIL ();
3003 break;
3004
3005 default:
3006 abort ();
3007 }
3008
3009 return TRUE;
3010 }
3011
3012 /* As above, but don't actually build the stub. Just bump offset so
3013 we know stub section sizes. */
3014
3015 static bfd_boolean
3016 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
3017 void *in_arg ATTRIBUTE_UNUSED)
3018 {
3019 struct elf_aarch64_stub_hash_entry *stub_entry;
3020 int size;
3021
3022 /* Massage our args to the form they really have. */
3023 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3024
3025 switch (stub_entry->stub_type)
3026 {
3027 case aarch64_stub_adrp_branch:
3028 size = sizeof (aarch64_adrp_branch_stub);
3029 break;
3030 case aarch64_stub_long_branch:
3031 size = sizeof (aarch64_long_branch_stub);
3032 break;
3033 case aarch64_stub_erratum_835769_veneer:
3034 size = sizeof (aarch64_erratum_835769_stub);
3035 break;
3036 case aarch64_stub_erratum_843419_veneer:
3037 size = sizeof (aarch64_erratum_843419_stub);
3038 break;
3039 default:
3040 abort ();
3041 }
3042
3043 size = (size + 7) & ~7;
3044 stub_entry->stub_sec->size += size;
3045 return TRUE;
3046 }
3047
3048 /* External entry points for sizing and building linker stubs. */
3049
3050 /* Set up various things so that we can make a list of input sections
3051 for each output section included in the link. Returns -1 on error,
3052 0 when no stubs will be needed, and 1 on success. */
3053
3054 int
3055 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
3056 struct bfd_link_info *info)
3057 {
3058 bfd *input_bfd;
3059 unsigned int bfd_count;
3060 unsigned int top_id, top_index;
3061 asection *section;
3062 asection **input_list, **list;
3063 bfd_size_type amt;
3064 struct elf_aarch64_link_hash_table *htab =
3065 elf_aarch64_hash_table (info);
3066
3067 if (!is_elf_hash_table (htab))
3068 return 0;
3069
3070 /* Count the number of input BFDs and find the top input section id. */
3071 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3072 input_bfd != NULL; input_bfd = input_bfd->link.next)
3073 {
3074 bfd_count += 1;
3075 for (section = input_bfd->sections;
3076 section != NULL; section = section->next)
3077 {
3078 if (top_id < section->id)
3079 top_id = section->id;
3080 }
3081 }
3082 htab->bfd_count = bfd_count;
3083
3084 amt = sizeof (struct map_stub) * (top_id + 1);
3085 htab->stub_group = bfd_zmalloc (amt);
3086 if (htab->stub_group == NULL)
3087 return -1;
3088
3089 /* We can't use output_bfd->section_count here to find the top output
3090 section index as some sections may have been removed, and
3091 _bfd_strip_section_from_output doesn't renumber the indices. */
3092 for (section = output_bfd->sections, top_index = 0;
3093 section != NULL; section = section->next)
3094 {
3095 if (top_index < section->index)
3096 top_index = section->index;
3097 }
3098
3099 htab->top_index = top_index;
3100 amt = sizeof (asection *) * (top_index + 1);
3101 input_list = bfd_malloc (amt);
3102 htab->input_list = input_list;
3103 if (input_list == NULL)
3104 return -1;
3105
3106 /* For sections we aren't interested in, mark their entries with a
3107 value we can check later. */
3108 list = input_list + top_index;
3109 do
3110 *list = bfd_abs_section_ptr;
3111 while (list-- != input_list);
3112
3113 for (section = output_bfd->sections;
3114 section != NULL; section = section->next)
3115 {
3116 if ((section->flags & SEC_CODE) != 0)
3117 input_list[section->index] = NULL;
3118 }
3119
3120 return 1;
3121 }
3122
3123 /* Used by elfNN_aarch64_next_input_section and group_sections. */
3124 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3125
3126 /* The linker repeatedly calls this function for each input section,
3127 in the order that input sections are linked into output sections.
3128 Build lists of input sections to determine groupings between which
3129 we may insert linker stubs. */
3130
3131 void
3132 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
3133 {
3134 struct elf_aarch64_link_hash_table *htab =
3135 elf_aarch64_hash_table (info);
3136
3137 if (isec->output_section->index <= htab->top_index)
3138 {
3139 asection **list = htab->input_list + isec->output_section->index;
3140
3141 if (*list != bfd_abs_section_ptr)
3142 {
3143 /* Steal the link_sec pointer for our list. */
3144 /* This happens to make the list in reverse order,
3145 which is what we want. */
3146 PREV_SEC (isec) = *list;
3147 *list = isec;
3148 }
3149 }
3150 }
3151
3152 /* See whether we can group stub sections together. Grouping stub
3153 sections may result in fewer stubs. More importantly, we need to
3154 put all .init* and .fini* stubs at the beginning of the .init or
3155 .fini output sections respectively, because glibc splits the
3156 _init and _fini functions into multiple parts. Putting a stub in
3157 the middle of a function is not a good idea. */
3158
3159 static void
3160 group_sections (struct elf_aarch64_link_hash_table *htab,
3161 bfd_size_type stub_group_size,
3162 bfd_boolean stubs_always_before_branch)
3163 {
3164 asection **list = htab->input_list + htab->top_index;
3165
3166 do
3167 {
3168 asection *tail = *list;
3169
3170 if (tail == bfd_abs_section_ptr)
3171 continue;
3172
3173 while (tail != NULL)
3174 {
3175 asection *curr;
3176 asection *prev;
3177 bfd_size_type total;
3178
3179 curr = tail;
3180 total = tail->size;
3181 while ((prev = PREV_SEC (curr)) != NULL
3182 && ((total += curr->output_offset - prev->output_offset)
3183 < stub_group_size))
3184 curr = prev;
3185
3186 /* OK, the size from the start of CURR to the end is less
3187 than stub_group_size and thus can be handled by one stub
3188 section. (Or the tail section is itself larger than
3189 stub_group_size, in which case we may be toast.)
3190 We should really be keeping track of the total size of
3191 stubs added here, as stubs contribute to the final output
3192 section size. */
3193 do
3194 {
3195 prev = PREV_SEC (tail);
3196 /* Set up this stub group. */
3197 htab->stub_group[tail->id].link_sec = curr;
3198 }
3199 while (tail != curr && (tail = prev) != NULL);
3200
3201 /* But wait, there's more! Input sections up to stub_group_size
3202 bytes before the stub section can be handled by it too. */
3203 if (!stubs_always_before_branch)
3204 {
3205 total = 0;
3206 while (prev != NULL
3207 && ((total += tail->output_offset - prev->output_offset)
3208 < stub_group_size))
3209 {
3210 tail = prev;
3211 prev = PREV_SEC (tail);
3212 htab->stub_group[tail->id].link_sec = curr;
3213 }
3214 }
3215 tail = prev;
3216 }
3217 }
3218 while (list-- != htab->input_list);
3219
3220 free (htab->input_list);
3221 }
3222
3223 #undef PREV_SEC
3224
3225 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
3226
3227 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
3228 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
3229 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
3230 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
3231 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
3232 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
3233
3234 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
3235 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
3236 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
3237 #define AARCH64_ZR 0x1f
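/* Example: the A64 encoding of "mul x2, x3, x4" (an alias of
   "madd x2, x3, x4, xzr") is 0x9b047c62; on that value AARCH64_RD is 2,
   AARCH64_RN is 3, AARCH64_RM is 4 and AARCH64_RA is 0x1f (AARCH64_ZR),
   while AARCH64_MAC (insn) is true and AARCH64_OP31 (insn) is 0. */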
3238
3239 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
3240 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
3241
3242 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
3243 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
3244 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
3245 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
3246 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
3247 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
3248 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
3249 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
3250 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
3251 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
3252 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
3253 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
3254 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
3255 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
3256 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
3257 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
3258 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
3259 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
3260
3261 /* Classify an INSN if it is indeed a load/store.
3262
3263 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
3264
3265 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
3266 is set equal to RT.
3267
3268 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
3269
3270 */
3271
3272 static bfd_boolean
3273 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
3274 bfd_boolean *pair, bfd_boolean *load)
3275 {
3276 uint32_t opcode;
3277 unsigned int r;
3278 uint32_t opc = 0;
3279 uint32_t v = 0;
3280 uint32_t opc_v = 0;
3281
3282 /* Bail out quickly if INSN doesn't fall into the load-store
3283 encoding space. */
3284 if (!AARCH64_LDST (insn))
3285 return FALSE;
3286
3287 *pair = FALSE;
3288 *load = FALSE;
3289 if (AARCH64_LDST_EX (insn))
3290 {
3291 *rt = AARCH64_RT (insn);
3292 *rt2 = *rt;
3293 if (AARCH64_BIT (insn, 21) == 1)
3294 {
3295 *pair = TRUE;
3296 *rt2 = AARCH64_RT2 (insn);
3297 }
3298 *load = AARCH64_LD (insn);
3299 return TRUE;
3300 }
3301 else if (AARCH64_LDST_NAP (insn)
3302 || AARCH64_LDSTP_PI (insn)
3303 || AARCH64_LDSTP_O (insn)
3304 || AARCH64_LDSTP_PRE (insn))
3305 {
3306 *pair = TRUE;
3307 *rt = AARCH64_RT (insn);
3308 *rt2 = AARCH64_RT2 (insn);
3309 *load = AARCH64_LD (insn);
3310 return TRUE;
3311 }
3312 else if (AARCH64_LDST_PCREL (insn)
3313 || AARCH64_LDST_UI (insn)
3314 || AARCH64_LDST_PIIMM (insn)
3315 || AARCH64_LDST_U (insn)
3316 || AARCH64_LDST_PREIMM (insn)
3317 || AARCH64_LDST_RO (insn)
3318 || AARCH64_LDST_UIMM (insn))
3319 {
3320 *rt = AARCH64_RT (insn);
3321 *rt2 = *rt;
3322 if (AARCH64_LDST_PCREL (insn))
3323 *load = TRUE;
3324 opc = AARCH64_BITS (insn, 22, 2);
3325 v = AARCH64_BIT (insn, 26);
3326 opc_v = opc | (v << 2);
3327 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
3328 || opc_v == 5 || opc_v == 7);
3329 return TRUE;
3330 }
3331 else if (AARCH64_LDST_SIMD_M (insn)
3332 || AARCH64_LDST_SIMD_M_PI (insn))
3333 {
3334 *rt = AARCH64_RT (insn);
3335 *load = AARCH64_BIT (insn, 22);
3336 opcode = (insn >> 12) & 0xf;
3337 switch (opcode)
3338 {
3339 case 0:
3340 case 2:
3341 *rt2 = *rt + 3;
3342 break;
3343
3344 case 4:
3345 case 6:
3346 *rt2 = *rt + 2;
3347 break;
3348
3349 case 7:
3350 *rt2 = *rt;
3351 break;
3352
3353 case 8:
3354 case 10:
3355 *rt2 = *rt + 1;
3356 break;
3357
3358 default:
3359 return FALSE;
3360 }
3361 return TRUE;
3362 }
3363 else if (AARCH64_LDST_SIMD_S (insn)
3364 || AARCH64_LDST_SIMD_S_PI (insn))
3365 {
3366 *rt = AARCH64_RT (insn);
3367 r = (insn >> 21) & 1;
3368 *load = AARCH64_BIT (insn, 22);
3369 opcode = (insn >> 13) & 0x7;
3370 switch (opcode)
3371 {
3372 case 0:
3373 case 2:
3374 case 4:
3375 *rt2 = *rt + r;
3376 break;
3377
3378 case 1:
3379 case 3:
3380 case 5:
3381 *rt2 = *rt + (r == 0 ? 2 : 3);
3382 break;
3383
3384 case 6:
3385 *rt2 = *rt + r;
3386 break;
3387
3388 case 7:
3389 *rt2 = *rt + (r == 0 ? 2 : 3);
3390 break;
3391
3392 default:
3393 return FALSE;
3394 }
3395 return TRUE;
3396 }
3397
3398 return FALSE;
3399 }
3400
3401 /* Return TRUE if INSN is multiply-accumulate. */
3402
3403 static bfd_boolean
3404 aarch64_mlxl_p (uint32_t insn)
3405 {
3406 uint32_t op31 = AARCH64_OP31 (insn);
3407
3408 if (AARCH64_MAC (insn)
3409 && (op31 == 0 || op31 == 1 || op31 == 5)
3410 /* Exclude MUL instructions, which are encoded as a multiply-accumulate
3411 with RA = XZR. */
3412 && AARCH64_RA (insn) != AARCH64_ZR)
3413 return TRUE;
3414
3415 return FALSE;
3416 }
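
/* Illustrative example (not part of the original sources): MADD x0, x1, x2, x3
   (0x9b020c20) is accepted by aarch64_mlxl_p, while MUL x0, x1, x2, which is
   the same encoding with Ra = XZR (0x9b027c20), is rejected by the RA check
   above.  */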
3417
3418 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3419 it is possible for a 64-bit multiply-accumulate instruction to generate an
3420 incorrect result. The details are quite complex and hard to
3421 determine statically, since branches in the code may exist in some
3422 circumstances, but all cases end with a memory (load, store, or
3423 prefetch) instruction followed immediately by the multiply-accumulate
3424 operation. We employ a linker patching technique, by moving the potentially
3425 affected multiply-accumulate instruction into a patch region and replacing
3426 the original instruction with a branch to the patch. This function checks
3427 if INSN_1 is the memory operation followed by a multiply-accumulate
3428 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3429 if INSN_1 and INSN_2 are safe. */
3430
3431 static bfd_boolean
3432 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3433 {
3434 uint32_t rt;
3435 uint32_t rt2;
3436 uint32_t rn;
3437 uint32_t rm;
3438 uint32_t ra;
3439 bfd_boolean pair;
3440 bfd_boolean load;
3441
3442 if (aarch64_mlxl_p (insn_2)
3443 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3444 {
3445 /* Any SIMD memory op is independent of the subsequent MLA
3446 by definition of the erratum. */
3447 if (AARCH64_BIT (insn_1, 26))
3448 return TRUE;
3449
3450 /* If not SIMD, check for integer memory ops and MLA relationship. */
3451 rn = AARCH64_RN (insn_2);
3452 ra = AARCH64_RA (insn_2);
3453 rm = AARCH64_RM (insn_2);
3454
3455 /* If this is a load and there's a true (RAW) dependency, we are safe
3456 and this is not an erratum sequence. */
3457 if (load
3458 && (rt == rn || rt == rm || rt == ra
3459 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3460 return FALSE;
3461
3462 /* We conservatively put out stubs for all other cases (including
3463 writebacks). */
3464 return TRUE;
3465 }
3466
3467 return FALSE;
3468 }
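
/* Illustrative example (not part of the original sources) of a sequence
   flagged by aarch64_erratum_sequence:

       ldr   x10, [x1, #8]       integer load, Rt = x10
       madd  x0, x2, x3, x4      64-bit multiply-accumulate, Ra != XZR

   x10 is none of the MADD source registers, so there is no RAW dependency
   and the pair is reported as an erratum 835769 sequence; the workaround
   then replaces the MADD with a branch to a veneer holding the MADD.  */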
3469
3470 /* Used to order a list of mapping symbols by address. */
3471
3472 static int
3473 elf_aarch64_compare_mapping (const void *a, const void *b)
3474 {
3475 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3476 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3477
3478 if (amap->vma > bmap->vma)
3479 return 1;
3480 else if (amap->vma < bmap->vma)
3481 return -1;
3482 else if (amap->type > bmap->type)
3483 /* Ensure results do not depend on the host qsort for objects with
3484 multiple mapping symbols at the same address by sorting on type
3485 after vma. */
3486 return 1;
3487 else if (amap->type < bmap->type)
3488 return -1;
3489 else
3490 return 0;
3491 }
3492
3493
3494 static char *
3495 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3496 {
3497 char *stub_name = (char *) bfd_malloc
3498 (strlen ("__erratum_835769_veneer_") + 16);
3499 if (stub_name != NULL)
3500 sprintf (stub_name, "__erratum_835769_veneer_%u", num_fixes);
3501 return stub_name;
3501 }
3502
3503 /* Scan for Cortex-A53 erratum 835769 sequence.
3504
3505 Return TRUE on a successful scan, FALSE on abnormal termination. */
3506
3507 static bfd_boolean
3508 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3509 struct bfd_link_info *info,
3510 unsigned int *num_fixes_p)
3511 {
3512 asection *section;
3513 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3514 unsigned int num_fixes = *num_fixes_p;
3515
3516 if (htab == NULL)
3517 return TRUE;
3518
3519 for (section = input_bfd->sections;
3520 section != NULL;
3521 section = section->next)
3522 {
3523 bfd_byte *contents = NULL;
3524 struct _aarch64_elf_section_data *sec_data;
3525 unsigned int span;
3526
3527 if (elf_section_type (section) != SHT_PROGBITS
3528 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3529 || (section->flags & SEC_EXCLUDE) != 0
3530 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3531 || (section->output_section == bfd_abs_section_ptr))
3532 continue;
3533
3534 if (elf_section_data (section)->this_hdr.contents != NULL)
3535 contents = elf_section_data (section)->this_hdr.contents;
3536 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3537 return FALSE;
3538
3539 sec_data = elf_aarch64_section_data (section);
3540
3541 qsort (sec_data->map, sec_data->mapcount,
3542 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3543
3544 for (span = 0; span < sec_data->mapcount; span++)
3545 {
3546 unsigned int span_start = sec_data->map[span].vma;
3547 unsigned int span_end = ((span == sec_data->mapcount - 1)
3548 ? sec_data->map[0].vma + section->size
3549 : sec_data->map[span + 1].vma);
3550 unsigned int i;
3551 char span_type = sec_data->map[span].type;
3552
3553 if (span_type == 'd')
3554 continue;
3555
3556 for (i = span_start; i + 4 < span_end; i += 4)
3557 {
3558 uint32_t insn_1 = bfd_getl32 (contents + i);
3559 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3560
3561 if (aarch64_erratum_sequence (insn_1, insn_2))
3562 {
3563 struct elf_aarch64_stub_hash_entry *stub_entry;
3564 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3565 if (! stub_name)
3566 return FALSE;
3567
3568 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3569 section,
3570 htab);
3571 if (! stub_entry)
3572 return FALSE;
3573
3574 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3575 stub_entry->target_section = section;
3576 stub_entry->target_value = i + 4;
3577 stub_entry->veneered_insn = insn_2;
3578 stub_entry->output_name = stub_name;
3579 num_fixes++;
3580 }
3581 }
3582 }
3583 if (elf_section_data (section)->this_hdr.contents == NULL)
3584 free (contents);
3585 }
3586
3587 *num_fixes_p = num_fixes;
3588
3589 return TRUE;
3590 }
3591
3592
3593 /* Test if instruction INSN is ADRP. */
3594
3595 static bfd_boolean
3596 _bfd_aarch64_adrp_p (uint32_t insn)
3597 {
3598 return ((insn & 0x9f000000) == 0x90000000);
3599 }
3600
3601
3602 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3603
3604 static bfd_boolean
3605 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3606 uint32_t insn_3)
3607 {
3608 uint32_t rt;
3609 uint32_t rt2;
3610 bfd_boolean pair;
3611 bfd_boolean load;
3612
3613 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3614 && (!pair
3615 || (pair && !load))
3616 && AARCH64_LDST_UIMM (insn_3)
3617 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3618 }
3619
3620
3621 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3622
3623 Return TRUE if section CONTENTS at offset I contains one of the
3624 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3625 seen, set P_VENEER_I to the offset of the final LOAD/STORE
3626 instruction in the sequence.
3627 */
3628
3629 static bfd_boolean
3630 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3631 bfd_vma i, bfd_vma span_end,
3632 bfd_vma *p_veneer_i)
3633 {
3634 uint32_t insn_1 = bfd_getl32 (contents + i);
3635
3636 if (!_bfd_aarch64_adrp_p (insn_1))
3637 return FALSE;
3638
3639 if (span_end < i + 12)
3640 return FALSE;
3641
3642 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3643 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3644
3645 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3646 return FALSE;
3647
3648 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3649 {
3650 *p_veneer_i = i + 8;
3651 return TRUE;
3652 }
3653
3654 if (span_end < i + 16)
3655 return FALSE;
3656
3657 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3658
3659 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3660 {
3661 *p_veneer_i = i + 12;
3662 return TRUE;
3663 }
3664
3665 return FALSE;
3666 }
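
/* Illustrative example (not part of the original sources) of a sequence
   detected by the predicate above, assuming the ADRP sits at an address
   whose low 12 bits are 0xff8 or 0xffc:

       adrp  x0, var
       ldr   x1, [x3, #8]             any memory op that is not a load pair
       ldr   x2, [x0, #:lo12:var]     unsigned-immediate ld/st based on x0

   The final load/store uses the ADRP destination (x0) as its base register,
   so P_VENEER_I is set to its offset.  The same pattern with one extra
   instruction between the second and third lines is also caught.  */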
3667
3668
3669 /* Resize all stub sections. */
3670
3671 static void
3672 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3673 {
3674 asection *section;
3675
3676 /* OK, we've added some stubs. Find out the new size of the
3677 stub sections. */
3678 for (section = htab->stub_bfd->sections;
3679 section != NULL; section = section->next)
3680 {
3681 /* Ignore non-stub sections. */
3682 if (!strstr (section->name, STUB_SUFFIX))
3683 continue;
3684 section->size = 0;
3685 }
3686
3687 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3688
3689 for (section = htab->stub_bfd->sections;
3690 section != NULL; section = section->next)
3691 {
3692 if (!strstr (section->name, STUB_SUFFIX))
3693 continue;
3694
3695 if (section->size)
3696 section->size += 4;
3697
3698 /* Ensure all stub sections have a size which is a multiple of
3699 4096. This is important in order to ensure that the insertion
3700 of stub sections does not in itself move existing code around
3701 in such a way that new errata sequences are created. */
3702 if (htab->fix_erratum_843419)
3703 if (section->size)
3704 section->size = BFD_ALIGN (section->size, 0x1000);
3705 }
3706 }
3707
3708
3709 /* Construct an erratum 843419 workaround stub name.  */
3711
3712 static char *
3713 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3714 bfd_vma offset)
3715 {
3716 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3717 char *stub_name = bfd_malloc (len);
3718
3719 if (stub_name != NULL)
3720 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3721 input_section->owner->id,
3722 input_section->id,
3723 offset);
3724 return stub_name;
3725 }
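
/* For example (not part of the original sources), owner id 3, section id 0x2a
   and offset 0xff8 would produce the stub name "e843419@0003_0000002a_ff8".  */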
3726
3727 /* Build a stub_entry structure describing an 843419 fixup.
3728
3729 The stub_entry constructed is populated with the bit pattern INSN
3730 of the instruction located at OFFSET within input SECTION.
3731
3732 Returns TRUE on success. */
3733
3734 static bfd_boolean
3735 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3736 bfd_vma adrp_offset,
3737 bfd_vma ldst_offset,
3738 asection *section,
3739 struct bfd_link_info *info)
3740 {
3741 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3742 char *stub_name;
3743 struct elf_aarch64_stub_hash_entry *stub_entry;
3744
3745 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3746 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3747 FALSE, FALSE);
3748 if (stub_entry)
3749 {
3750 free (stub_name);
3751 return TRUE;
3752 }
3753
3754 /* We always place an 843419 workaround veneer in the stub section
3755 attached to the input section in which an erratum sequence has
3756 been found. This ensures that later in the link process (in
3757 elfNN_aarch64_write_section) when we copy the veneered
3758 instruction from the input section into the stub section the
3759 copied instruction will have had any relocations applied to it.
3760 If we placed workaround veneers in any other stub section then we
3761 could not assume that all relocations have been processed on the
3762 corresponding input section at the point we output the stub
3763 section.
3764 */
3765
3766 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3767 if (stub_entry == NULL)
3768 {
3769 free (stub_name);
3770 return FALSE;
3771 }
3772
3773 stub_entry->adrp_offset = adrp_offset;
3774 stub_entry->target_value = ldst_offset;
3775 stub_entry->target_section = section;
3776 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3777 stub_entry->veneered_insn = insn;
3778 stub_entry->output_name = stub_name;
3779
3780 return TRUE;
3781 }
3782
3783
3784 /* Scan an input section looking for the signature of erratum 843419.
3785
3786 Scans input SECTION in INPUT_BFD looking for erratum 843419
3787 signatures, for each signature found a stub_entry is created
3788 describing the location of the erratum for subsequent fixup.
3789
3790 Return TRUE on successful scan, FALSE on failure to scan.
3791 */
3792
3793 static bfd_boolean
3794 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3795 struct bfd_link_info *info)
3796 {
3797 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3798
3799 if (htab == NULL)
3800 return TRUE;
3801
3802 if (elf_section_type (section) != SHT_PROGBITS
3803 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3804 || (section->flags & SEC_EXCLUDE) != 0
3805 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3806 || (section->output_section == bfd_abs_section_ptr))
3807 return TRUE;
3808
3809 do
3810 {
3811 bfd_byte *contents = NULL;
3812 struct _aarch64_elf_section_data *sec_data;
3813 unsigned int span;
3814
3815 if (elf_section_data (section)->this_hdr.contents != NULL)
3816 contents = elf_section_data (section)->this_hdr.contents;
3817 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3818 return FALSE;
3819
3820 sec_data = elf_aarch64_section_data (section);
3821
3822 qsort (sec_data->map, sec_data->mapcount,
3823 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3824
3825 for (span = 0; span < sec_data->mapcount; span++)
3826 {
3827 unsigned int span_start = sec_data->map[span].vma;
3828 unsigned int span_end = ((span == sec_data->mapcount - 1)
3829 ? sec_data->map[0].vma + section->size
3830 : sec_data->map[span + 1].vma);
3831 unsigned int i;
3832 char span_type = sec_data->map[span].type;
3833
3834 if (span_type == 'd')
3835 continue;
3836
3837 for (i = span_start; i + 8 < span_end; i += 4)
3838 {
3839 bfd_vma vma = (section->output_section->vma
3840 + section->output_offset
3841 + i);
3842 bfd_vma veneer_i;
3843
3844 if (_bfd_aarch64_erratum_843419_p
3845 (contents, vma, i, span_end, &veneer_i))
3846 {
3847 uint32_t insn = bfd_getl32 (contents + veneer_i);
3848
3849 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3850 section, info))
3851 return FALSE;
3852 }
3853 }
3854 }
3855
3856 if (elf_section_data (section)->this_hdr.contents == NULL)
3857 free (contents);
3858 }
3859 while (0);
3860
3861 return TRUE;
3862 }
3863
3864
3865 /* Determine and set the size of the stub section for a final link.
3866
3867 The basic idea here is to examine all the relocations looking for
3868 PC-relative calls to a target that is unreachable with a "bl"
3869 instruction. */
3870
3871 bfd_boolean
3872 elfNN_aarch64_size_stubs (bfd *output_bfd,
3873 bfd *stub_bfd,
3874 struct bfd_link_info *info,
3875 bfd_signed_vma group_size,
3876 asection * (*add_stub_section) (const char *,
3877 asection *),
3878 void (*layout_sections_again) (void))
3879 {
3880 bfd_size_type stub_group_size;
3881 bfd_boolean stubs_always_before_branch;
3882 bfd_boolean stub_changed = FALSE;
3883 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3884 unsigned int num_erratum_835769_fixes = 0;
3885
3886 /* Propagate mach to stub bfd, because it may not have been
3887 finalized when we created stub_bfd. */
3888 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3889 bfd_get_mach (output_bfd));
3890
3891 /* Stash our params away. */
3892 htab->stub_bfd = stub_bfd;
3893 htab->add_stub_section = add_stub_section;
3894 htab->layout_sections_again = layout_sections_again;
3895 stubs_always_before_branch = group_size < 0;
3896 if (group_size < 0)
3897 stub_group_size = -group_size;
3898 else
3899 stub_group_size = group_size;
3900
3901 if (stub_group_size == 1)
3902 {
3903 /* Default values. */
3904 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
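/* Illustrative arithmetic (not part of the original sources): B/BL carry a
   26-bit signed word offset, i.e. +/-2^27 bytes = +/-0x8000000, while
   127 * 1024 * 1024 = 0x7f00000, leaving a 1MB safety margin.  */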
3905 stub_group_size = 127 * 1024 * 1024;
3906 }
3907
3908 group_sections (htab, stub_group_size, stubs_always_before_branch);
3909
3910 (*htab->layout_sections_again) ();
3911
3912 if (htab->fix_erratum_835769)
3913 {
3914 bfd *input_bfd;
3915
3916 for (input_bfd = info->input_bfds;
3917 input_bfd != NULL; input_bfd = input_bfd->link.next)
3918 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3919 &num_erratum_835769_fixes))
3920 return FALSE;
3921
3922 _bfd_aarch64_resize_stubs (htab);
3923 (*htab->layout_sections_again) ();
3924 }
3925
3926 if (htab->fix_erratum_843419)
3927 {
3928 bfd *input_bfd;
3929
3930 for (input_bfd = info->input_bfds;
3931 input_bfd != NULL;
3932 input_bfd = input_bfd->link.next)
3933 {
3934 asection *section;
3935
3936 for (section = input_bfd->sections;
3937 section != NULL;
3938 section = section->next)
3939 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3940 return FALSE;
3941 }
3942
3943 _bfd_aarch64_resize_stubs (htab);
3944 (*htab->layout_sections_again) ();
3945 }
3946
3947 while (1)
3948 {
3949 bfd *input_bfd;
3950
3951 for (input_bfd = info->input_bfds;
3952 input_bfd != NULL; input_bfd = input_bfd->link.next)
3953 {
3954 Elf_Internal_Shdr *symtab_hdr;
3955 asection *section;
3956 Elf_Internal_Sym *local_syms = NULL;
3957
3958 /* We'll need the symbol table in a second. */
3959 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3960 if (symtab_hdr->sh_info == 0)
3961 continue;
3962
3963 /* Walk over each section attached to the input bfd. */
3964 for (section = input_bfd->sections;
3965 section != NULL; section = section->next)
3966 {
3967 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3968
3969 /* If there aren't any relocs, then there's nothing more
3970 to do. */
3971 if ((section->flags & SEC_RELOC) == 0
3972 || section->reloc_count == 0
3973 || (section->flags & SEC_CODE) == 0)
3974 continue;
3975
3976 /* If this section is a link-once section that will be
3977 discarded, then don't create any stubs. */
3978 if (section->output_section == NULL
3979 || section->output_section->owner != output_bfd)
3980 continue;
3981
3982 /* Get the relocs. */
3983 internal_relocs
3984 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3985 NULL, info->keep_memory);
3986 if (internal_relocs == NULL)
3987 goto error_ret_free_local;
3988
3989 /* Now examine each relocation. */
3990 irela = internal_relocs;
3991 irelaend = irela + section->reloc_count;
3992 for (; irela < irelaend; irela++)
3993 {
3994 unsigned int r_type, r_indx;
3995 enum elf_aarch64_stub_type stub_type;
3996 struct elf_aarch64_stub_hash_entry *stub_entry;
3997 asection *sym_sec;
3998 bfd_vma sym_value;
3999 bfd_vma destination;
4000 struct elf_aarch64_link_hash_entry *hash;
4001 const char *sym_name;
4002 char *stub_name;
4003 const asection *id_sec;
4004 unsigned char st_type;
4005 bfd_size_type len;
4006
4007 r_type = ELFNN_R_TYPE (irela->r_info);
4008 r_indx = ELFNN_R_SYM (irela->r_info);
4009
4010 if (r_type >= (unsigned int) R_AARCH64_end)
4011 {
4012 bfd_set_error (bfd_error_bad_value);
4013 error_ret_free_internal:
4014 if (elf_section_data (section)->relocs == NULL)
4015 free (internal_relocs);
4016 goto error_ret_free_local;
4017 }
4018
4019 /* Only look for stubs on unconditional branch and
4020 branch and link instructions. */
4021 if (r_type != (unsigned int) AARCH64_R (CALL26)
4022 && r_type != (unsigned int) AARCH64_R (JUMP26))
4023 continue;
4024
4025 /* Now determine the call target, its name, value,
4026 section. */
4027 sym_sec = NULL;
4028 sym_value = 0;
4029 destination = 0;
4030 hash = NULL;
4031 sym_name = NULL;
4032 if (r_indx < symtab_hdr->sh_info)
4033 {
4034 /* It's a local symbol. */
4035 Elf_Internal_Sym *sym;
4036 Elf_Internal_Shdr *hdr;
4037
4038 if (local_syms == NULL)
4039 {
4040 local_syms
4041 = (Elf_Internal_Sym *) symtab_hdr->contents;
4042 if (local_syms == NULL)
4043 local_syms
4044 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4045 symtab_hdr->sh_info, 0,
4046 NULL, NULL, NULL);
4047 if (local_syms == NULL)
4048 goto error_ret_free_internal;
4049 }
4050
4051 sym = local_syms + r_indx;
4052 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4053 sym_sec = hdr->bfd_section;
4054 if (!sym_sec)
4055 /* This is an undefined symbol. It can never
4056 be resolved. */
4057 continue;
4058
4059 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4060 sym_value = sym->st_value;
4061 destination = (sym_value + irela->r_addend
4062 + sym_sec->output_offset
4063 + sym_sec->output_section->vma);
4064 st_type = ELF_ST_TYPE (sym->st_info);
4065 sym_name
4066 = bfd_elf_string_from_elf_section (input_bfd,
4067 symtab_hdr->sh_link,
4068 sym->st_name);
4069 }
4070 else
4071 {
4072 int e_indx;
4073
4074 e_indx = r_indx - symtab_hdr->sh_info;
4075 hash = ((struct elf_aarch64_link_hash_entry *)
4076 elf_sym_hashes (input_bfd)[e_indx]);
4077
4078 while (hash->root.root.type == bfd_link_hash_indirect
4079 || hash->root.root.type == bfd_link_hash_warning)
4080 hash = ((struct elf_aarch64_link_hash_entry *)
4081 hash->root.root.u.i.link);
4082
4083 if (hash->root.root.type == bfd_link_hash_defined
4084 || hash->root.root.type == bfd_link_hash_defweak)
4085 {
4086 struct elf_aarch64_link_hash_table *globals =
4087 elf_aarch64_hash_table (info);
4088 sym_sec = hash->root.root.u.def.section;
4089 sym_value = hash->root.root.u.def.value;
4090 /* For a destination in a shared library,
4091 use the PLT stub as target address to
4092 decide whether a branch stub is
4093 needed. */
4094 if (globals->root.splt != NULL && hash != NULL
4095 && hash->root.plt.offset != (bfd_vma) - 1)
4096 {
4097 sym_sec = globals->root.splt;
4098 sym_value = hash->root.plt.offset;
4099 if (sym_sec->output_section != NULL)
4100 destination = (sym_value
4101 + sym_sec->output_offset
4102 + sym_sec->output_section->vma);
4104 }
4105 else if (sym_sec->output_section != NULL)
4106 destination = (sym_value + irela->r_addend
4107 + sym_sec->output_offset
4108 + sym_sec->output_section->vma);
4109 }
4110 else if (hash->root.root.type == bfd_link_hash_undefined
4111 || (hash->root.root.type
4112 == bfd_link_hash_undefweak))
4113 {
4114 /* For a shared library, use the PLT stub as
4115 target address to decide whether a long
4116 branch stub is needed.
4117 For absolute code, such references cannot be handled. */
4118 struct elf_aarch64_link_hash_table *globals =
4119 elf_aarch64_hash_table (info);
4120
4121 if (globals->root.splt != NULL && hash != NULL
4122 && hash->root.plt.offset != (bfd_vma) - 1)
4123 {
4124 sym_sec = globals->root.splt;
4125 sym_value = hash->root.plt.offset;
4126 if (sym_sec->output_section != NULL)
4127 destination = (sym_value
4128 + sym_sec->output_offset
4129 + sym_sec->output_section->vma);
4131 }
4132 else
4133 continue;
4134 }
4135 else
4136 {
4137 bfd_set_error (bfd_error_bad_value);
4138 goto error_ret_free_internal;
4139 }
4140 st_type = ELF_ST_TYPE (hash->root.type);
4141 sym_name = hash->root.root.root.string;
4142 }
4143
4144 /* Determine what (if any) linker stub is needed. */
4145 stub_type = aarch64_type_of_stub
4146 (info, section, irela, sym_sec, st_type, hash, destination);
4147 if (stub_type == aarch64_stub_none)
4148 continue;
4149
4150 /* Support for grouping stub sections. */
4151 id_sec = htab->stub_group[section->id].link_sec;
4152
4153 /* Get the name of this stub. */
4154 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
4155 irela);
4156 if (!stub_name)
4157 goto error_ret_free_internal;
4158
4159 stub_entry =
4160 aarch64_stub_hash_lookup (&htab->stub_hash_table,
4161 stub_name, FALSE, FALSE);
4162 if (stub_entry != NULL)
4163 {
4164 /* The proper stub has already been created. */
4165 free (stub_name);
4166 continue;
4167 }
4168
4169 stub_entry = _bfd_aarch64_add_stub_entry_in_group
4170 (stub_name, section, htab);
4171 if (stub_entry == NULL)
4172 {
4173 free (stub_name);
4174 goto error_ret_free_internal;
4175 }
4176
4177 stub_entry->target_value = sym_value;
4178 stub_entry->target_section = sym_sec;
4179 stub_entry->stub_type = stub_type;
4180 stub_entry->h = hash;
4181 stub_entry->st_type = st_type;
4182
4183 if (sym_name == NULL)
4184 sym_name = "unnamed";
4185 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
4186 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
4187 if (stub_entry->output_name == NULL)
4188 {
4189 free (stub_name);
4190 goto error_ret_free_internal;
4191 }
4192
4193 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
4194 sym_name);
4195
4196 stub_changed = TRUE;
4197 }
4198
4199 /* We're done with the internal relocs, free them. */
4200 if (elf_section_data (section)->relocs == NULL)
4201 free (internal_relocs);
4202 }
4203 }
4204
4205 if (!stub_changed)
4206 break;
4207
4208 _bfd_aarch64_resize_stubs (htab);
4209
4210 /* Ask the linker to do its stuff. */
4211 (*htab->layout_sections_again) ();
4212 stub_changed = FALSE;
4213 }
4214
4215 return TRUE;
4216
4217 error_ret_free_local:
4218 return FALSE;
4219 }
4220
4221 /* Build all the stubs associated with the current output file. The
4222 stubs are kept in a hash table attached to the main linker hash
4223 table. We also set up the .plt entries for statically linked PIC
4224 functions here. This function is called via aarch64_elf_finish in the
4225 linker. */
4226
4227 bfd_boolean
4228 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
4229 {
4230 asection *stub_sec;
4231 struct bfd_hash_table *table;
4232 struct elf_aarch64_link_hash_table *htab;
4233
4234 htab = elf_aarch64_hash_table (info);
4235
4236 for (stub_sec = htab->stub_bfd->sections;
4237 stub_sec != NULL; stub_sec = stub_sec->next)
4238 {
4239 bfd_size_type size;
4240
4241 /* Ignore non-stub sections. */
4242 if (!strstr (stub_sec->name, STUB_SUFFIX))
4243 continue;
4244
4245 /* Allocate memory to hold the linker stubs. */
4246 size = stub_sec->size;
4247 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4248 if (stub_sec->contents == NULL && size != 0)
4249 return FALSE;
4250 stub_sec->size = 0;
4251
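/* 0x14000000 | (size >> 2) encodes "B .+size", an unconditional branch over
   the entire stub section; presumably this prevents an accidental
   fall-through into the stub area from executing the veneers.  */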
4252 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
4253 stub_sec->size += 4;
4254 }
4255
4256 /* Build the stubs as directed by the stub hash table. */
4257 table = &htab->stub_hash_table;
4258 bfd_hash_traverse (table, aarch64_build_one_stub, info);
4259
4260 return TRUE;
4261 }
4262
4263
4264 /* Add an entry to the code/data map for section SEC. */
4265
4266 static void
4267 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
4268 {
4269 struct _aarch64_elf_section_data *sec_data =
4270 elf_aarch64_section_data (sec);
4271 unsigned int newidx;
4272
4273 if (sec_data->map == NULL)
4274 {
4275 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
4276 sec_data->mapcount = 0;
4277 sec_data->mapsize = 1;
4278 }
4279
4280 newidx = sec_data->mapcount++;
4281
4282 if (sec_data->mapcount > sec_data->mapsize)
4283 {
4284 sec_data->mapsize *= 2;
4285 sec_data->map = bfd_realloc_or_free
4286 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
4287 }
4288
4289 if (sec_data->map)
4290 {
4291 sec_data->map[newidx].vma = vma;
4292 sec_data->map[newidx].type = type;
4293 }
4294 }
4295
4296
4297 /* Initialise maps of insn/data for input BFDs. */
4298 void
4299 bfd_elfNN_aarch64_init_maps (bfd *abfd)
4300 {
4301 Elf_Internal_Sym *isymbuf;
4302 Elf_Internal_Shdr *hdr;
4303 unsigned int i, localsyms;
4304
4305 /* Make sure that we are dealing with an AArch64 elf binary. */
4306 if (!is_aarch64_elf (abfd))
4307 return;
4308
4309 if ((abfd->flags & DYNAMIC) != 0)
4310 return;
4311
4312 hdr = &elf_symtab_hdr (abfd);
4313 localsyms = hdr->sh_info;
4314
4315 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
4316 should contain the number of local symbols, which should come before any
4317 global symbols. Mapping symbols are always local. */
4318 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
4319
4320 /* No internal symbols read? Skip this BFD. */
4321 if (isymbuf == NULL)
4322 return;
4323
4324 for (i = 0; i < localsyms; i++)
4325 {
4326 Elf_Internal_Sym *isym = &isymbuf[i];
4327 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4328 const char *name;
4329
4330 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4331 {
4332 name = bfd_elf_string_from_elf_section (abfd,
4333 hdr->sh_link,
4334 isym->st_name);
4335
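/* AArch64 mapping symbols are named "$x" (A64 code) and "$d" (data), so
   name[1] below is the one-character map type that the erratum 835769 and
   843419 scanners compare against 'd' when skipping data spans.  */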
4336 if (bfd_is_aarch64_special_symbol_name
4337 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4338 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4339 }
4340 }
4341 }
4342
4343 /* Set option values needed during linking. */
4344 void
4345 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4346 struct bfd_link_info *link_info,
4347 int no_enum_warn,
4348 int no_wchar_warn, int pic_veneer,
4349 int fix_erratum_835769,
4350 int fix_erratum_843419)
4351 {
4352 struct elf_aarch64_link_hash_table *globals;
4353
4354 globals = elf_aarch64_hash_table (link_info);
4355 globals->pic_veneer = pic_veneer;
4356 globals->fix_erratum_835769 = fix_erratum_835769;
4357 globals->fix_erratum_843419 = fix_erratum_843419;
4358 globals->fix_erratum_843419_adr = TRUE;
4359
4360 BFD_ASSERT (is_aarch64_elf (output_bfd));
4361 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4362 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4363 }
4364
4365 static bfd_vma
4366 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4367 struct elf_aarch64_link_hash_table
4368 *globals, struct bfd_link_info *info,
4369 bfd_vma value, bfd *output_bfd,
4370 bfd_boolean *unresolved_reloc_p)
4371 {
4372 bfd_vma off = (bfd_vma) - 1;
4373 asection *basegot = globals->root.sgot;
4374 bfd_boolean dyn = globals->root.dynamic_sections_created;
4375
4376 if (h != NULL)
4377 {
4378 BFD_ASSERT (basegot != NULL);
4379 off = h->got.offset;
4380 BFD_ASSERT (off != (bfd_vma) - 1);
4381 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4382 || (bfd_link_pic (info)
4383 && SYMBOL_REFERENCES_LOCAL (info, h))
4384 || (ELF_ST_VISIBILITY (h->other)
4385 && h->root.type == bfd_link_hash_undefweak))
4386 {
4387 /* This is actually a static link, or it is a -Bsymbolic link
4388 and the symbol is defined locally. We must initialize this
4389 entry in the global offset table. Since the offset must
4390 always be a multiple of 8 (4 in the case of ILP32), we use
4391 the least significant bit to record whether we have
4392 initialized it already.
4393 When doing a dynamic link, we create a .rel(a).got relocation
4394 entry to initialize the value. This is done in the
4395 finish_dynamic_symbol routine. */
4396 if ((off & 1) != 0)
4397 off &= ~1;
4398 else
4399 {
4400 bfd_put_NN (output_bfd, value, basegot->contents + off);
4401 h->got.offset |= 1;
4402 }
4403 }
4404 else
4405 *unresolved_reloc_p = FALSE;
4406
4407 off = off + basegot->output_section->vma + basegot->output_offset;
4408 }
4409
4410 return off;
4411 }
4412
4413 /* Change R_TYPE to a more efficient access model where possible,
4414 return the new reloc type. */
4415
4416 static bfd_reloc_code_real_type
4417 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4418 struct elf_link_hash_entry *h)
4419 {
4420 bfd_boolean is_local = h == NULL;
4421
4422 switch (r_type)
4423 {
4424 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4425 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4426 return (is_local
4427 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4428 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4429
4430 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4431 return (is_local
4432 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4433 : r_type);
4434
4435 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4436 return (is_local
4437 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4438 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4439
4440 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4441 return (is_local
4442 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4443 : BFD_RELOC_AARCH64_NONE);
4444
4445 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4446 return (is_local
4447 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4448 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC);
4449
4450 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4451 return (is_local
4452 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4453 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
4454
4455 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4456 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4457 return (is_local
4458 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4459 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4460
4461 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4462 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4463
4464 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4465 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4466
4467 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4468 return r_type;
4469
4470 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4471 return (is_local
4472 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4473 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4474
4475 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4476 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4477 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4478 /* Instructions with these relocations will become NOPs. */
4479 return BFD_RELOC_AARCH64_NONE;
4480
4481 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4482 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4483 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4484 return is_local ? BFD_RELOC_AARCH64_NONE : r_type;
4485
4486 #if ARCH_SIZE == 64
4487 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4488 return is_local
4489 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
4490 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC;
4491
4492 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4493 return is_local
4494 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
4495 : BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1;
4496 #endif
4497
4498 default:
4499 break;
4500 }
4501
4502 return r_type;
4503 }
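
/* Illustrative example (not part of the original sources): a general-dynamic
   access such as

       adrp  x0, :tlsgd:var          BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
       add   x0, x0, :tlsgd_lo12:var BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC

   is relaxed by the table above to the initial-exec relocations
   (TLSIE_ADR_GOTTPREL_PAGE21 / TLSIE_LDNN_GOTTPREL_LO12_NC) for a global
   symbol, or to local-exec MOVW_TPREL relocations when the symbol is
   local (H == NULL).  */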
4504
4505 static unsigned int
4506 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4507 {
4508 switch (r_type)
4509 {
4510 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4511 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4512 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4513 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4514 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
4515 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4516 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4517 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
4518 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
4519 return GOT_NORMAL;
4520
4521 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4522 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4523 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4524 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
4525 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
4526 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4527 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4528 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4529 return GOT_TLS_GD;
4530
4531 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4532 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4533 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4534 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4535 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4536 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4537 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4538 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4539 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4540 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
4541 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
4542 return GOT_TLSDESC_GD;
4543
4544 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4545 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4546 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4547 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4548 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
4549 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
4550 return GOT_TLS_IE;
4551
4552 default:
4553 break;
4554 }
4555 return GOT_UNKNOWN;
4556 }
4557
4558 static bfd_boolean
4559 aarch64_can_relax_tls (bfd *input_bfd,
4560 struct bfd_link_info *info,
4561 bfd_reloc_code_real_type r_type,
4562 struct elf_link_hash_entry *h,
4563 unsigned long r_symndx)
4564 {
4565 unsigned int symbol_got_type;
4566 unsigned int reloc_got_type;
4567
4568 if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
4569 return FALSE;
4570
4571 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4572 reloc_got_type = aarch64_reloc_got_type (r_type);
4573
4574 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4575 return TRUE;
4576
4577 if (bfd_link_pic (info))
4578 return FALSE;
4579
4580 if (h && h->root.type == bfd_link_hash_undefweak)
4581 return FALSE;
4582
4583 return TRUE;
4584 }
4585
4586 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4587 enumerator. */
4588
4589 static bfd_reloc_code_real_type
4590 aarch64_tls_transition (bfd *input_bfd,
4591 struct bfd_link_info *info,
4592 unsigned int r_type,
4593 struct elf_link_hash_entry *h,
4594 unsigned long r_symndx)
4595 {
4596 bfd_reloc_code_real_type bfd_r_type
4597 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4598
4599 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4600 return bfd_r_type;
4601
4602 return aarch64_tls_transition_without_check (bfd_r_type, h);
4603 }
4604
4605 /* Return the base VMA address which should be subtracted from real addresses
4606 when resolving R_AARCH64_TLS_DTPREL relocation. */
4607
4608 static bfd_vma
4609 dtpoff_base (struct bfd_link_info *info)
4610 {
4611 /* If tls_sec is NULL, we should have signalled an error already. */
4612 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4613 return elf_hash_table (info)->tls_sec->vma;
4614 }
4615
4616 /* Return the base VMA address which should be subtracted from real addresses
4617 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4618
4619 static bfd_vma
4620 tpoff_base (struct bfd_link_info *info)
4621 {
4622 struct elf_link_hash_table *htab = elf_hash_table (info);
4623
4624 /* If tls_sec is NULL, we should have signalled an error already. */
4625 BFD_ASSERT (htab->tls_sec != NULL);
4626
4627 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4628 htab->tls_sec->alignment_power);
4629 return htab->tls_sec->vma - base;
4630 }
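
/* Worked example (not part of the original sources, and assuming the usual
   LP64 TCB_SIZE of 16): with a TLS segment at vma 0x20000 aligned to 8 bytes,
   tpoff_base returns 0x20000 - 16, so a variable at the very start of the
   segment resolves to a thread-pointer offset of 16.  */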
4631
4632 static bfd_vma *
4633 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4634 unsigned long r_symndx)
4635 {
4636 /* Calculate the address of the GOT entry for symbol
4637 referred to in h. */
4638 if (h != NULL)
4639 return &h->got.offset;
4640 else
4641 {
4642 /* local symbol */
4643 struct elf_aarch64_local_symbol *l;
4644
4645 l = elf_aarch64_locals (input_bfd);
4646 return &l[r_symndx].got_offset;
4647 }
4648 }
4649
4650 static void
4651 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4652 unsigned long r_symndx)
4653 {
4654 bfd_vma *p;
4655 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4656 *p |= 1;
4657 }
4658
4659 static int
4660 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4661 unsigned long r_symndx)
4662 {
4663 bfd_vma value;
4664 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4665 return value & 1;
4666 }
4667
4668 static bfd_vma
4669 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4670 unsigned long r_symndx)
4671 {
4672 bfd_vma value;
4673 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4674 value &= ~1;
4675 return value;
4676 }
4677
4678 static bfd_vma *
4679 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4680 unsigned long r_symndx)
4681 {
4682 /* Calculate the address of the GOT entry for symbol
4683 referred to in h. */
4684 if (h != NULL)
4685 {
4686 struct elf_aarch64_link_hash_entry *eh;
4687 eh = (struct elf_aarch64_link_hash_entry *) h;
4688 return &eh->tlsdesc_got_jump_table_offset;
4689 }
4690 else
4691 {
4692 /* local symbol */
4693 struct elf_aarch64_local_symbol *l;
4694
4695 l = elf_aarch64_locals (input_bfd);
4696 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4697 }
4698 }
4699
4700 static void
4701 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4702 unsigned long r_symndx)
4703 {
4704 bfd_vma *p;
4705 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4706 *p |= 1;
4707 }
4708
4709 static int
4710 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4711 struct elf_link_hash_entry *h,
4712 unsigned long r_symndx)
4713 {
4714 bfd_vma value;
4715 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4716 return value & 1;
4717 }
4718
4719 static bfd_vma
4720 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4721 unsigned long r_symndx)
4722 {
4723 bfd_vma value;
4724 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4725 value &= ~1;
4726 return value;
4727 }
4728
4729 /* Data for make_branch_to_erratum_835769_stub(). */
4730
4731 struct erratum_835769_branch_to_stub_data
4732 {
4733 struct bfd_link_info *info;
4734 asection *output_section;
4735 bfd_byte *contents;
4736 };
4737
4738 /* Helper to insert branches to erratum 835769 stubs in the right
4739 places for a particular section. */
4740
4741 static bfd_boolean
4742 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4743 void *in_arg)
4744 {
4745 struct elf_aarch64_stub_hash_entry *stub_entry;
4746 struct erratum_835769_branch_to_stub_data *data;
4747 bfd_byte *contents;
4748 unsigned long branch_insn = 0;
4749 bfd_vma veneered_insn_loc, veneer_entry_loc;
4750 bfd_signed_vma branch_offset;
4751 unsigned int target;
4752 bfd *abfd;
4753
4754 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4755 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4756
4757 if (stub_entry->target_section != data->output_section
4758 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4759 return TRUE;
4760
4761 contents = data->contents;
4762 veneered_insn_loc = stub_entry->target_section->output_section->vma
4763 + stub_entry->target_section->output_offset
4764 + stub_entry->target_value;
4765 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4766 + stub_entry->stub_sec->output_offset
4767 + stub_entry->stub_offset;
4768 branch_offset = veneer_entry_loc - veneered_insn_loc;
4769
4770 abfd = stub_entry->target_section->owner;
4771 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4772 (*_bfd_error_handler)
4773 (_("%B: error: Erratum 835769 stub out "
4774 "of range (input file too large)"), abfd);
4775
4776 target = stub_entry->target_value;
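/* The branch to the veneer is an unconditional B: opcode 0x14000000 with a
   signed 26-bit word offset, so, for example, a forward offset of 8 bytes
   becomes 0x14000002 ("b .+8").  */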
4777 branch_insn = 0x14000000;
4778 branch_offset >>= 2;
4779 branch_offset &= 0x3ffffff;
4780 branch_insn |= branch_offset;
4781 bfd_putl32 (branch_insn, &contents[target]);
4782
4783 return TRUE;
4784 }
4785
4786
4787 static bfd_boolean
4788 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4789 void *in_arg)
4790 {
4791 struct elf_aarch64_stub_hash_entry *stub_entry
4792 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4793 struct erratum_835769_branch_to_stub_data *data
4794 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4795 struct bfd_link_info *info;
4796 struct elf_aarch64_link_hash_table *htab;
4797 bfd_byte *contents;
4798 asection *section;
4799 bfd *abfd;
4800 bfd_vma place;
4801 uint32_t insn;
4802
4803 info = data->info;
4804 contents = data->contents;
4805 section = data->output_section;
4806
4807 htab = elf_aarch64_hash_table (info);
4808
4809 if (stub_entry->target_section != section
4810 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4811 return TRUE;
4812
4813 insn = bfd_getl32 (contents + stub_entry->target_value);
4814 bfd_putl32 (insn,
4815 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4816
4817 place = (section->output_section->vma + section->output_offset
4818 + stub_entry->adrp_offset);
4819 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4820
4821 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4822 abort ();
4823
4824 bfd_signed_vma imm =
4825 (_bfd_aarch64_sign_extend
4826 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4827 - (place & 0xfff));
4828
4829 if (htab->fix_erratum_843419_adr
4830 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4831 {
4832 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4833 | AARCH64_RT (insn));
4834 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4835 }
4836 else
4837 {
4838 bfd_vma veneered_insn_loc;
4839 bfd_vma veneer_entry_loc;
4840 bfd_signed_vma branch_offset;
4841 uint32_t branch_insn;
4842
4843 veneered_insn_loc = stub_entry->target_section->output_section->vma
4844 + stub_entry->target_section->output_offset
4845 + stub_entry->target_value;
4846 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4847 + stub_entry->stub_sec->output_offset
4848 + stub_entry->stub_offset;
4849 branch_offset = veneer_entry_loc - veneered_insn_loc;
4850
4851 abfd = stub_entry->target_section->owner;
4852 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4853 (*_bfd_error_handler)
4854 (_("%B: error: Erratum 843419 stub out "
4855 "of range (input file too large)"), abfd);
4856
4857 branch_insn = 0x14000000;
4858 branch_offset >>= 2;
4859 branch_offset &= 0x3ffffff;
4860 branch_insn |= branch_offset;
4861 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4862 }
4863 return TRUE;
4864 }
4865
4866
4867 static bfd_boolean
4868 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4869 struct bfd_link_info *link_info,
4870 asection *sec,
4871 bfd_byte *contents)
4872
4873 {
4874 struct elf_aarch64_link_hash_table *globals =
4875 elf_aarch64_hash_table (link_info);
4876
4877 if (globals == NULL)
4878 return FALSE;
4879
4880 /* Fix code to point to erratum 835769 stubs. */
4881 if (globals->fix_erratum_835769)
4882 {
4883 struct erratum_835769_branch_to_stub_data data;
4884
4885 data.info = link_info;
4886 data.output_section = sec;
4887 data.contents = contents;
4888 bfd_hash_traverse (&globals->stub_hash_table,
4889 make_branch_to_erratum_835769_stub, &data);
4890 }
4891
4892 if (globals->fix_erratum_843419)
4893 {
4894 struct erratum_835769_branch_to_stub_data data;
4895
4896 data.info = link_info;
4897 data.output_section = sec;
4898 data.contents = contents;
4899 bfd_hash_traverse (&globals->stub_hash_table,
4900 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4901 }
4902
4903 return FALSE;
4904 }
4905
4906 /* Perform a relocation as part of a final link. */
4907 static bfd_reloc_status_type
4908 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4909 bfd *input_bfd,
4910 bfd *output_bfd,
4911 asection *input_section,
4912 bfd_byte *contents,
4913 Elf_Internal_Rela *rel,
4914 bfd_vma value,
4915 struct bfd_link_info *info,
4916 asection *sym_sec,
4917 struct elf_link_hash_entry *h,
4918 bfd_boolean *unresolved_reloc_p,
4919 bfd_boolean save_addend,
4920 bfd_vma *saved_addend,
4921 Elf_Internal_Sym *sym)
4922 {
4923 Elf_Internal_Shdr *symtab_hdr;
4924 unsigned int r_type = howto->type;
4925 bfd_reloc_code_real_type bfd_r_type
4926 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4927 bfd_reloc_code_real_type new_bfd_r_type;
4928 unsigned long r_symndx;
4929 bfd_byte *hit_data = contents + rel->r_offset;
4930 bfd_vma place, off;
4931 bfd_signed_vma signed_addend;
4932 struct elf_aarch64_link_hash_table *globals;
4933 bfd_boolean weak_undef_p;
4934 asection *base_got;
4935
4936 globals = elf_aarch64_hash_table (info);
4937
4938 symtab_hdr = &elf_symtab_hdr (input_bfd);
4939
4940 BFD_ASSERT (is_aarch64_elf (input_bfd));
4941
4942 r_symndx = ELFNN_R_SYM (rel->r_info);
4943
4944 /* It is possible to have linker relaxations on some TLS access
4945 models. Update our information here. */
4946 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4947 if (new_bfd_r_type != bfd_r_type)
4948 {
4949 bfd_r_type = new_bfd_r_type;
4950 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4951 BFD_ASSERT (howto != NULL);
4952 r_type = howto->type;
4953 }
4954
4955 place = input_section->output_section->vma
4956 + input_section->output_offset + rel->r_offset;
4957
4958 /* Get addend, accumulating the addend for consecutive relocs
4959 which refer to the same offset. */
4960 signed_addend = saved_addend ? *saved_addend : 0;
4961 signed_addend += rel->r_addend;
4962
4963 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4964 : bfd_is_und_section (sym_sec));
4965
4966 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4967 it here if it is defined in a non-shared object. */
4968 if (h != NULL
4969 && h->type == STT_GNU_IFUNC
4970 && h->def_regular)
4971 {
4972 asection *plt;
4973 const char *name;
4974 bfd_vma addend = 0;
4975
4976 if ((input_section->flags & SEC_ALLOC) == 0
4977 || h->plt.offset == (bfd_vma) -1)
4978 abort ();
4979
4980 /* STT_GNU_IFUNC symbol must go through PLT. */
4981 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4982 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4983
4984 switch (bfd_r_type)
4985 {
4986 default:
4987 if (h->root.root.string)
4988 name = h->root.root.string;
4989 else
4990 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4991 NULL);
4992 (*_bfd_error_handler)
4993 (_("%B: relocation %s against STT_GNU_IFUNC "
4994 "symbol `%s' isn't handled by %s"), input_bfd,
4995 howto->name, name, __FUNCTION__);
4996 bfd_set_error (bfd_error_bad_value);
4997 return FALSE;
4998
4999 case BFD_RELOC_AARCH64_NN:
5000 if (rel->r_addend != 0)
5001 {
5002 if (h->root.root.string)
5003 name = h->root.root.string;
5004 else
5005 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
5006 sym, NULL);
5007 (*_bfd_error_handler)
5008 (_("%B: relocation %s against STT_GNU_IFUNC "
5009 "symbol `%s' has non-zero addend: %d"),
5010 input_bfd, howto->name, name, rel->r_addend);
5011 bfd_set_error (bfd_error_bad_value);
5012 return FALSE;
5013 }
5014
5015 /* Generate dynamic relocation only when there is a
5016 non-GOT reference in a shared object. */
5017 if (bfd_link_pic (info) && h->non_got_ref)
5018 {
5019 Elf_Internal_Rela outrel;
5020 asection *sreloc;
5021
5022 /* Need a dynamic relocation to get the real function
5023 address. */
5024 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
5025 info,
5026 input_section,
5027 rel->r_offset);
5028 if (outrel.r_offset == (bfd_vma) -1
5029 || outrel.r_offset == (bfd_vma) -2)
5030 abort ();
5031
5032 outrel.r_offset += (input_section->output_section->vma
5033 + input_section->output_offset);
5034
5035 if (h->dynindx == -1
5036 || h->forced_local
5037 || bfd_link_executable (info))
5038 {
5039 /* This symbol is resolved locally. */
5040 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
5041 outrel.r_addend = (h->root.u.def.value
5042 + h->root.u.def.section->output_section->vma
5043 + h->root.u.def.section->output_offset);
5044 }
5045 else
5046 {
5047 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5048 outrel.r_addend = 0;
5049 }
5050
5051 sreloc = globals->root.irelifunc;
5052 elf_append_rela (output_bfd, sreloc, &outrel);
5053
5054 /* If this reloc is against an external symbol, we
5055 do not want to fiddle with the addend. Otherwise,
5056 we need to include the symbol value so that it
5057 becomes an addend for the dynamic reloc. For an
5058 internal symbol, we have updated addend. */
5059 return bfd_reloc_ok;
5060 }
5061 /* FALLTHROUGH */
5062 case BFD_RELOC_AARCH64_CALL26:
5063 case BFD_RELOC_AARCH64_JUMP26:
5064 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5065 signed_addend,
5066 weak_undef_p);
5067 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5068 howto, value);
5069 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5070 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5071 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5072 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5073 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5074 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5075 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5076 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5077 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5078 base_got = globals->root.sgot;
5079 off = h->got.offset;
5080
5081 if (base_got == NULL)
5082 abort ();
5083
5084 if (off == (bfd_vma) -1)
5085 {
5086 bfd_vma plt_index;
5087
5088 /* We can't use h->got.offset here to save state, or
5089 even just remember the offset, as finish_dynamic_symbol
5090 would use that as offset into .got. */
5091
5092 if (globals->root.splt != NULL)
5093 {
5094 plt_index = ((h->plt.offset - globals->plt_header_size) /
5095 globals->plt_entry_size);
5096 off = (plt_index + 3) * GOT_ENTRY_SIZE;
5097 base_got = globals->root.sgotplt;
5098 }
5099 else
5100 {
5101 plt_index = h->plt.offset / globals->plt_entry_size;
5102 off = plt_index * GOT_ENTRY_SIZE;
5103 base_got = globals->root.igotplt;
5104 }
5105
5106 if (h->dynindx == -1
5107 || h->forced_local
5108 || info->symbolic)
5109 {
5110 /* This references the local definition. We must
5111 initialize this entry in the global offset table.
5112 Since the offset must always be a multiple of 8,
5113 we use the least significant bit to record
5114 whether we have initialized it already.
5115
5116 When doing a dynamic link, we create a .rela.got
5117 relocation entry to initialize the value. This
5118 is done in the finish_dynamic_symbol routine. */
5119 if ((off & 1) != 0)
5120 off &= ~1;
5121 else
5122 {
5123 bfd_put_NN (output_bfd, value,
5124 base_got->contents + off);
5125 /* Note that this is harmless as -1 | 1 still is -1. */
5126 h->got.offset |= 1;
5127 }
5128 }
5129 value = (base_got->output_section->vma
5130 + base_got->output_offset + off);
5131 }
5132 else
5133 value = aarch64_calculate_got_entry_vma (h, globals, info,
5134 value, output_bfd,
5135 unresolved_reloc_p);
5136
5137 switch (bfd_r_type)
5138 {
5139 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5140 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5141 addend = (globals->root.sgot->output_section->vma
5142 + globals->root.sgot->output_offset);
5143 break;
5144 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5145 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5146 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5147 value = (value - globals->root.sgot->output_section->vma
5148 - globals->root.sgot->output_offset);
5149 default:
5150 break;
5151 }
5152
5153 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5154 addend, weak_undef_p);
5155 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
5156 case BFD_RELOC_AARCH64_ADD_LO12:
5157 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5158 break;
5159 }
5160 }
5161
5162 switch (bfd_r_type)
5163 {
5164 case BFD_RELOC_AARCH64_NONE:
5165 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5166 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5167 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5168 *unresolved_reloc_p = FALSE;
5169 return bfd_reloc_ok;
5170
5171 case BFD_RELOC_AARCH64_NN:
5172
5173 /* When generating a shared object or relocatable executable, these
5174 relocations are copied into the output file to be resolved at
5175 run time. */
5176 if (((bfd_link_pic (info) == TRUE)
5177 || globals->root.is_relocatable_executable)
5178 && (input_section->flags & SEC_ALLOC)
5179 && (h == NULL
5180 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5181 || h->root.type != bfd_link_hash_undefweak))
5182 {
5183 Elf_Internal_Rela outrel;
5184 bfd_byte *loc;
5185 bfd_boolean skip, relocate;
5186 asection *sreloc;
5187
5188 *unresolved_reloc_p = FALSE;
5189
5190 skip = FALSE;
5191 relocate = FALSE;
5192
5193 outrel.r_addend = signed_addend;
5194 outrel.r_offset =
5195 _bfd_elf_section_offset (output_bfd, info, input_section,
5196 rel->r_offset);
5197 if (outrel.r_offset == (bfd_vma) - 1)
5198 skip = TRUE;
5199 else if (outrel.r_offset == (bfd_vma) - 2)
5200 {
5201 skip = TRUE;
5202 relocate = TRUE;
5203 }
5204
5205 outrel.r_offset += (input_section->output_section->vma
5206 + input_section->output_offset);
5207
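	  /* A skipped reloc is emitted as a zeroed-out entry: relocation
	     type 0 (R_AARCH64_NONE) against symbol 0, which the dynamic
	     linker ignores.  */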
5208 if (skip)
5209 memset (&outrel, 0, sizeof outrel);
5210 else if (h != NULL
5211 && h->dynindx != -1
5212 && (!bfd_link_pic (info)
5213 || !SYMBOLIC_BIND (info, h)
5214 || !h->def_regular))
5215 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
5216 else
5217 {
5218 int symbol;
5219
5220 /* On SVR4-ish systems, the dynamic loader cannot
5221 relocate the text and data segments independently,
5222 so the symbol does not matter. */
5223 symbol = 0;
5224 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
5225 outrel.r_addend += value;
5226 }
5227
5228 sreloc = elf_section_data (input_section)->sreloc;
5229 if (sreloc == NULL || sreloc->contents == NULL)
5230 return bfd_reloc_notsupported;
5231
5232 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
5233 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
5234
5235 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
5236 {
5237 	      /* Sanity check that we have previously allocated
5238 sufficient space in the relocation section for the
5239 number of relocations we actually want to emit. */
5240 abort ();
5241 }
5242
5243 /* If this reloc is against an external symbol, we do not want to
5244 fiddle with the addend. Otherwise, we need to include the symbol
5245 value so that it becomes an addend for the dynamic reloc. */
5246 if (!relocate)
5247 return bfd_reloc_ok;
5248
5249 return _bfd_final_link_relocate (howto, input_bfd, input_section,
5250 contents, rel->r_offset, value,
5251 signed_addend);
5252 }
5253 else
5254 value += signed_addend;
5255 break;
5256
5257 case BFD_RELOC_AARCH64_CALL26:
5258 case BFD_RELOC_AARCH64_JUMP26:
5259 {
5260 asection *splt = globals->root.splt;
5261 bfd_boolean via_plt_p =
5262 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
5263
5264 /* A call to an undefined weak symbol is converted to a jump to
5265 the next instruction unless a PLT entry will be created.
5266 The jump to the next instruction is optimized as a NOP.
5267 Do the same for local undefined symbols. */
5268 if (weak_undef_p && ! via_plt_p)
5269 {
5270 bfd_putl32 (INSN_NOP, hit_data);
5271 return bfd_reloc_ok;
5272 }
5273
5274 /* If the call goes through a PLT entry, make sure to
5275 check distance to the right destination address. */
5276 if (via_plt_p)
5277 value = (splt->output_section->vma
5278 + splt->output_offset + h->plt.offset);
5279
5280 /* Check if a stub has to be inserted because the destination
5281 is too far away. */
5282 struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
5283 if (! aarch64_valid_branch_p (value, place))
5284 /* The target is out of reach, so redirect the branch to
5285 the local stub for this function. */
5286 stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
5287 rel, globals);
5288 if (stub_entry != NULL)
5289 value = (stub_entry->stub_offset
5290 + stub_entry->stub_sec->output_offset
5291 + stub_entry->stub_sec->output_section->vma);
5292 }
5293 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5294 signed_addend, weak_undef_p);
5295 *unresolved_reloc_p = FALSE;
5296 break;
5297
5298 case BFD_RELOC_AARCH64_16_PCREL:
5299 case BFD_RELOC_AARCH64_32_PCREL:
5300 case BFD_RELOC_AARCH64_64_PCREL:
5301 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5302 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5303 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5304 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
5305 if (bfd_link_pic (info)
5306 && (input_section->flags & SEC_ALLOC) != 0
5307 && (input_section->flags & SEC_READONLY) != 0
5308 && h != NULL
5309 && !h->def_regular)
5310 {
5311 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5312
5313 (*_bfd_error_handler)
5314 	    (_("%B: relocation %s against external symbol `%s' cannot be used"
5315 " when making a shared object; recompile with -fPIC"),
5316 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
5317 h->root.root.string);
5318 bfd_set_error (bfd_error_bad_value);
5319 return FALSE;
5320 }
5321
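      /* Fall through.  */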
5322 case BFD_RELOC_AARCH64_16:
5323 #if ARCH_SIZE == 64
5324 case BFD_RELOC_AARCH64_32:
5325 #endif
5326 case BFD_RELOC_AARCH64_ADD_LO12:
5327 case BFD_RELOC_AARCH64_BRANCH19:
5328 case BFD_RELOC_AARCH64_LDST128_LO12:
5329 case BFD_RELOC_AARCH64_LDST16_LO12:
5330 case BFD_RELOC_AARCH64_LDST32_LO12:
5331 case BFD_RELOC_AARCH64_LDST64_LO12:
5332 case BFD_RELOC_AARCH64_LDST8_LO12:
5333 case BFD_RELOC_AARCH64_MOVW_G0:
5334 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5335 case BFD_RELOC_AARCH64_MOVW_G0_S:
5336 case BFD_RELOC_AARCH64_MOVW_G1:
5337 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5338 case BFD_RELOC_AARCH64_MOVW_G1_S:
5339 case BFD_RELOC_AARCH64_MOVW_G2:
5340 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5341 case BFD_RELOC_AARCH64_MOVW_G2_S:
5342 case BFD_RELOC_AARCH64_MOVW_G3:
5343 case BFD_RELOC_AARCH64_TSTBR14:
5344 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5345 signed_addend, weak_undef_p);
5346 break;
5347
5348 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5349 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5350 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
5351 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5352 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
5353 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5354 if (globals->root.sgot == NULL)
5355 BFD_ASSERT (h != NULL);
5356
5357 if (h != NULL)
5358 {
5359 bfd_vma addend = 0;
5360 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
5361 output_bfd,
5362 unresolved_reloc_p);
5363 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5364 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5365 addend = (globals->root.sgot->output_section->vma
5366 + globals->root.sgot->output_offset);
5367 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5368 addend, weak_undef_p);
5369 }
5370 else
5371 {
5372 bfd_vma addend = 0;
5373 struct elf_aarch64_local_symbol *locals
5374 = elf_aarch64_locals (input_bfd);
5375
5376 if (locals == NULL)
5377 {
5378 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5379 (*_bfd_error_handler)
5380 		(_("%B: Local symbol descriptor table must not be NULL when applying "
5381 "relocation %s against local symbol"),
5382 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5383 abort ();
5384 }
5385
5386 off = symbol_got_offset (input_bfd, h, r_symndx);
5387 base_got = globals->root.sgot;
5388 bfd_vma got_entry_addr = (base_got->output_section->vma
5389 + base_got->output_offset + off);
5390
5391 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5392 {
5393 bfd_put_64 (output_bfd, value, base_got->contents + off);
5394
5395 if (bfd_link_pic (info))
5396 {
5397 asection *s;
5398 Elf_Internal_Rela outrel;
5399
5400 		    /* For a local symbol, the absolute relocation has already
5401 		       been applied at static linking stage.  For a shared
5402 		       library, however, the GOT entry must be updated with
5403 		       the shared object's load base address, so generate an
5404 		       R_AARCH64_RELATIVE reloc for the dynamic linker. */
5405 s = globals->root.srelgot;
5406 if (s == NULL)
5407 abort ();
5408
5409 outrel.r_offset = got_entry_addr;
5410 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5411 outrel.r_addend = value;
5412 elf_append_rela (output_bfd, s, &outrel);
5413 }
5414
5415 symbol_got_offset_mark (input_bfd, h, r_symndx);
5416 }
5417
5418 /* Update the relocation value to GOT entry addr as we have transformed
5419 the direct data access into indirect data access through GOT. */
5420 value = got_entry_addr;
5421
5422 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5423 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5424 addend = base_got->output_section->vma + base_got->output_offset;
5425
5426 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5427 addend, weak_undef_p);
5428 }
5429
5430 break;
5431
5432 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
5433 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5434 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5435 if (h != NULL)
5436 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
5437 output_bfd,
5438 unresolved_reloc_p);
5439 else
5440 {
5441 struct elf_aarch64_local_symbol *locals
5442 = elf_aarch64_locals (input_bfd);
5443
5444 if (locals == NULL)
5445 {
5446 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5447 (*_bfd_error_handler)
5448 		(_("%B: Local symbol descriptor table must not be NULL when applying "
5449 "relocation %s against local symbol"),
5450 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5451 abort ();
5452 }
5453
5454 off = symbol_got_offset (input_bfd, h, r_symndx);
5455 base_got = globals->root.sgot;
5456 if (base_got == NULL)
5457 abort ();
5458
5459 bfd_vma got_entry_addr = (base_got->output_section->vma
5460 + base_got->output_offset + off);
5461
5462 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5463 {
5464 bfd_put_64 (output_bfd, value, base_got->contents + off);
5465
5466 if (bfd_link_pic (info))
5467 {
5468 asection *s;
5469 Elf_Internal_Rela outrel;
5470
5471 		    /* For a local symbol, the absolute relocation has already
5472 		       been applied at static linking stage.  For a shared
5473 		       library, however, the GOT entry must be updated with
5474 		       the shared object's load base address, so generate an
5475 		       R_AARCH64_RELATIVE reloc for the dynamic linker. */
5476 s = globals->root.srelgot;
5477 if (s == NULL)
5478 abort ();
5479
5480 outrel.r_offset = got_entry_addr;
5481 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5482 outrel.r_addend = value;
5483 elf_append_rela (output_bfd, s, &outrel);
5484 }
5485
5486 symbol_got_offset_mark (input_bfd, h, r_symndx);
5487 }
5488 }
5489
5490 /* Update the relocation value to GOT entry addr as we have transformed
5491 the direct data access into indirect data access through GOT. */
5492 value = symbol_got_offset (input_bfd, h, r_symndx);
5493 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5494 0, weak_undef_p);
5495 *unresolved_reloc_p = FALSE;
5496 break;
5497
5498 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5499 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5500 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5501 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5502 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5503 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5504 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5505 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5506 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5507 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5508 if (globals->root.sgot == NULL)
5509 return bfd_reloc_notsupported;
5510
5511 value = (symbol_got_offset (input_bfd, h, r_symndx)
5512 + globals->root.sgot->output_section->vma
5513 + globals->root.sgot->output_offset);
5514
5515 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5516 0, weak_undef_p);
5517 *unresolved_reloc_p = FALSE;
5518 break;
5519
5520 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5521 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5522 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5523 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5524 if (globals->root.sgot == NULL)
5525 return bfd_reloc_notsupported;
5526
5527 value = symbol_got_offset (input_bfd, h, r_symndx);
5528 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5529 0, weak_undef_p);
5530 *unresolved_reloc_p = FALSE;
5531 break;
5532
5533 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
5534 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
5535 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5536 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
5537 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
5538 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
5539 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
5540 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
5541 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
5542 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
5543 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
5544 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5545 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5546 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5547 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5548 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5549 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5550 signed_addend - dtpoff_base (info),
5551 weak_undef_p);
5552 break;
5553
5554 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5555 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5556 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5557 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5558 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5559 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5560 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5561 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5562 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5563 signed_addend - tpoff_base (info),
5564 weak_undef_p);
5565 *unresolved_reloc_p = FALSE;
5566 break;
5567
5568 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5569 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5570 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5571 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5572 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5573 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5574 if (globals->root.sgot == NULL)
5575 return bfd_reloc_notsupported;
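      /* TLSDESC GOT slots live in .got.plt, after the part used by the PLT
	 jump table; the recorded offset is relative to that point, hence
	 the sgotplt_jump_table_size adjustment.  */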
5576 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5577 + globals->root.sgotplt->output_section->vma
5578 + globals->root.sgotplt->output_offset
5579 + globals->sgotplt_jump_table_size);
5580
5581 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5582 0, weak_undef_p);
5583 *unresolved_reloc_p = FALSE;
5584 break;
5585
5586 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5587 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5588 if (globals->root.sgot == NULL)
5589 return bfd_reloc_notsupported;
5590
5591 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5592 + globals->root.sgotplt->output_section->vma
5593 + globals->root.sgotplt->output_offset
5594 + globals->sgotplt_jump_table_size);
5595
5596 value -= (globals->root.sgot->output_section->vma
5597 + globals->root.sgot->output_offset);
5598
5599 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5600 0, weak_undef_p);
5601 *unresolved_reloc_p = FALSE;
5602 break;
5603
5604 default:
5605 return bfd_reloc_notsupported;
5606 }
5607
5608 if (saved_addend)
5609 *saved_addend = value;
5610
5611 /* Only apply the final relocation in a sequence. */
5612 if (save_addend)
5613 return bfd_reloc_continue;
5614
5615 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5616 howto, value);
5617 }
5618
5619 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5620    R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static
5621 link.
5622
5623 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5624 is to then call final_link_relocate. Return other values in the
5625 case of error. */
5626
5627 static bfd_reloc_status_type
5628 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5629 bfd *input_bfd, bfd_byte *contents,
5630 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5631 {
5632 bfd_boolean is_local = h == NULL;
5633 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5634 unsigned long insn;
5635
5636 BFD_ASSERT (globals && input_bfd && contents && rel);
5637
5638 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5639 {
5640 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5641 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5642 if (is_local)
5643 {
5644 /* GD->LE relaxation:
5645 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5646 or
5647 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5648 */
5649 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5650 return bfd_reloc_continue;
5651 }
5652 else
5653 {
5654 /* GD->IE relaxation:
5655 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5656 or
5657 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5658 */
5659 return bfd_reloc_continue;
5660 }
5661
5662 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5663 BFD_ASSERT (0);
5664 break;
5665
5666 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5667 if (is_local)
5668 {
5669 /* Tiny TLSDESC->LE relaxation:
5670 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5671 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5672 .tlsdesccall var
5673 blr x1 => nop
5674 */
5675 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5676 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5677
5678 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5679 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5680 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5681
5682 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5683 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5684 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5685 return bfd_reloc_continue;
5686 }
5687 else
5688 {
5689 /* Tiny TLSDESC->IE relaxation:
5690 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5691 adr x0, :tlsdesc:var => nop
5692 .tlsdesccall var
5693 blr x1 => nop
5694 */
5695 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5696 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5697
5698 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5699 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5700
5701 bfd_putl32 (0x58000000, contents + rel->r_offset);
5702 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5703 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5704 return bfd_reloc_continue;
5705 }
5706
5707 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5708 if (is_local)
5709 {
5710 /* Tiny GD->LE relaxation:
5711 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5712 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5713 nop => add x0, x0, #:tprel_lo12_nc:x
5714 */
5715
5716 /* First kill the tls_get_addr reloc on the bl instruction. */
5717 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5718
5719 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5720 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5721 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5722
5723 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5724 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5725 rel[1].r_offset = rel->r_offset + 8;
5726
5727 /* Move the current relocation to the second instruction in
5728 the sequence. */
5729 rel->r_offset += 4;
5730 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5731 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5732 return bfd_reloc_continue;
5733 }
5734 else
5735 {
5736 /* Tiny GD->IE relaxation:
5737 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5738 bl __tls_get_addr => mrs x1, tpidr_el0
5739 nop => add x0, x0, x1
5740 */
5741
5742 /* First kill the tls_get_addr reloc on the bl instruction. */
5743 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5744 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5745
5746 bfd_putl32 (0x58000000, contents + rel->r_offset);
5747 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5748 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5749 return bfd_reloc_continue;
5750 }
5751
5752 #if ARCH_SIZE == 64
5753 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5754 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSGD_MOVW_G0_NC));
5755 BFD_ASSERT (rel->r_offset + 12 == rel[2].r_offset);
5756 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (CALL26));
5757
5758 if (is_local)
5759 {
5760 /* Large GD->LE relaxation:
5761 movz x0, #:tlsgd_g1:var => movz x0, #:tprel_g2:var, lsl #32
5762 movk x0, #:tlsgd_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
5763 add x0, gp, x0 => movk x0, #:tprel_g0_nc:var
5764 bl __tls_get_addr => mrs x1, tpidr_el0
5765 nop => add x0, x0, x1
5766 */
5767 rel[2].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5768 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5769 rel[2].r_offset = rel->r_offset + 8;
5770
5771 bfd_putl32 (0xd2c00000, contents + rel->r_offset + 0);
5772 bfd_putl32 (0xf2a00000, contents + rel->r_offset + 4);
5773 bfd_putl32 (0xf2800000, contents + rel->r_offset + 8);
5774 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
5775 bfd_putl32 (0x8b000020, contents + rel->r_offset + 16);
5776 }
5777 else
5778 {
5779 /* Large GD->IE relaxation:
5780 movz x0, #:tlsgd_g1:var => movz x0, #:gottprel_g1:var, lsl #16
5781 movk x0, #:tlsgd_g0_nc:var => movk x0, #:gottprel_g0_nc:var
5782 add x0, gp, x0 => ldr x0, [gp, x0]
5783 bl __tls_get_addr => mrs x1, tpidr_el0
5784 nop => add x0, x0, x1
5785 */
5786 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5787 bfd_putl32 (0xd2a80000, contents + rel->r_offset + 0);
5788 bfd_putl32 (0x58000000, contents + rel->r_offset + 8);
5789 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 12);
5790 bfd_putl32 (0x8b000020, contents + rel->r_offset + 16);
5791 }
5792 return bfd_reloc_continue;
5793
5794 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5795 return bfd_reloc_continue;
5796 #endif
5797
5798 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5799 return bfd_reloc_continue;
5800
5801 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5802 if (is_local)
5803 {
5804 /* GD->LE relaxation:
5805 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5806 */
5807 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5808 return bfd_reloc_continue;
5809 }
5810 else
5811 {
5812 /* GD->IE relaxation:
5813 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5814 */
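	  /* Clear the destination register field (bits 0-4) so that the
	     rewritten load targets x0.  */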
5815 insn = bfd_getl32 (contents + rel->r_offset);
5816 insn &= 0xffffffe0;
5817 bfd_putl32 (insn, contents + rel->r_offset);
5818 return bfd_reloc_continue;
5819 }
5820
5821 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5822 if (is_local)
5823 {
5824 /* GD->LE relaxation
5825 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5826 bl __tls_get_addr => mrs x1, tpidr_el0
5827 nop => add x0, x1, x0
5828 */
5829
5830 /* First kill the tls_get_addr reloc on the bl instruction. */
5831 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5832 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5833
5834 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5835 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5836 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5837 return bfd_reloc_continue;
5838 }
5839 else
5840 {
5841 /* GD->IE relaxation
5842 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5843 BL __tls_get_addr => mrs x1, tpidr_el0
5844 R_AARCH64_CALL26
5845 NOP => add x0, x1, x0
5846 */
5847
5848 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5849
5850 /* Remove the relocation on the BL instruction. */
5851 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5852
5853 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5854
5855 	  /* We choose to fix up the BL and NOP instructions using the
5856 offset from the second relocation to allow flexibility in
5857 scheduling instructions between the ADD and BL. */
5858 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5859 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5860 return bfd_reloc_continue;
5861 }
5862
5863 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5864 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5865 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5866 /* GD->IE/LE relaxation:
5867 add x0, x0, #:tlsdesc_lo12:var => nop
5868 blr xd => nop
5869 */
5870 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5871 return bfd_reloc_ok;
5872
5873 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5874 if (is_local)
5875 {
5876 /* GD->LE relaxation:
5877 ldr xd, [gp, xn] => movk x0, #:tprel_g0_nc:var
5878 */
5879 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5880 return bfd_reloc_continue;
5881 }
5882 else
5883 {
5884 /* GD->IE relaxation:
5885 ldr xd, [gp, xn] => ldr x0, [gp, xn]
5886 */
5887 insn = bfd_getl32 (contents + rel->r_offset);
5888 insn &= 0xffffffe0;
5889 bfd_putl32 (insn, contents + rel->r_offset);
5890 return bfd_reloc_ok;
5891 }
5892
5893 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5894 /* GD->LE relaxation:
5895 movk xd, #:tlsdesc_off_g0_nc:var => movk x0, #:tprel_g1_nc:var, lsl #16
5896 GD->IE relaxation:
5897 movk xd, #:tlsdesc_off_g0_nc:var => movk xd, #:gottprel_g0_nc:var
5898 */
5899 if (is_local)
5900 bfd_putl32 (0xf2a00000, contents + rel->r_offset);
5901 return bfd_reloc_continue;
5902
5903 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5904 if (is_local)
5905 {
5906 /* GD->LE relaxation:
5907 movz xd, #:tlsdesc_off_g1:var => movz x0, #:tprel_g2:var, lsl #32
5908 */
5909 bfd_putl32 (0xd2c00000, contents + rel->r_offset);
5910 return bfd_reloc_continue;
5911 }
5912 else
5913 {
5914 /* GD->IE relaxation:
5915 movz xd, #:tlsdesc_off_g1:var => movz xd, #:gottprel_g1:var, lsl #16
5916 */
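	  /* Keep the original destination register (low 5 bits) and replace
	     the rest of the instruction with MOVZ Xd, #0, LSL #16
	     (0xd2a00000); the immediate is filled in by the relocation.  */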
5917 insn = bfd_getl32 (contents + rel->r_offset);
5918 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5919 return bfd_reloc_continue;
5920 }
5921
5922 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5923 /* IE->LE relaxation:
5924 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5925 */
5926 if (is_local)
5927 {
5928 insn = bfd_getl32 (contents + rel->r_offset);
5929 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5930 }
5931 return bfd_reloc_continue;
5932
5933 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5934 /* IE->LE relaxation:
5935 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5936 */
5937 if (is_local)
5938 {
5939 insn = bfd_getl32 (contents + rel->r_offset);
5940 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5941 }
5942 return bfd_reloc_continue;
5943
5944 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5945 /* LD->LE relaxation (tiny):
5946 adr x0, :tlsldm:x => mrs x0, tpidr_el0
5947 bl __tls_get_addr => add x0, x0, TCB_SIZE
5948 */
5949 if (is_local)
5950 {
5951 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5952 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5953 	  /* No need for the CALL26 relocation against __tls_get_addr. */
5954 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5955 bfd_putl32 (0xd53bd040, contents + rel->r_offset + 0);
5956 bfd_putl32 (0x91004000, contents + rel->r_offset + 4);
5957 return bfd_reloc_ok;
5958 }
5959 return bfd_reloc_continue;
5960
5961 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5962 /* LD->LE relaxation (small):
5963 adrp x0, :tlsldm:x => mrs x0, tpidr_el0
5964 */
5965 if (is_local)
5966 {
5967 bfd_putl32 (0xd53bd040, contents + rel->r_offset);
5968 return bfd_reloc_ok;
5969 }
5970 return bfd_reloc_continue;
5971
5972 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5973 /* LD->LE relaxation (small):
5974 add x0, #:tlsldm_lo12:x => add x0, x0, TCB_SIZE
5975 bl __tls_get_addr => nop
5976 */
5977 if (is_local)
5978 {
5979 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5980 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5981 	  /* No need for the CALL26 relocation against __tls_get_addr. */
5982 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5983 bfd_putl32 (0x91004000, contents + rel->r_offset + 0);
5984 bfd_putl32 (0xd503201f, contents + rel->r_offset + 4);
5985 return bfd_reloc_ok;
5986 }
5987 return bfd_reloc_continue;
5988
5989 default:
5990 return bfd_reloc_continue;
5991 }
5992
5993 return bfd_reloc_ok;
5994 }
5995
5996 /* Relocate an AArch64 ELF section. */
5997
5998 static bfd_boolean
5999 elfNN_aarch64_relocate_section (bfd *output_bfd,
6000 struct bfd_link_info *info,
6001 bfd *input_bfd,
6002 asection *input_section,
6003 bfd_byte *contents,
6004 Elf_Internal_Rela *relocs,
6005 Elf_Internal_Sym *local_syms,
6006 asection **local_sections)
6007 {
6008 Elf_Internal_Shdr *symtab_hdr;
6009 struct elf_link_hash_entry **sym_hashes;
6010 Elf_Internal_Rela *rel;
6011 Elf_Internal_Rela *relend;
6012 const char *name;
6013 struct elf_aarch64_link_hash_table *globals;
6014 bfd_boolean save_addend = FALSE;
6015 bfd_vma addend = 0;
6016
6017 globals = elf_aarch64_hash_table (info);
6018
6019 symtab_hdr = &elf_symtab_hdr (input_bfd);
6020 sym_hashes = elf_sym_hashes (input_bfd);
6021
6022 rel = relocs;
6023 relend = relocs + input_section->reloc_count;
6024 for (; rel < relend; rel++)
6025 {
6026 unsigned int r_type;
6027 bfd_reloc_code_real_type bfd_r_type;
6028 bfd_reloc_code_real_type relaxed_bfd_r_type;
6029 reloc_howto_type *howto;
6030 unsigned long r_symndx;
6031 Elf_Internal_Sym *sym;
6032 asection *sec;
6033 struct elf_link_hash_entry *h;
6034 bfd_vma relocation;
6035 bfd_reloc_status_type r;
6036 arelent bfd_reloc;
6037 char sym_type;
6038 bfd_boolean unresolved_reloc = FALSE;
6039 char *error_message = NULL;
6040
6041 r_symndx = ELFNN_R_SYM (rel->r_info);
6042 r_type = ELFNN_R_TYPE (rel->r_info);
6043
6044 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
6045 howto = bfd_reloc.howto;
6046
6047 if (howto == NULL)
6048 {
6049 (*_bfd_error_handler)
6050 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
6051 input_bfd, input_section, r_type);
6052 return FALSE;
6053 }
6054 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
6055
6056 h = NULL;
6057 sym = NULL;
6058 sec = NULL;
6059
6060 if (r_symndx < symtab_hdr->sh_info)
6061 {
6062 sym = local_syms + r_symndx;
6063 sym_type = ELFNN_ST_TYPE (sym->st_info);
6064 sec = local_sections[r_symndx];
6065
6066 /* An object file might have a reference to a local
6067 undefined symbol. This is a daft object file, but we
6068 should at least do something about it. */
6069 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
6070 && bfd_is_und_section (sec)
6071 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
6072 {
6073 if (!info->callbacks->undefined_symbol
6074 (info, bfd_elf_string_from_elf_section
6075 (input_bfd, symtab_hdr->sh_link, sym->st_name),
6076 input_bfd, input_section, rel->r_offset, TRUE))
6077 return FALSE;
6078 }
6079
6080 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
6081
6082 /* Relocate against local STT_GNU_IFUNC symbol. */
6083 if (!bfd_link_relocatable (info)
6084 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
6085 {
6086 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
6087 rel, FALSE);
6088 if (h == NULL)
6089 abort ();
6090
6091 /* Set STT_GNU_IFUNC symbol value. */
6092 h->root.u.def.value = sym->st_value;
6093 h->root.u.def.section = sec;
6094 }
6095 }
6096 else
6097 {
6098 bfd_boolean warned, ignored;
6099
6100 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
6101 r_symndx, symtab_hdr, sym_hashes,
6102 h, sec, relocation,
6103 unresolved_reloc, warned, ignored);
6104
6105 sym_type = h->type;
6106 }
6107
6108 if (sec != NULL && discarded_section (sec))
6109 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
6110 rel, 1, relend, howto, 0, contents);
6111
6112 if (bfd_link_relocatable (info))
6113 continue;
6114
6115 if (h != NULL)
6116 name = h->root.root.string;
6117 else
6118 {
6119 name = (bfd_elf_string_from_elf_section
6120 (input_bfd, symtab_hdr->sh_link, sym->st_name));
6121 if (name == NULL || *name == '\0')
6122 name = bfd_section_name (input_bfd, sec);
6123 }
6124
6125 if (r_symndx != 0
6126 && r_type != R_AARCH64_NONE
6127 && r_type != R_AARCH64_NULL
6128 && (h == NULL
6129 || h->root.type == bfd_link_hash_defined
6130 || h->root.type == bfd_link_hash_defweak)
6131 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
6132 {
6133 (*_bfd_error_handler)
6134 ((sym_type == STT_TLS
6135 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
6136 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
6137 input_bfd,
6138 input_section, (long) rel->r_offset, howto->name, name);
6139 }
6140
6141 /* We relax only if we can see that there can be a valid transition
6142 	 from one reloc type to another.
6143 We call elfNN_aarch64_final_link_relocate unless we're completely
6144 done, i.e., the relaxation produced the final output we want. */
6145
6146 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
6147 h, r_symndx);
6148 if (relaxed_bfd_r_type != bfd_r_type)
6149 {
6150 bfd_r_type = relaxed_bfd_r_type;
6151 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
6152 BFD_ASSERT (howto != NULL);
6153 r_type = howto->type;
6154 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
6155 unresolved_reloc = 0;
6156 }
6157 else
6158 r = bfd_reloc_continue;
6159
6160 /* There may be multiple consecutive relocations for the
6161 same offset. In that case we are supposed to treat the
6162 output of each relocation as the addend for the next. */
6163 if (rel + 1 < relend
6164 && rel->r_offset == rel[1].r_offset
6165 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
6166 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
6167 save_addend = TRUE;
6168 else
6169 save_addend = FALSE;
6170
6171 if (r == bfd_reloc_continue)
6172 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
6173 input_section, contents, rel,
6174 relocation, info, sec,
6175 h, &unresolved_reloc,
6176 save_addend, &addend, sym);
6177
6178 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
6179 {
6180 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6181 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6182 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6183 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6184 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6185 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6186 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6187 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6188 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6189 {
6190 bfd_boolean need_relocs = FALSE;
6191 bfd_byte *loc;
6192 int indx;
6193 bfd_vma off;
6194
6195 off = symbol_got_offset (input_bfd, h, r_symndx);
6196 indx = h && h->dynindx != -1 ? h->dynindx : 0;
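	    /* indx is zero when the symbol resolves locally; any DTPMOD
	       reloc emitted below is then against symbol index 0, and the
	       DTPREL word can be filled in at link time.  */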
6197
6198 need_relocs =
6199 (bfd_link_pic (info) || indx != 0) &&
6200 (h == NULL
6201 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6202 || h->root.type != bfd_link_hash_undefweak);
6203
6204 BFD_ASSERT (globals->root.srelgot != NULL);
6205
6206 if (need_relocs)
6207 {
6208 Elf_Internal_Rela rela;
6209 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
6210 rela.r_addend = 0;
6211 rela.r_offset = globals->root.sgot->output_section->vma +
6212 globals->root.sgot->output_offset + off;
6213
6214
6215 loc = globals->root.srelgot->contents;
6216 loc += globals->root.srelgot->reloc_count++
6217 		     * RELOC_SIZE (globals);
6218 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6219
6220 bfd_reloc_code_real_type real_type =
6221 elfNN_aarch64_bfd_reloc_from_type (r_type);
6222
6223 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
6224 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
6225 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
6226 {
6227 		  /* For local dynamic, never generate a DTPREL reloc.
6228 		     Initialize the DTPREL slot to zero, so we get the module
6229 		     base address when the runtime TLS resolver is invoked. */
6230 bfd_put_NN (output_bfd, 0,
6231 globals->root.sgot->contents + off
6232 + GOT_ENTRY_SIZE);
6233 }
6234 else if (indx == 0)
6235 {
6236 bfd_put_NN (output_bfd,
6237 relocation - dtpoff_base (info),
6238 globals->root.sgot->contents + off
6239 + GOT_ENTRY_SIZE);
6240 }
6241 else
6242 {
6243 /* This TLS symbol is global. We emit a
6244 relocation to fixup the tls offset at load
6245 time. */
6246 rela.r_info =
6247 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
6248 rela.r_addend = 0;
6249 rela.r_offset =
6250 (globals->root.sgot->output_section->vma
6251 + globals->root.sgot->output_offset + off
6252 + GOT_ENTRY_SIZE);
6253
6254 loc = globals->root.srelgot->contents;
6255 loc += globals->root.srelgot->reloc_count++
6256 * RELOC_SIZE (globals);
6257 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6258 bfd_put_NN (output_bfd, (bfd_vma) 0,
6259 globals->root.sgot->contents + off
6260 + GOT_ENTRY_SIZE);
6261 }
6262 }
6263 else
6264 {
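		  /* No dynamic relocation is needed: module index 1
		     conventionally refers to the executable itself, and the
		     DTPREL offset is known at link time.  */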
6265 bfd_put_NN (output_bfd, (bfd_vma) 1,
6266 globals->root.sgot->contents + off);
6267 bfd_put_NN (output_bfd,
6268 relocation - dtpoff_base (info),
6269 globals->root.sgot->contents + off
6270 + GOT_ENTRY_SIZE);
6271 }
6272
6273 symbol_got_offset_mark (input_bfd, h, r_symndx);
6274 }
6275 break;
6276
6277 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6278 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
6279 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6280 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6281 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6282 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
6283 {
6284 bfd_boolean need_relocs = FALSE;
6285 bfd_byte *loc;
6286 int indx;
6287 bfd_vma off;
6288
6289 off = symbol_got_offset (input_bfd, h, r_symndx);
6290
6291 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6292
6293 need_relocs =
6294 (bfd_link_pic (info) || indx != 0) &&
6295 (h == NULL
6296 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6297 || h->root.type != bfd_link_hash_undefweak);
6298
6299 BFD_ASSERT (globals->root.srelgot != NULL);
6300
6301 if (need_relocs)
6302 {
6303 Elf_Internal_Rela rela;
6304
6305 if (indx == 0)
6306 rela.r_addend = relocation - dtpoff_base (info);
6307 else
6308 rela.r_addend = 0;
6309
6310 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
6311 rela.r_offset = globals->root.sgot->output_section->vma +
6312 globals->root.sgot->output_offset + off;
6313
6314 loc = globals->root.srelgot->contents;
6315 loc += globals->root.srelgot->reloc_count++
6316 		      * RELOC_SIZE (globals);
6317
6318 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6319
6320 bfd_put_NN (output_bfd, rela.r_addend,
6321 globals->root.sgot->contents + off);
6322 }
6323 else
6324 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
6325 globals->root.sgot->contents + off);
6326
6327 symbol_got_offset_mark (input_bfd, h, r_symndx);
6328 }
6329 break;
6330
6331 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6332 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6333 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6334 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
6335 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6336 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6337 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6338 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
6339 {
6340 bfd_boolean need_relocs = FALSE;
6341 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
6342 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
6343
6344 need_relocs = (h == NULL
6345 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6346 || h->root.type != bfd_link_hash_undefweak);
6347
6348 BFD_ASSERT (globals->root.srelgot != NULL);
6349 BFD_ASSERT (globals->root.sgot != NULL);
6350
6351 if (need_relocs)
6352 {
6353 bfd_byte *loc;
6354 Elf_Internal_Rela rela;
6355 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
6356
6357 rela.r_addend = 0;
6358 rela.r_offset = (globals->root.sgotplt->output_section->vma
6359 + globals->root.sgotplt->output_offset
6360 + off + globals->sgotplt_jump_table_size);
6361
6362 if (indx == 0)
6363 rela.r_addend = relocation - dtpoff_base (info);
6364
6365 		  /* Allocate the next available slot in the PLT reloc
6366 		     section to hold our R_AARCH64_TLSDESC; the next
6367 		     available slot is determined from reloc_count,
6368 		     which we step.  Note that reloc_count was
6369 		     artificially moved down while allocating slots for
6370 		     the real PLT relocs, so that all of the PLT relocs
6371 		     fit above the initial reloc_count and the extra
6372 		     entries fit below. */
6373 loc = globals->root.srelplt->contents;
6374 loc += globals->root.srelplt->reloc_count++
6375 * RELOC_SIZE (globals);
6376
6377 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6378
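		  /* Clear both words of the TLS descriptor in .got.plt; the
		     dynamic linker initialises them when it processes the
		     R_AARCH64_TLSDESC relocation emitted above.  */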
6379 bfd_put_NN (output_bfd, (bfd_vma) 0,
6380 globals->root.sgotplt->contents + off +
6381 globals->sgotplt_jump_table_size);
6382 bfd_put_NN (output_bfd, (bfd_vma) 0,
6383 globals->root.sgotplt->contents + off +
6384 globals->sgotplt_jump_table_size +
6385 GOT_ENTRY_SIZE);
6386 }
6387
6388 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
6389 }
6390 break;
6391 default:
6392 break;
6393 }
6394
6395 if (!save_addend)
6396 addend = 0;
6397
6398
6399 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
6400 because such sections are not SEC_ALLOC and thus ld.so will
6401 not process them. */
6402 if (unresolved_reloc
6403 && !((input_section->flags & SEC_DEBUGGING) != 0
6404 && h->def_dynamic)
6405 && _bfd_elf_section_offset (output_bfd, info, input_section,
6406 				      rel->r_offset) != (bfd_vma) - 1)
6407 {
6408 (*_bfd_error_handler)
6409 (_
6410 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
6411 input_bfd, input_section, (long) rel->r_offset, howto->name,
6412 h->root.root.string);
6413 return FALSE;
6414 }
6415
6416 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
6417 {
6418 bfd_reloc_code_real_type real_r_type
6419 = elfNN_aarch64_bfd_reloc_from_type (r_type);
6420
6421 switch (r)
6422 {
6423 case bfd_reloc_overflow:
6424 if (!(*info->callbacks->reloc_overflow)
6425 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
6426 input_bfd, input_section, rel->r_offset))
6427 return FALSE;
6428 if (real_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
6429 || real_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
6430 {
6431 (*info->callbacks->warning)
6432 (info,
6433 _("Too many GOT entries for -fpic, "
6434 "please recompile with -fPIC"),
6435 name, input_bfd, input_section, rel->r_offset);
6436 return FALSE;
6437 }
6438 break;
6439
6440 case bfd_reloc_undefined:
6441 if (!((*info->callbacks->undefined_symbol)
6442 (info, name, input_bfd, input_section,
6443 rel->r_offset, TRUE)))
6444 return FALSE;
6445 break;
6446
6447 case bfd_reloc_outofrange:
6448 error_message = _("out of range");
6449 goto common_error;
6450
6451 case bfd_reloc_notsupported:
6452 error_message = _("unsupported relocation");
6453 goto common_error;
6454
6455 case bfd_reloc_dangerous:
6456 /* error_message should already be set. */
6457 goto common_error;
6458
6459 default:
6460 error_message = _("unknown error");
6461 /* Fall through. */
6462
6463 common_error:
6464 BFD_ASSERT (error_message != NULL);
6465 if (!((*info->callbacks->reloc_dangerous)
6466 (info, error_message, input_bfd, input_section,
6467 rel->r_offset)))
6468 return FALSE;
6469 break;
6470 }
6471 }
6472 }
6473
6474 return TRUE;
6475 }
6476
6477 /* Set the right machine number. */
6478
6479 static bfd_boolean
6480 elfNN_aarch64_object_p (bfd *abfd)
6481 {
6482 #if ARCH_SIZE == 32
6483 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
6484 #else
6485 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
6486 #endif
6487 return TRUE;
6488 }
6489
6490 /* Function to keep AArch64 specific flags in the ELF header. */
6491
6492 static bfd_boolean
6493 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
6494 {
6495 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
6496 {
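      /* The flags have already been initialised to a different value;
	 leave them unchanged here.  */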
6497 }
6498 else
6499 {
6500 elf_elfheader (abfd)->e_flags = flags;
6501 elf_flags_init (abfd) = TRUE;
6502 }
6503
6504 return TRUE;
6505 }
6506
6507 /* Merge backend specific data from an object file to the output
6508 object file when linking. */
6509
6510 static bfd_boolean
6511 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
6512 {
6513 flagword out_flags;
6514 flagword in_flags;
6515 bfd_boolean flags_compatible = TRUE;
6516 asection *sec;
6517
6518   /* Check if we have the same endianness. */
6519 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
6520 return FALSE;
6521
6522 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
6523 return TRUE;
6524
6525 /* The input BFD must have had its flags initialised. */
6526 /* The following seems bogus to me -- The flags are initialized in
6527 the assembler but I don't think an elf_flags_init field is
6528 written into the object. */
6529 /* BFD_ASSERT (elf_flags_init (ibfd)); */
6530
6531 in_flags = elf_elfheader (ibfd)->e_flags;
6532 out_flags = elf_elfheader (obfd)->e_flags;
6533
6534 if (!elf_flags_init (obfd))
6535 {
6536 /* If the input is the default architecture and had the default
6537 flags then do not bother setting the flags for the output
6538 architecture, instead allow future merges to do this. If no
6539 future merges ever set these flags then they will retain their
6540 	 uninitialised values, which, surprise surprise, correspond
6541 to the default values. */
6542 if (bfd_get_arch_info (ibfd)->the_default
6543 && elf_elfheader (ibfd)->e_flags == 0)
6544 return TRUE;
6545
6546 elf_flags_init (obfd) = TRUE;
6547 elf_elfheader (obfd)->e_flags = in_flags;
6548
6549 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
6550 && bfd_get_arch_info (obfd)->the_default)
6551 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
6552 bfd_get_mach (ibfd));
6553
6554 return TRUE;
6555 }
6556
6557 /* Identical flags must be compatible. */
6558 if (in_flags == out_flags)
6559 return TRUE;
6560
6561 /* Check to see if the input BFD actually contains any sections. If
6562 not, its flags may not have been initialised either, but it
6563      cannot actually cause any incompatibility. Do not short-circuit
6564 dynamic objects; their section list may be emptied by
6565 elf_link_add_object_symbols.
6566
6567 Also check to see if there are no code sections in the input.
6568 In this case there is no need to check for code specific flags.
6569      XXX - do we need to worry about floating-point format compatibility
6570      in data sections? */
6571 if (!(ibfd->flags & DYNAMIC))
6572 {
6573 bfd_boolean null_input_bfd = TRUE;
6574 bfd_boolean only_data_sections = TRUE;
6575
6576 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
6577 {
6578 if ((bfd_get_section_flags (ibfd, sec)
6579 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
6580 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
6581 only_data_sections = FALSE;
6582
6583 null_input_bfd = FALSE;
6584 break;
6585 }
6586
6587 if (null_input_bfd || only_data_sections)
6588 return TRUE;
6589 }
6590
6591 return flags_compatible;
6592 }
6593
6594 /* Display the flags field. */
6595
6596 static bfd_boolean
6597 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
6598 {
6599 FILE *file = (FILE *) ptr;
6600 unsigned long flags;
6601
6602 BFD_ASSERT (abfd != NULL && ptr != NULL);
6603
6604 /* Print normal ELF private data. */
6605 _bfd_elf_print_private_bfd_data (abfd, ptr);
6606
6607 flags = elf_elfheader (abfd)->e_flags;
6608 /* Ignore init flag - it may not be set, despite the flags field
6609 containing valid data. */
6610
6611 /* xgettext:c-format */
6612 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
6613
6614 if (flags)
6615 fprintf (file, _("<Unrecognised flag bits set>"));
6616
6617 fputc ('\n', file);
6618
6619 return TRUE;
6620 }
6621
6622 /* Update the got entry reference counts for the section being removed. */
6623
6624 static bfd_boolean
6625 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
6626 struct bfd_link_info *info,
6627 asection *sec,
6628 const Elf_Internal_Rela * relocs)
6629 {
6630 struct elf_aarch64_link_hash_table *htab;
6631 Elf_Internal_Shdr *symtab_hdr;
6632 struct elf_link_hash_entry **sym_hashes;
6633 struct elf_aarch64_local_symbol *locals;
6634 const Elf_Internal_Rela *rel, *relend;
6635
6636 if (bfd_link_relocatable (info))
6637 return TRUE;
6638
6639 htab = elf_aarch64_hash_table (info);
6640
6641 if (htab == NULL)
6642 return FALSE;
6643
6644 elf_section_data (sec)->local_dynrel = NULL;
6645
6646 symtab_hdr = &elf_symtab_hdr (abfd);
6647 sym_hashes = elf_sym_hashes (abfd);
6648
6649 locals = elf_aarch64_locals (abfd);
6650
6651 relend = relocs + sec->reloc_count;
6652 for (rel = relocs; rel < relend; rel++)
6653 {
6654 unsigned long r_symndx;
6655 unsigned int r_type;
6656 struct elf_link_hash_entry *h = NULL;
6657
6658 r_symndx = ELFNN_R_SYM (rel->r_info);
6659
6660 if (r_symndx >= symtab_hdr->sh_info)
6661 {
6662
6663 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6664 while (h->root.type == bfd_link_hash_indirect
6665 || h->root.type == bfd_link_hash_warning)
6666 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6667 }
6668 else
6669 {
6670 Elf_Internal_Sym *isym;
6671
6672 /* A local symbol. */
6673 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6674 abfd, r_symndx);
6675
6676 /* Check relocation against local STT_GNU_IFUNC symbol. */
6677 if (isym != NULL
6678 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6679 {
6680 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6681 if (h == NULL)
6682 abort ();
6683 }
6684 }
6685
6686 if (h)
6687 {
6688 struct elf_aarch64_link_hash_entry *eh;
6689 struct elf_dyn_relocs **pp;
6690 struct elf_dyn_relocs *p;
6691
6692 eh = (struct elf_aarch64_link_hash_entry *) h;
6693
6694 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6695 if (p->sec == sec)
6696 {
6697 /* Everything must go for SEC. */
6698 *pp = p->next;
6699 break;
6700 }
6701 }
6702
6703 r_type = ELFNN_R_TYPE (rel->r_info);
6704       switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6705 {
6706 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6707 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6708 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6709 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6710 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6711 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6712 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6713 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6714 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6715 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6716 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6717 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6718 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6719 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6720 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6721 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6722 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6723 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6724 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6725 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6726 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6727 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6728 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6729 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6730 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6731 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6732 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6733 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6734 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6735 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6736 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6737 if (h != NULL)
6738 {
6739 if (h->got.refcount > 0)
6740 h->got.refcount -= 1;
6741
6742 if (h->type == STT_GNU_IFUNC)
6743 {
6744 if (h->plt.refcount > 0)
6745 h->plt.refcount -= 1;
6746 }
6747 }
6748 else if (locals != NULL)
6749 {
6750 if (locals[r_symndx].got_refcount > 0)
6751 locals[r_symndx].got_refcount -= 1;
6752 }
6753 break;
6754
6755 case BFD_RELOC_AARCH64_CALL26:
6756 case BFD_RELOC_AARCH64_JUMP26:
6757 /* If this is a local symbol then we resolve it
6758 directly without creating a PLT entry. */
6759 if (h == NULL)
6760 continue;
6761
6762 if (h->plt.refcount > 0)
6763 h->plt.refcount -= 1;
6764 break;
6765
6766 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6767 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6768 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6769 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6770 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6771 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6772 case BFD_RELOC_AARCH64_MOVW_G3:
6773 case BFD_RELOC_AARCH64_NN:
6774 if (h != NULL && bfd_link_executable (info))
6775 {
6776 if (h->plt.refcount > 0)
6777 h->plt.refcount -= 1;
6778 }
6779 break;
6780
6781 default:
6782 break;
6783 }
6784 }
6785
6786 return TRUE;
6787 }
6788
6789 /* Adjust a symbol defined by a dynamic object and referenced by a
6790 regular object. The current definition is in some section of the
6791 dynamic object, but we're not including those sections. We have to
6792 change the definition to something the rest of the link can
6793 understand. */
6794
6795 static bfd_boolean
6796 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6797 struct elf_link_hash_entry *h)
6798 {
6799 struct elf_aarch64_link_hash_table *htab;
6800 asection *s;
6801
6802 /* If this is a function, put it in the procedure linkage table. We
6803 will fill in the contents of the procedure linkage table later,
6804 when we know the address of the .got section. */
6805 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6806 {
6807 if (h->plt.refcount <= 0
6808 || (h->type != STT_GNU_IFUNC
6809 && (SYMBOL_CALLS_LOCAL (info, h)
6810 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6811 && h->root.type == bfd_link_hash_undefweak))))
6812 {
6813 	  /* This case can occur if we saw a CALL26 reloc in
6814 	     an input file, but the symbol wasn't referred to
6815 	     by a dynamic object, or all references were
6816 	     garbage collected.  In such a case we do not need
6817 	     a PLT entry; the call can be resolved directly. */
6818 h->plt.offset = (bfd_vma) - 1;
6819 h->needs_plt = 0;
6820 }
6821
6822 return TRUE;
6823 }
6824 else
6825 /* Otherwise, reset to -1. */
6826 h->plt.offset = (bfd_vma) - 1;
6827
6828
6829 /* If this is a weak symbol, and there is a real definition, the
6830 processor independent code will have arranged for us to see the
6831 real definition first, and we can just use the same value. */
6832 if (h->u.weakdef != NULL)
6833 {
6834 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6835 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6836 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6837 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6838 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6839 h->non_got_ref = h->u.weakdef->non_got_ref;
6840 return TRUE;
6841 }
6842
6843 /* If we are creating a shared library, we must presume that the
6844 only references to the symbol are via the global offset table.
6845 For such cases we need not do anything here; the relocations will
6846 be handled correctly by relocate_section. */
6847 if (bfd_link_pic (info))
6848 return TRUE;
6849
6850 /* If there are no references to this symbol that do not use the
6851 GOT, we don't need to generate a copy reloc. */
6852 if (!h->non_got_ref)
6853 return TRUE;
6854
6855 /* If -z nocopyreloc was given, we won't generate them either. */
6856 if (info->nocopyreloc)
6857 {
6858 h->non_got_ref = 0;
6859 return TRUE;
6860 }
6861
6862 /* We must allocate the symbol in our .dynbss section, which will
6863 become part of the .bss section of the executable. There will be
6864 an entry for this symbol in the .dynsym section. The dynamic
6865 object will contain position independent code, so all references
6866 from the dynamic object to this symbol will go through the global
6867 offset table. The dynamic linker will use the .dynsym entry to
6868 determine the address it must put in the global offset table, so
6869 both the dynamic object and the regular object will refer to the
6870 same memory location for the variable. */
6871
6872 htab = elf_aarch64_hash_table (info);
6873
6874 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6875 to copy the initial value out of the dynamic object and into the
6876 runtime process image. */
6877 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6878 {
6879 htab->srelbss->size += RELOC_SIZE (htab);
6880 h->needs_copy = 1;
6881 }
6882
6883 s = htab->sdynbss;
6884
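  /* _bfd_elf_adjust_dynamic_copy reserves space for the symbol in .dynbss
     and redirects the symbol's definition to it.  */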
6885 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6886
6887 }
6888
6889 static bfd_boolean
6890 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6891 {
6892 struct elf_aarch64_local_symbol *locals;
6893 locals = elf_aarch64_locals (abfd);
6894 if (locals == NULL)
6895 {
6896 locals = (struct elf_aarch64_local_symbol *)
6897 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6898 if (locals == NULL)
6899 return FALSE;
6900 elf_aarch64_locals (abfd) = locals;
6901 }
6902 return TRUE;
6903 }
6904
6905 /* Create the .got section to hold the global offset table. */
6906
6907 static bfd_boolean
6908 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6909 {
6910 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6911 flagword flags;
6912 asection *s;
6913 struct elf_link_hash_entry *h;
6914 struct elf_link_hash_table *htab = elf_hash_table (info);
6915
6916 /* This function may be called more than once. */
6917 s = bfd_get_linker_section (abfd, ".got");
6918 if (s != NULL)
6919 return TRUE;
6920
6921 flags = bed->dynamic_sec_flags;
6922
6923 s = bfd_make_section_anyway_with_flags (abfd,
6924 (bed->rela_plts_and_copies_p
6925 ? ".rela.got" : ".rel.got"),
6926 (bed->dynamic_sec_flags
6927 | SEC_READONLY));
6928 if (s == NULL
6929 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6930 return FALSE;
6931 htab->srelgot = s;
6932
6933 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6934 if (s == NULL
6935 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6936 return FALSE;
6937 htab->sgot = s;
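  /* Reserve the first .got entry; the backend initialises it later
     (conventionally with the address of the dynamic section).  */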
6938 htab->sgot->size += GOT_ENTRY_SIZE;
6939
6940 if (bed->want_got_sym)
6941 {
6942 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6943 (or .got.plt) section. We don't do this in the linker script
6944 because we don't want to define the symbol if we are not creating
6945 a global offset table. */
6946 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6947 "_GLOBAL_OFFSET_TABLE_");
6948 elf_hash_table (info)->hgot = h;
6949 if (h == NULL)
6950 return FALSE;
6951 }
6952
6953 if (bed->want_got_plt)
6954 {
6955 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6956 if (s == NULL
6957 || !bfd_set_section_alignment (abfd, s,
6958 bed->s->log_file_align))
6959 return FALSE;
6960 htab->sgotplt = s;
6961 }
6962
6963 /* The first bit of the global offset table is the header. */
6964 s->size += bed->got_header_size;
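/* When a .got.plt section was created above, S refers to it here, so
   these header slots are the ones that the PLT code (see
   elfNN_aarch64_create_small_pltn_entry) treats as reserved for the
   dynamic linker. */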
6965
6966 return TRUE;
6967 }
6968
6969 /* Look through the relocs for a section during the first phase. */
6970
6971 static bfd_boolean
6972 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6973 asection *sec, const Elf_Internal_Rela *relocs)
6974 {
6975 Elf_Internal_Shdr *symtab_hdr;
6976 struct elf_link_hash_entry **sym_hashes;
6977 const Elf_Internal_Rela *rel;
6978 const Elf_Internal_Rela *rel_end;
6979 asection *sreloc;
6980
6981 struct elf_aarch64_link_hash_table *htab;
6982
6983 if (bfd_link_relocatable (info))
6984 return TRUE;
6985
6986 BFD_ASSERT (is_aarch64_elf (abfd));
6987
6988 htab = elf_aarch64_hash_table (info);
6989 sreloc = NULL;
6990
6991 symtab_hdr = &elf_symtab_hdr (abfd);
6992 sym_hashes = elf_sym_hashes (abfd);
6993
6994 rel_end = relocs + sec->reloc_count;
6995 for (rel = relocs; rel < rel_end; rel++)
6996 {
6997 struct elf_link_hash_entry *h;
6998 unsigned long r_symndx;
6999 unsigned int r_type;
7000 bfd_reloc_code_real_type bfd_r_type;
7001 Elf_Internal_Sym *isym;
7002
7003 r_symndx = ELFNN_R_SYM (rel->r_info);
7004 r_type = ELFNN_R_TYPE (rel->r_info);
7005
7006 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
7007 {
7008 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
7009 r_symndx);
7010 return FALSE;
7011 }
7012
7013 if (r_symndx < symtab_hdr->sh_info)
7014 {
7015 /* A local symbol. */
7016 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
7017 abfd, r_symndx);
7018 if (isym == NULL)
7019 return FALSE;
7020
7021 /* Check relocation against local STT_GNU_IFUNC symbol. */
7022 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
7023 {
7024 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
7025 TRUE);
7026 if (h == NULL)
7027 return FALSE;
7028
7029 /* Fake a STT_GNU_IFUNC symbol. */
7030 h->type = STT_GNU_IFUNC;
7031 h->def_regular = 1;
7032 h->ref_regular = 1;
7033 h->forced_local = 1;
7034 h->root.type = bfd_link_hash_defined;
7035 }
7036 else
7037 h = NULL;
7038 }
7039 else
7040 {
7041 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
7042 while (h->root.type == bfd_link_hash_indirect
7043 || h->root.type == bfd_link_hash_warning)
7044 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7045
7046 /* PR15323, ref flags aren't set for references in the same
7047 object. */
7048 h->root.non_ir_ref = 1;
7049 }
7050
7051 /* Could be done earlier, if h were already available. */
7052 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
7053
7054 if (h != NULL)
7055 {
7056 /* Create the ifunc sections for static executables. If we
7057 never see an indirect function symbol and are not building
7058 a static executable, those sections will be empty and
7059 won't appear in the output. */
7060 switch (bfd_r_type)
7061 {
7062 default:
7063 break;
7064
7065 case BFD_RELOC_AARCH64_ADD_LO12:
7066 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7067 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7068 case BFD_RELOC_AARCH64_CALL26:
7069 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7070 case BFD_RELOC_AARCH64_JUMP26:
7071 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7072 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7073 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7074 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7075 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7076 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7077 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7078 case BFD_RELOC_AARCH64_NN:
7079 if (htab->root.dynobj == NULL)
7080 htab->root.dynobj = abfd;
7081 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
7082 return FALSE;
7083 break;
7084 }
7085
7086 /* It is referenced by a non-shared object. */
7087 h->ref_regular = 1;
7088 h->root.non_ir_ref = 1;
7089 }
7090
7091 switch (bfd_r_type)
7092 {
7093 case BFD_RELOC_AARCH64_NN:
7094
7095 /* We don't need to handle relocs into sections not going into
7096 the "real" output. */
7097 if ((sec->flags & SEC_ALLOC) == 0)
7098 break;
7099
7100 if (h != NULL)
7101 {
7102 if (!bfd_link_pic (info))
7103 h->non_got_ref = 1;
7104
7105 h->plt.refcount += 1;
7106 h->pointer_equality_needed = 1;
7107 }
7108
7109 /* No need to do anything if we're not creating a shared
7110 object. */
7111 if (! bfd_link_pic (info))
7112 break;
7113
7114 {
7115 struct elf_dyn_relocs *p;
7116 struct elf_dyn_relocs **head;
7117
7118 /* We must copy these reloc types into the output file.
7119 Create a reloc section in dynobj and make room for
7120 this reloc. */
7121 if (sreloc == NULL)
7122 {
7123 if (htab->root.dynobj == NULL)
7124 htab->root.dynobj = abfd;
7125
7126 sreloc = _bfd_elf_make_dynamic_reloc_section
7127 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
7128
7129 if (sreloc == NULL)
7130 return FALSE;
7131 }
7132
7133 /* If this is a global symbol, we count the number of
7134 relocations we need for this symbol. */
7135 if (h != NULL)
7136 {
7137 struct elf_aarch64_link_hash_entry *eh;
7138 eh = (struct elf_aarch64_link_hash_entry *) h;
7139 head = &eh->dyn_relocs;
7140 }
7141 else
7142 {
7143 /* Track dynamic relocs needed for local syms too.
7144 We really need local syms available to do this
7145 easily. Oh well. */
7146
7147 asection *s;
7148 void **vpp;
7149
7150 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
7151 abfd, r_symndx);
7152 if (isym == NULL)
7153 return FALSE;
7154
7155 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
7156 if (s == NULL)
7157 s = sec;
7158
7159 /* Beware of type punned pointers vs strict aliasing
7160 rules. */
7161 vpp = &(elf_section_data (s)->local_dynrel);
7162 head = (struct elf_dyn_relocs **) vpp;
7163 }
7164
7165 p = *head;
7166 if (p == NULL || p->sec != sec)
7167 {
7168 bfd_size_type amt = sizeof *p;
7169 p = ((struct elf_dyn_relocs *)
7170 bfd_zalloc (htab->root.dynobj, amt));
7171 if (p == NULL)
7172 return FALSE;
7173 p->next = *head;
7174 *head = p;
7175 p->sec = sec;
7176 }
7177
7178 p->count += 1;
7179
7180 }
7181 break;
7182
7183 /* RR: We probably want to keep a consistency check that
7184 there are no dangling GOT_PAGE relocs. */
7185 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7186 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7187 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7188 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7189 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7190 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7191 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7192 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7193 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7194 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7195 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7196 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7197 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7198 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7199 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7200 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7201 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7202 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7203 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7204 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7205 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7206 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7207 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7208 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7209 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7210 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7211 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7212 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7213 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7214 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7215 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7216 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7217 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7218 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7219 {
7220 unsigned got_type;
7221 unsigned old_got_type;
7222
7223 got_type = aarch64_reloc_got_type (bfd_r_type);
7224
7225 if (h)
7226 {
7227 h->got.refcount += 1;
7228 old_got_type = elf_aarch64_hash_entry (h)->got_type;
7229 }
7230 else
7231 {
7232 struct elf_aarch64_local_symbol *locals;
7233
7234 if (!elfNN_aarch64_allocate_local_symbols
7235 (abfd, symtab_hdr->sh_info))
7236 return FALSE;
7237
7238 locals = elf_aarch64_locals (abfd);
7239 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7240 locals[r_symndx].got_refcount += 1;
7241 old_got_type = locals[r_symndx].got_type;
7242 }
7243
7244 /* If a variable is accessed with both general dynamic TLS
7245 methods, two slots may be created. */
7246 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
7247 got_type |= old_got_type;
7248
7249 /* We will already have issued an error message if there
7250 is a TLS/non-TLS mismatch, based on the symbol type.
7251 So just combine any TLS types needed. */
7252 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
7253 && got_type != GOT_NORMAL)
7254 got_type |= old_got_type;
7255
7256 /* If the symbol is accessed by both IE and GD methods, we
7257 are able to relax. Turn off the GD flag, without
7258 disturbing any other TLS types that may be
7259 involved. */
7260 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
7261 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
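/* Illustrative example: a symbol referenced through both a TLSDESC
   sequence and an IE sequence would hold GOT_TLSDESC_GD | GOT_TLS_IE
   at this point; the statement above drops the GD-style bits so that
   only the single IE GOT slot is allocated later. */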
7262
7263 if (old_got_type != got_type)
7264 {
7265 if (h != NULL)
7266 elf_aarch64_hash_entry (h)->got_type = got_type;
7267 else
7268 {
7269 struct elf_aarch64_local_symbol *locals;
7270 locals = elf_aarch64_locals (abfd);
7271 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
7272 locals[r_symndx].got_type = got_type;
7273 }
7274 }
7275
7276 if (htab->root.dynobj == NULL)
7277 htab->root.dynobj = abfd;
7278 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
7279 return FALSE;
7280 break;
7281 }
7282
7283 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7284 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7285 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7286 case BFD_RELOC_AARCH64_MOVW_G3:
7287 if (bfd_link_pic (info))
7288 {
7289 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
7290 (*_bfd_error_handler)
7291 (_("%B: relocation %s against `%s' can not be used when making "
7292 "a shared object; recompile with -fPIC"),
7293 abfd, elfNN_aarch64_howto_table[howto_index].name,
7294 (h) ? h->root.root.string : "a local symbol");
7295 bfd_set_error (bfd_error_bad_value);
7296 return FALSE;
7297 }
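/* Fall through: when not creating a shared object, these are
   handled like the PC-relative relocations below. */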
7298
7299 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7300 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7301 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7302 if (h != NULL && bfd_link_executable (info))
7303 {
7304 /* If this reloc is in a read-only section, we might
7305 need a copy reloc. We can't check reliably at this
7306 stage whether the section is read-only, as input
7307 sections have not yet been mapped to output sections.
7308 Tentatively set the flag for now, and correct in
7309 adjust_dynamic_symbol. */
7310 h->non_got_ref = 1;
7311 h->plt.refcount += 1;
7312 h->pointer_equality_needed = 1;
7313 }
7314 /* FIXME: RR: we need to handle these in shared libraries,
7315 essentially bombing out because they are non-PIC
7316 relocations in shared libraries. */
7317 break;
7318
7319 case BFD_RELOC_AARCH64_CALL26:
7320 case BFD_RELOC_AARCH64_JUMP26:
7321 /* If this is a local symbol then we resolve it
7322 directly without creating a PLT entry. */
7323 if (h == NULL)
7324 continue;
7325
7326 h->needs_plt = 1;
7327 if (h->plt.refcount <= 0)
7328 h->plt.refcount = 1;
7329 else
7330 h->plt.refcount += 1;
7331 break;
7332
7333 default:
7334 break;
7335 }
7336 }
7337
7338 return TRUE;
7339 }
7340
7341 /* Treat mapping symbols as special target symbols. */
7342
7343 static bfd_boolean
7344 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
7345 asymbol *sym)
7346 {
7347 return bfd_is_aarch64_special_symbol_name (sym->name,
7348 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
7349 }
7350
7351 /* This is a copy of elf_find_function () from elf.c except that
7352 AArch64 mapping symbols are ignored when looking for function names. */
7353
7354 static bfd_boolean
7355 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
7356 asymbol **symbols,
7357 asection *section,
7358 bfd_vma offset,
7359 const char **filename_ptr,
7360 const char **functionname_ptr)
7361 {
7362 const char *filename = NULL;
7363 asymbol *func = NULL;
7364 bfd_vma low_func = 0;
7365 asymbol **p;
7366
7367 for (p = symbols; *p != NULL; p++)
7368 {
7369 elf_symbol_type *q;
7370
7371 q = (elf_symbol_type *) * p;
7372
7373 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
7374 {
7375 default:
7376 break;
7377 case STT_FILE:
7378 filename = bfd_asymbol_name (&q->symbol);
7379 break;
7380 case STT_FUNC:
7381 case STT_NOTYPE:
7382 /* Skip mapping symbols. */
7383 if ((q->symbol.flags & BSF_LOCAL)
7384 && (bfd_is_aarch64_special_symbol_name
7385 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
7386 continue;
7387 /* Fall through. */
7388 if (bfd_get_section (&q->symbol) == section
7389 && q->symbol.value >= low_func && q->symbol.value <= offset)
7390 {
7391 func = (asymbol *) q;
7392 low_func = q->symbol.value;
7393 }
7394 break;
7395 }
7396 }
7397
7398 if (func == NULL)
7399 return FALSE;
7400
7401 if (filename_ptr)
7402 *filename_ptr = filename;
7403 if (functionname_ptr)
7404 *functionname_ptr = bfd_asymbol_name (func);
7405
7406 return TRUE;
7407 }
7408
7409
7410 /* Find the nearest line to a particular section and offset, for error
7411 reporting. This code is a duplicate of the code in elf.c, except
7412 that it uses aarch64_elf_find_function. */
7413
7414 static bfd_boolean
7415 elfNN_aarch64_find_nearest_line (bfd *abfd,
7416 asymbol **symbols,
7417 asection *section,
7418 bfd_vma offset,
7419 const char **filename_ptr,
7420 const char **functionname_ptr,
7421 unsigned int *line_ptr,
7422 unsigned int *discriminator_ptr)
7423 {
7424 bfd_boolean found = FALSE;
7425
7426 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
7427 filename_ptr, functionname_ptr,
7428 line_ptr, discriminator_ptr,
7429 dwarf_debug_sections, 0,
7430 &elf_tdata (abfd)->dwarf2_find_line_info))
7431 {
7432 if (!*functionname_ptr)
7433 aarch64_elf_find_function (abfd, symbols, section, offset,
7434 *filename_ptr ? NULL : filename_ptr,
7435 functionname_ptr);
7436
7437 return TRUE;
7438 }
7439
7440 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
7441 toolchain uses DWARF1. */
7442
7443 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
7444 &found, filename_ptr,
7445 functionname_ptr, line_ptr,
7446 &elf_tdata (abfd)->line_info))
7447 return FALSE;
7448
7449 if (found && (*functionname_ptr || *line_ptr))
7450 return TRUE;
7451
7452 if (symbols == NULL)
7453 return FALSE;
7454
7455 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
7456 filename_ptr, functionname_ptr))
7457 return FALSE;
7458
7459 *line_ptr = 0;
7460 return TRUE;
7461 }
7462
7463 static bfd_boolean
7464 elfNN_aarch64_find_inliner_info (bfd *abfd,
7465 const char **filename_ptr,
7466 const char **functionname_ptr,
7467 unsigned int *line_ptr)
7468 {
7469 bfd_boolean found;
7470 found = _bfd_dwarf2_find_inliner_info
7471 (abfd, filename_ptr,
7472 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
7473 return found;
7474 }
7475
7476
7477 static void
7478 elfNN_aarch64_post_process_headers (bfd *abfd,
7479 struct bfd_link_info *link_info)
7480 {
7481 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
7482
7483 i_ehdrp = elf_elfheader (abfd);
7484 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
7485
7486 _bfd_elf_post_process_headers (abfd, link_info);
7487 }
7488
7489 static enum elf_reloc_type_class
7490 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
7491 const asection *rel_sec ATTRIBUTE_UNUSED,
7492 const Elf_Internal_Rela *rela)
7493 {
7494 switch ((int) ELFNN_R_TYPE (rela->r_info))
7495 {
7496 case AARCH64_R (RELATIVE):
7497 return reloc_class_relative;
7498 case AARCH64_R (JUMP_SLOT):
7499 return reloc_class_plt;
7500 case AARCH64_R (COPY):
7501 return reloc_class_copy;
7502 default:
7503 return reloc_class_normal;
7504 }
7505 }
7506
7507 /* Handle an AArch64 specific section when reading an object file. This is
7508 called when bfd_section_from_shdr finds a section with an unknown
7509 type. */
7510
7511 static bfd_boolean
7512 elfNN_aarch64_section_from_shdr (bfd *abfd,
7513 Elf_Internal_Shdr *hdr,
7514 const char *name, int shindex)
7515 {
7516 /* There ought to be a place to keep ELF backend specific flags, but
7517 at the moment there isn't one. We just keep track of the
7518 sections by their name, instead. Fortunately, the ABI gives
7519 names for all the AArch64 specific sections, so we will probably get
7520 away with this. */
7521 switch (hdr->sh_type)
7522 {
7523 case SHT_AARCH64_ATTRIBUTES:
7524 break;
7525
7526 default:
7527 return FALSE;
7528 }
7529
7530 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
7531 return FALSE;
7532
7533 return TRUE;
7534 }
7535
7536 /* A structure used to record a list of sections, independently
7537 of the next and prev fields in the asection structure. */
7538 typedef struct section_list
7539 {
7540 asection *sec;
7541 struct section_list *next;
7542 struct section_list *prev;
7543 }
7544 section_list;
7545
7546 /* Unfortunately we need to keep a list of sections for which
7547 an _aarch64_elf_section_data structure has been allocated. This
7548 is because it is possible for functions like elfNN_aarch64_write_section
7549 to be called on a section which has had an elf_data_structure
7550 allocated for it (and so the used_by_bfd field is valid) but
7551 for which the AArch64 extended version of this structure - the
7552 _aarch64_elf_section_data structure - has not been allocated. */
7553 static section_list *sections_with_aarch64_elf_section_data = NULL;
7554
7555 static void
7556 record_section_with_aarch64_elf_section_data (asection *sec)
7557 {
7558 struct section_list *entry;
7559
7560 entry = bfd_malloc (sizeof (*entry));
7561 if (entry == NULL)
7562 return;
7563 entry->sec = sec;
7564 entry->next = sections_with_aarch64_elf_section_data;
7565 entry->prev = NULL;
7566 if (entry->next != NULL)
7567 entry->next->prev = entry;
7568 sections_with_aarch64_elf_section_data = entry;
7569 }
7570
7571 static struct section_list *
7572 find_aarch64_elf_section_entry (asection *sec)
7573 {
7574 struct section_list *entry;
7575 static struct section_list *last_entry = NULL;
7576
7577 /* This is a short cut for the typical case where the sections are added
7578 to the sections_with_aarch64_elf_section_data list in forward order and
7579 then looked up here in backwards order. This makes a real difference
7580 to the ld-srec/sec64k.exp linker test. */
7581 entry = sections_with_aarch64_elf_section_data;
7582 if (last_entry != NULL)
7583 {
7584 if (last_entry->sec == sec)
7585 entry = last_entry;
7586 else if (last_entry->next != NULL && last_entry->next->sec == sec)
7587 entry = last_entry->next;
7588 }
7589
7590 for (; entry; entry = entry->next)
7591 if (entry->sec == sec)
7592 break;
7593
7594 if (entry)
7595 /* Record the entry prior to this one - it is the entry we are
7596 most likely to want to locate next time. Also this way if we
7597 have been called from
7598 unrecord_section_with_aarch64_elf_section_data () we will not
7599 be caching a pointer that is about to be freed. */
7600 last_entry = entry->prev;
7601
7602 return entry;
7603 }
7604
7605 static void
7606 unrecord_section_with_aarch64_elf_section_data (asection *sec)
7607 {
7608 struct section_list *entry;
7609
7610 entry = find_aarch64_elf_section_entry (sec);
7611
7612 if (entry)
7613 {
7614 if (entry->prev != NULL)
7615 entry->prev->next = entry->next;
7616 if (entry->next != NULL)
7617 entry->next->prev = entry->prev;
7618 if (entry == sections_with_aarch64_elf_section_data)
7619 sections_with_aarch64_elf_section_data = entry->next;
7620 free (entry);
7621 }
7622 }
7623
7624
7625 typedef struct
7626 {
7627 void *finfo;
7628 struct bfd_link_info *info;
7629 asection *sec;
7630 int sec_shndx;
7631 int (*func) (void *, const char *, Elf_Internal_Sym *,
7632 asection *, struct elf_link_hash_entry *);
7633 } output_arch_syminfo;
7634
7635 enum map_symbol_type
7636 {
7637 AARCH64_MAP_INSN,
7638 AARCH64_MAP_DATA
7639 };
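/* These follow the AArch64 ELF mapping symbol convention: "$x" marks
   the start of a region of A64 instructions and "$d" the start of a
   region of data, which is what the names[] table in
   elfNN_aarch64_output_map_sym encodes. */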
7640
7641
7642 /* Output a single mapping symbol. */
7643
7644 static bfd_boolean
7645 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
7646 enum map_symbol_type type, bfd_vma offset)
7647 {
7648 static const char *names[2] = { "$x", "$d" };
7649 Elf_Internal_Sym sym;
7650
7651 sym.st_value = (osi->sec->output_section->vma
7652 + osi->sec->output_offset + offset);
7653 sym.st_size = 0;
7654 sym.st_other = 0;
7655 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7656 sym.st_shndx = osi->sec_shndx;
7657 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
7658 }
7659
7660 /* Output a single local symbol for a generated stub. */
7661
7662 static bfd_boolean
7663 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7664 bfd_vma offset, bfd_vma size)
7665 {
7666 Elf_Internal_Sym sym;
7667
7668 sym.st_value = (osi->sec->output_section->vma
7669 + osi->sec->output_offset + offset);
7670 sym.st_size = size;
7671 sym.st_other = 0;
7672 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7673 sym.st_shndx = osi->sec_shndx;
7674 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7675 }
7676
7677 static bfd_boolean
7678 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7679 {
7680 struct elf_aarch64_stub_hash_entry *stub_entry;
7681 asection *stub_sec;
7682 bfd_vma addr;
7683 char *stub_name;
7684 output_arch_syminfo *osi;
7685
7686 /* Massage our args to the form they really have. */
7687 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7688 osi = (output_arch_syminfo *) in_arg;
7689
7690 stub_sec = stub_entry->stub_sec;
7691
7692 /* Ensure this stub is attached to the current section being
7693 processed. */
7694 if (stub_sec != osi->sec)
7695 return TRUE;
7696
7697 addr = (bfd_vma) stub_entry->stub_offset;
7698
7699 stub_name = stub_entry->output_name;
7700
7701 switch (stub_entry->stub_type)
7702 {
7703 case aarch64_stub_adrp_branch:
7704 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7705 sizeof (aarch64_adrp_branch_stub)))
7706 return FALSE;
7707 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7708 return FALSE;
7709 break;
7710 case aarch64_stub_long_branch:
7711 if (!elfNN_aarch64_output_stub_sym
7712 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7713 return FALSE;
7714 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7715 return FALSE;
7716 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7717 return FALSE;
7718 break;
7719 case aarch64_stub_erratum_835769_veneer:
7720 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7721 sizeof (aarch64_erratum_835769_stub)))
7722 return FALSE;
7723 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7724 return FALSE;
7725 break;
7726 case aarch64_stub_erratum_843419_veneer:
7727 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7728 sizeof (aarch64_erratum_843419_stub)))
7729 return FALSE;
7730 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7731 return FALSE;
7732 break;
7733
7734 default:
7735 abort ();
7736 }
7737
7738 return TRUE;
7739 }
7740
7741 /* Output mapping symbols for linker generated sections. */
7742
7743 static bfd_boolean
7744 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7745 struct bfd_link_info *info,
7746 void *finfo,
7747 int (*func) (void *, const char *,
7748 Elf_Internal_Sym *,
7749 asection *,
7750 struct elf_link_hash_entry
7751 *))
7752 {
7753 output_arch_syminfo osi;
7754 struct elf_aarch64_link_hash_table *htab;
7755
7756 htab = elf_aarch64_hash_table (info);
7757
7758 osi.finfo = finfo;
7759 osi.info = info;
7760 osi.func = func;
7761
7762 /* Long call stubs. */
7763 if (htab->stub_bfd && htab->stub_bfd->sections)
7764 {
7765 asection *stub_sec;
7766
7767 for (stub_sec = htab->stub_bfd->sections;
7768 stub_sec != NULL; stub_sec = stub_sec->next)
7769 {
7770 /* Ignore non-stub sections. */
7771 if (!strstr (stub_sec->name, STUB_SUFFIX))
7772 continue;
7773
7774 osi.sec = stub_sec;
7775
7776 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7777 (output_bfd, osi.sec->output_section);
7778
7779 /* The first instruction in a stub is always a branch. */
7780 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7781 return FALSE;
7782
7783 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7784 &osi);
7785 }
7786 }
7787
7788 /* Finally, output mapping symbols for the PLT. */
7789 if (!htab->root.splt || htab->root.splt->size == 0)
7790 return TRUE;
7791
7792 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7793 (output_bfd, htab->root.splt->output_section);
7794 osi.sec = htab->root.splt;
7795
7796 elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
7797
7798 return TRUE;
7799
7800 }
7801
7802 /* Allocate target specific section data. */
7803
7804 static bfd_boolean
7805 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7806 {
7807 if (!sec->used_by_bfd)
7808 {
7809 _aarch64_elf_section_data *sdata;
7810 bfd_size_type amt = sizeof (*sdata);
7811
7812 sdata = bfd_zalloc (abfd, amt);
7813 if (sdata == NULL)
7814 return FALSE;
7815 sec->used_by_bfd = sdata;
7816 }
7817
7818 record_section_with_aarch64_elf_section_data (sec);
7819
7820 return _bfd_elf_new_section_hook (abfd, sec);
7821 }
7822
7823
7824 static void
7825 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7826 asection *sec,
7827 void *ignore ATTRIBUTE_UNUSED)
7828 {
7829 unrecord_section_with_aarch64_elf_section_data (sec);
7830 }
7831
7832 static bfd_boolean
7833 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7834 {
7835 if (abfd->sections)
7836 bfd_map_over_sections (abfd,
7837 unrecord_section_via_map_over_sections, NULL);
7838
7839 return _bfd_elf_close_and_cleanup (abfd);
7840 }
7841
7842 static bfd_boolean
7843 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7844 {
7845 if (abfd->sections)
7846 bfd_map_over_sections (abfd,
7847 unrecord_section_via_map_over_sections, NULL);
7848
7849 return _bfd_free_cached_info (abfd);
7850 }
7851
7852 /* Create dynamic sections. This is different from the ARM backend in that
7853 the got, plt, gotplt and their relocation sections are all created in the
7854 standard part of the bfd elf backend. */
7855
7856 static bfd_boolean
7857 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7858 struct bfd_link_info *info)
7859 {
7860 struct elf_aarch64_link_hash_table *htab;
7861
7862 /* We need to create the .got section. */
7863 if (!aarch64_elf_create_got_section (dynobj, info))
7864 return FALSE;
7865
7866 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7867 return FALSE;
7868
7869 htab = elf_aarch64_hash_table (info);
7870 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7871 if (!bfd_link_pic (info))
7872 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7873
7874 if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
7875 abort ();
7876
7877 return TRUE;
7878 }
7879
7880
7881 /* Allocate space in .plt, .got and associated reloc sections for
7882 dynamic relocs. */
7883
7884 static bfd_boolean
7885 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7886 {
7887 struct bfd_link_info *info;
7888 struct elf_aarch64_link_hash_table *htab;
7889 struct elf_aarch64_link_hash_entry *eh;
7890 struct elf_dyn_relocs *p;
7891
7892 /* An example of a bfd_link_hash_indirect symbol is a versioned
7893 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7894 -> __gxx_personality_v0(bfd_link_hash_defined)
7895
7896 There is no need to process bfd_link_hash_indirect symbols here
7897 because we will also be presented with the concrete instance of
7898 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7899 called to copy all relevant data from the generic to the concrete
7900 symbol instance.
7901 */
7902 if (h->root.type == bfd_link_hash_indirect)
7903 return TRUE;
7904
7905 if (h->root.type == bfd_link_hash_warning)
7906 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7907
7908 info = (struct bfd_link_info *) inf;
7909 htab = elf_aarch64_hash_table (info);
7910
7911 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
7912 here if it is defined and referenced in a non-shared object. */
7913 if (h->type == STT_GNU_IFUNC
7914 && h->def_regular)
7915 return TRUE;
7916 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7917 {
7918 /* Make sure this symbol is output as a dynamic symbol.
7919 Undefined weak syms won't yet be marked as dynamic. */
7920 if (h->dynindx == -1 && !h->forced_local)
7921 {
7922 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7923 return FALSE;
7924 }
7925
7926 if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7927 {
7928 asection *s = htab->root.splt;
7929
7930 /* If this is the first .plt entry, make room for the special
7931 first entry. */
7932 if (s->size == 0)
7933 s->size += htab->plt_header_size;
7934
7935 h->plt.offset = s->size;
7936
7937 /* If this symbol is not defined in a regular file, and we are
7938 not generating a shared library, then set the symbol to this
7939 location in the .plt. This is required to make function
7940 pointers compare as equal between the normal executable and
7941 the shared library. */
7942 if (!bfd_link_pic (info) && !h->def_regular)
7943 {
7944 h->root.u.def.section = s;
7945 h->root.u.def.value = h->plt.offset;
7946 }
7947
7948 /* Make room for this entry. For now we only create the
7949 small model PLT entries. We later need to find a way
7950 of relaxing into these from the large model PLT entries. */
7951 s->size += PLT_SMALL_ENTRY_SIZE;
7952
7953 /* We also need to make an entry in the .got.plt section, which
7954 will be placed in the .got section by the linker script. */
7955 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7956
7957 /* We also need to make an entry in the .rela.plt section. */
7958 htab->root.srelplt->size += RELOC_SIZE (htab);
7959
7960 /* We need to ensure that all GOT entries that serve the PLT
7961 are consecutive with the special GOT slots [0] [1] and
7962 [2]. Any additional relocations, such as
7963 R_AARCH64_TLSDESC, must be placed after the PLT related
7964 entries. We abuse the reloc_count such that during
7965 sizing we adjust reloc_count to indicate the number of
7966 PLT related reserved entries. In subsequent phases when
7967 filling in the contents of the reloc entries, PLT related
7968 entries are placed by computing their PLT index (0
7969 .. reloc_count), while other non-PLT relocs are placed
7970 at the slot indicated by reloc_count, and reloc_count is
7971 then updated. */
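/* Illustrative example: with two symbols needing PLT entries and one
   symbol needing only a TLS descriptor, sizing leaves reloc_count at
   2; the two R_AARCH64_JUMP_SLOT relocs are later written to slots 0
   and 1 by PLT index, while the R_AARCH64_TLSDESC reloc is written
   at slot reloc_count (2), which is then bumped to 3. */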
7972
7973 htab->root.srelplt->reloc_count++;
7974 }
7975 else
7976 {
7977 h->plt.offset = (bfd_vma) - 1;
7978 h->needs_plt = 0;
7979 }
7980 }
7981 else
7982 {
7983 h->plt.offset = (bfd_vma) - 1;
7984 h->needs_plt = 0;
7985 }
7986
7987 eh = (struct elf_aarch64_link_hash_entry *) h;
7988 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7989
7990 if (h->got.refcount > 0)
7991 {
7992 bfd_boolean dyn;
7993 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7994
7995 h->got.offset = (bfd_vma) - 1;
7996
7997 dyn = htab->root.dynamic_sections_created;
7998
7999 /* Make sure this symbol is output as a dynamic symbol.
8000 Undefined weak syms won't yet be marked as dynamic. */
8001 if (dyn && h->dynindx == -1 && !h->forced_local)
8002 {
8003 if (!bfd_elf_link_record_dynamic_symbol (info, h))
8004 return FALSE;
8005 }
8006
8007 if (got_type == GOT_UNKNOWN)
8008 {
8009 }
8010 else if (got_type == GOT_NORMAL)
8011 {
8012 h->got.offset = htab->root.sgot->size;
8013 htab->root.sgot->size += GOT_ENTRY_SIZE;
8014 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8015 || h->root.type != bfd_link_hash_undefweak)
8016 && (bfd_link_pic (info)
8017 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8018 {
8019 htab->root.srelgot->size += RELOC_SIZE (htab);
8020 }
8021 }
8022 else
8023 {
8024 int indx;
8025 if (got_type & GOT_TLSDESC_GD)
8026 {
8027 eh->tlsdesc_got_jump_table_offset =
8028 (htab->root.sgotplt->size
8029 - aarch64_compute_jump_table_size (htab));
8030 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8031 h->got.offset = (bfd_vma) - 2;
8032 }
8033
8034 if (got_type & GOT_TLS_GD)
8035 {
8036 h->got.offset = htab->root.sgot->size;
8037 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8038 }
8039
8040 if (got_type & GOT_TLS_IE)
8041 {
8042 h->got.offset = htab->root.sgot->size;
8043 htab->root.sgot->size += GOT_ENTRY_SIZE;
8044 }
8045
8046 indx = h && h->dynindx != -1 ? h->dynindx : 0;
8047 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8048 || h->root.type != bfd_link_hash_undefweak)
8049 && (bfd_link_pic (info)
8050 || indx != 0
8051 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
8052 {
8053 if (got_type & GOT_TLSDESC_GD)
8054 {
8055 htab->root.srelplt->size += RELOC_SIZE (htab);
8056 /* Note reloc_count not incremented here! We have
8057 already adjusted reloc_count for this relocation
8058 type. */
8059
8060 /* TLSDESC PLT is now needed, but not yet determined. */
8061 htab->tlsdesc_plt = (bfd_vma) - 1;
8062 }
8063
8064 if (got_type & GOT_TLS_GD)
8065 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8066
8067 if (got_type & GOT_TLS_IE)
8068 htab->root.srelgot->size += RELOC_SIZE (htab);
8069 }
8070 }
8071 }
8072 else
8073 {
8074 h->got.offset = (bfd_vma) - 1;
8075 }
8076
8077 if (eh->dyn_relocs == NULL)
8078 return TRUE;
8079
8080 /* In the shared -Bsymbolic case, discard space allocated for
8081 dynamic pc-relative relocs against symbols which turn out to be
8082 defined in regular objects. For the normal shared case, discard
8083 space for pc-relative relocs that have become local due to symbol
8084 visibility changes. */
8085
8086 if (bfd_link_pic (info))
8087 {
8088 /* Relocs that use pc_count are those that appear on a call
8089 insn, or certain REL relocs that can be generated via assembly.
8090 We want calls to protected symbols to resolve directly to the
8091 function rather than going via the plt. If people want
8092 function pointer comparisons to work as expected then they
8093 should avoid writing weird assembly. */
8094 if (SYMBOL_CALLS_LOCAL (info, h))
8095 {
8096 struct elf_dyn_relocs **pp;
8097
8098 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
8099 {
8100 p->count -= p->pc_count;
8101 p->pc_count = 0;
8102 if (p->count == 0)
8103 *pp = p->next;
8104 else
8105 pp = &p->next;
8106 }
8107 }
8108
8109 /* Also discard relocs on undefined weak syms with non-default
8110 visibility. */
8111 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
8112 {
8113 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8114 eh->dyn_relocs = NULL;
8115
8116 /* Make sure undefined weak symbols are output as a dynamic
8117 symbol in PIEs. */
8118 else if (h->dynindx == -1
8119 && !h->forced_local
8120 && !bfd_elf_link_record_dynamic_symbol (info, h))
8121 return FALSE;
8122 }
8123
8124 }
8125 else if (ELIMINATE_COPY_RELOCS)
8126 {
8127 /* For the non-shared case, discard space for relocs against
8128 symbols which turn out to need copy relocs or are not
8129 dynamic. */
8130
8131 if (!h->non_got_ref
8132 && ((h->def_dynamic
8133 && !h->def_regular)
8134 || (htab->root.dynamic_sections_created
8135 && (h->root.type == bfd_link_hash_undefweak
8136 || h->root.type == bfd_link_hash_undefined))))
8137 {
8138 /* Make sure this symbol is output as a dynamic symbol.
8139 Undefined weak syms won't yet be marked as dynamic. */
8140 if (h->dynindx == -1
8141 && !h->forced_local
8142 && !bfd_elf_link_record_dynamic_symbol (info, h))
8143 return FALSE;
8144
8145 /* If that succeeded, we know we'll be keeping all the
8146 relocs. */
8147 if (h->dynindx != -1)
8148 goto keep;
8149 }
8150
8151 eh->dyn_relocs = NULL;
8152
8153 keep:;
8154 }
8155
8156 /* Finally, allocate space. */
8157 for (p = eh->dyn_relocs; p != NULL; p = p->next)
8158 {
8159 asection *sreloc;
8160
8161 sreloc = elf_section_data (p->sec)->sreloc;
8162
8163 BFD_ASSERT (sreloc != NULL);
8164
8165 sreloc->size += p->count * RELOC_SIZE (htab);
8166 }
8167
8168 return TRUE;
8169 }
8170
8171 /* Allocate space in .plt, .got and associated reloc sections for
8172 ifunc dynamic relocs. */
8173
8174 static bfd_boolean
8175 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
8176 void *inf)
8177 {
8178 struct bfd_link_info *info;
8179 struct elf_aarch64_link_hash_table *htab;
8180 struct elf_aarch64_link_hash_entry *eh;
8181
8182 /* An example of a bfd_link_hash_indirect symbol is a versioned
8183 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
8184 -> __gxx_personality_v0(bfd_link_hash_defined)
8185
8186 There is no need to process bfd_link_hash_indirect symbols here
8187 because we will also be presented with the concrete instance of
8188 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
8189 called to copy all relevant data from the generic to the concrete
8190 symbol instance.
8191 */
8192 if (h->root.type == bfd_link_hash_indirect)
8193 return TRUE;
8194
8195 if (h->root.type == bfd_link_hash_warning)
8196 h = (struct elf_link_hash_entry *) h->root.u.i.link;
8197
8198 info = (struct bfd_link_info *) inf;
8199 htab = elf_aarch64_hash_table (info);
8200
8201 eh = (struct elf_aarch64_link_hash_entry *) h;
8202
8203 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
8204 here if it is defined and referenced in a non-shared object. */
8205 if (h->type == STT_GNU_IFUNC
8206 && h->def_regular)
8207 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
8208 &eh->dyn_relocs,
8209 htab->plt_entry_size,
8210 htab->plt_header_size,
8211 GOT_ENTRY_SIZE);
8212 return TRUE;
8213 }
8214
8215 /* Allocate space in .plt, .got and associated reloc sections for
8216 local dynamic relocs. */
8217
8218 static bfd_boolean
8219 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
8220 {
8221 struct elf_link_hash_entry *h
8222 = (struct elf_link_hash_entry *) *slot;
8223
8224 if (h->type != STT_GNU_IFUNC
8225 || !h->def_regular
8226 || !h->ref_regular
8227 || !h->forced_local
8228 || h->root.type != bfd_link_hash_defined)
8229 abort ();
8230
8231 return elfNN_aarch64_allocate_dynrelocs (h, inf);
8232 }
8233
8234 /* Allocate space in .plt, .got and associated reloc sections for
8235 local ifunc dynamic relocs. */
8236
8237 static bfd_boolean
8238 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
8239 {
8240 struct elf_link_hash_entry *h
8241 = (struct elf_link_hash_entry *) *slot;
8242
8243 if (h->type != STT_GNU_IFUNC
8244 || !h->def_regular
8245 || !h->ref_regular
8246 || !h->forced_local
8247 || h->root.type != bfd_link_hash_defined)
8248 abort ();
8249
8250 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
8251 }
8252
8253 /* Find any dynamic relocs that apply to read-only sections. */
8254
8255 static bfd_boolean
8256 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
8257 {
8258 struct elf_aarch64_link_hash_entry * eh;
8259 struct elf_dyn_relocs * p;
8260
8261 eh = (struct elf_aarch64_link_hash_entry *) h;
8262 for (p = eh->dyn_relocs; p != NULL; p = p->next)
8263 {
8264 asection *s = p->sec;
8265
8266 if (s != NULL && (s->flags & SEC_READONLY) != 0)
8267 {
8268 struct bfd_link_info *info = (struct bfd_link_info *) inf;
8269
8270 info->flags |= DF_TEXTREL;
8271
8272 /* Not an error, just cut short the traversal. */
8273 return FALSE;
8274 }
8275 }
8276 return TRUE;
8277 }
8278
8279 /* This is the most important function of all. Innocuously named
8280 though! */
8281 static bfd_boolean
8282 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
8283 struct bfd_link_info *info)
8284 {
8285 struct elf_aarch64_link_hash_table *htab;
8286 bfd *dynobj;
8287 asection *s;
8288 bfd_boolean relocs;
8289 bfd *ibfd;
8290
8291 htab = elf_aarch64_hash_table (info);
8292 dynobj = htab->root.dynobj;
8293
8294 BFD_ASSERT (dynobj != NULL);
8295
8296 if (htab->root.dynamic_sections_created)
8297 {
8298 if (bfd_link_executable (info) && !info->nointerp)
8299 {
8300 s = bfd_get_linker_section (dynobj, ".interp");
8301 if (s == NULL)
8302 abort ();
8303 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
8304 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
8305 }
8306 }
8307
8308 /* Set up .got offsets for local syms, and space for local dynamic
8309 relocs. */
8310 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
8311 {
8312 struct elf_aarch64_local_symbol *locals = NULL;
8313 Elf_Internal_Shdr *symtab_hdr;
8314 asection *srel;
8315 unsigned int i;
8316
8317 if (!is_aarch64_elf (ibfd))
8318 continue;
8319
8320 for (s = ibfd->sections; s != NULL; s = s->next)
8321 {
8322 struct elf_dyn_relocs *p;
8323
8324 for (p = (struct elf_dyn_relocs *)
8325 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
8326 {
8327 if (!bfd_is_abs_section (p->sec)
8328 && bfd_is_abs_section (p->sec->output_section))
8329 {
8330 /* Input section has been discarded, either because
8331 it is a copy of a linkonce section or due to
8332 linker script /DISCARD/, so we'll be discarding
8333 the relocs too. */
8334 }
8335 else if (p->count != 0)
8336 {
8337 srel = elf_section_data (p->sec)->sreloc;
8338 srel->size += p->count * RELOC_SIZE (htab);
8339 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
8340 info->flags |= DF_TEXTREL;
8341 }
8342 }
8343 }
8344
8345 locals = elf_aarch64_locals (ibfd);
8346 if (!locals)
8347 continue;
8348
8349 symtab_hdr = &elf_symtab_hdr (ibfd);
8350 srel = htab->root.srelgot;
8351 for (i = 0; i < symtab_hdr->sh_info; i++)
8352 {
8353 locals[i].got_offset = (bfd_vma) - 1;
8354 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
8355 if (locals[i].got_refcount > 0)
8356 {
8357 unsigned got_type = locals[i].got_type;
8358 if (got_type & GOT_TLSDESC_GD)
8359 {
8360 locals[i].tlsdesc_got_jump_table_offset =
8361 (htab->root.sgotplt->size
8362 - aarch64_compute_jump_table_size (htab));
8363 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
8364 locals[i].got_offset = (bfd_vma) - 2;
8365 }
8366
8367 if (got_type & GOT_TLS_GD)
8368 {
8369 locals[i].got_offset = htab->root.sgot->size;
8370 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
8371 }
8372
8373 if (got_type & GOT_TLS_IE
8374 || got_type & GOT_NORMAL)
8375 {
8376 locals[i].got_offset = htab->root.sgot->size;
8377 htab->root.sgot->size += GOT_ENTRY_SIZE;
8378 }
8379
8380 if (got_type == GOT_UNKNOWN)
8381 {
8382 }
8383
8384 if (bfd_link_pic (info))
8385 {
8386 if (got_type & GOT_TLSDESC_GD)
8387 {
8388 htab->root.srelplt->size += RELOC_SIZE (htab);
8389 /* Note RELOC_COUNT not incremented here! */
8390 htab->tlsdesc_plt = (bfd_vma) - 1;
8391 }
8392
8393 if (got_type & GOT_TLS_GD)
8394 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
8395
8396 if (got_type & GOT_TLS_IE
8397 || got_type & GOT_NORMAL)
8398 htab->root.srelgot->size += RELOC_SIZE (htab);
8399 }
8400 }
8401 else
8402 {
8403 locals[i].got_refcount = (bfd_vma) - 1;
8404 }
8405 }
8406 }
8407
8408
8409 /* Allocate global sym .plt and .got entries, and space for global
8410 sym dynamic relocs. */
8411 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
8412 info);
8413
8414 /* Allocate global ifunc sym .plt and .got entries, and space for global
8415 ifunc sym dynamic relocs. */
8416 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
8417 info);
8418
8419 /* Allocate .plt and .got entries, and space for local symbols. */
8420 htab_traverse (htab->loc_hash_table,
8421 elfNN_aarch64_allocate_local_dynrelocs,
8422 info);
8423
8424 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
8425 htab_traverse (htab->loc_hash_table,
8426 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
8427 info);
8428
8429 /* For every jump slot reserved in the sgotplt, reloc_count is
8430 incremented. However, when we reserve space for TLS descriptors,
8431 it's not incremented, so in order to compute the space reserved
8432 for them, it suffices to multiply the reloc count by the jump
8433 slot size. */
8434
8435 if (htab->root.srelplt)
8436 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
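/* I.e. effectively the srelplt reloc count multiplied by the
   jump-slot (GOT entry) size, as described above. */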
8437
8438 if (htab->tlsdesc_plt)
8439 {
8440 if (htab->root.splt->size == 0)
8441 htab->root.splt->size += PLT_ENTRY_SIZE;
8442
8443 htab->tlsdesc_plt = htab->root.splt->size;
8444 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
8445
8446 /* If we're not using lazy TLS relocations, don't generate the
8447 GOT entry required. */
8448 if (!(info->flags & DF_BIND_NOW))
8449 {
8450 htab->dt_tlsdesc_got = htab->root.sgot->size;
8451 htab->root.sgot->size += GOT_ENTRY_SIZE;
8452 }
8453 }
8454
8455 /* Initialize mapping symbol information for later use to distinguish
8456 between code and data while scanning for errata. */
8457 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
8458 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
8459 {
8460 if (!is_aarch64_elf (ibfd))
8461 continue;
8462 bfd_elfNN_aarch64_init_maps (ibfd);
8463 }
8464
8465 /* We now have determined the sizes of the various dynamic sections.
8466 Allocate memory for them. */
8467 relocs = FALSE;
8468 for (s = dynobj->sections; s != NULL; s = s->next)
8469 {
8470 if ((s->flags & SEC_LINKER_CREATED) == 0)
8471 continue;
8472
8473 if (s == htab->root.splt
8474 || s == htab->root.sgot
8475 || s == htab->root.sgotplt
8476 || s == htab->root.iplt
8477 || s == htab->root.igotplt || s == htab->sdynbss)
8478 {
8479 /* Strip this section if we don't need it; see the
8480 comment below. */
8481 }
8482 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
8483 {
8484 if (s->size != 0 && s != htab->root.srelplt)
8485 relocs = TRUE;
8486
8487 /* We use the reloc_count field as a counter if we need
8488 to copy relocs into the output file. */
8489 if (s != htab->root.srelplt)
8490 s->reloc_count = 0;
8491 }
8492 else
8493 {
8494 /* It's not one of our sections, so don't allocate space. */
8495 continue;
8496 }
8497
8498 if (s->size == 0)
8499 {
8500 /* If we don't need this section, strip it from the
8501 output file. This is mostly to handle .rela.bss and
8502 .rela.plt. We must create both sections in
8503 create_dynamic_sections, because they must be created
8504 before the linker maps input sections to output
8505 sections. The linker does that before
8506 adjust_dynamic_symbol is called, and it is that
8507 function which decides whether anything needs to go
8508 into these sections. */
8509
8510 s->flags |= SEC_EXCLUDE;
8511 continue;
8512 }
8513
8514 if ((s->flags & SEC_HAS_CONTENTS) == 0)
8515 continue;
8516
8517 /* Allocate memory for the section contents. We use bfd_zalloc
8518 here in case unused entries are not reclaimed before the
8519 section's contents are written out. This should not happen,
8520 but this way if it does, we get a R_AARCH64_NONE reloc instead
8521 of garbage. */
8522 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
8523 if (s->contents == NULL)
8524 return FALSE;
8525 }
8526
8527 if (htab->root.dynamic_sections_created)
8528 {
8529 /* Add some entries to the .dynamic section. We fill in the
8530 values later, in elfNN_aarch64_finish_dynamic_sections, but we
8531 must add the entries now so that we get the correct size for
8532 the .dynamic section. The DT_DEBUG entry is filled in by the
8533 dynamic linker and used by the debugger. */
8534 #define add_dynamic_entry(TAG, VAL) \
8535 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
8536
8537 if (bfd_link_executable (info))
8538 {
8539 if (!add_dynamic_entry (DT_DEBUG, 0))
8540 return FALSE;
8541 }
8542
8543 if (htab->root.splt->size != 0)
8544 {
8545 if (!add_dynamic_entry (DT_PLTGOT, 0)
8546 || !add_dynamic_entry (DT_PLTRELSZ, 0)
8547 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
8548 || !add_dynamic_entry (DT_JMPREL, 0))
8549 return FALSE;
8550
8551 if (htab->tlsdesc_plt
8552 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
8553 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
8554 return FALSE;
8555 }
8556
8557 if (relocs)
8558 {
8559 if (!add_dynamic_entry (DT_RELA, 0)
8560 || !add_dynamic_entry (DT_RELASZ, 0)
8561 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
8562 return FALSE;
8563
8564 /* If any dynamic relocs apply to a read-only section,
8565 then we need a DT_TEXTREL entry. */
8566 if ((info->flags & DF_TEXTREL) == 0)
8567 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
8568 info);
8569
8570 if ((info->flags & DF_TEXTREL) != 0)
8571 {
8572 if (!add_dynamic_entry (DT_TEXTREL, 0))
8573 return FALSE;
8574 }
8575 }
8576 }
8577 #undef add_dynamic_entry
8578
8579 return TRUE;
8580 }
8581
8582 static inline void
8583 elf_aarch64_update_plt_entry (bfd *output_bfd,
8584 bfd_reloc_code_real_type r_type,
8585 bfd_byte *plt_entry, bfd_vma value)
8586 {
8587 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
8588
8589 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
8590 }
8591
8592 static void
8593 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
8594 struct elf_aarch64_link_hash_table
8595 *htab, bfd *output_bfd,
8596 struct bfd_link_info *info)
8597 {
8598 bfd_byte *plt_entry;
8599 bfd_vma plt_index;
8600 bfd_vma got_offset;
8601 bfd_vma gotplt_entry_address;
8602 bfd_vma plt_entry_address;
8603 Elf_Internal_Rela rela;
8604 bfd_byte *loc;
8605 asection *plt, *gotplt, *relplt;
8606
8607 /* When building a static executable, use .iplt, .igot.plt and
8608 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8609 if (htab->root.splt != NULL)
8610 {
8611 plt = htab->root.splt;
8612 gotplt = htab->root.sgotplt;
8613 relplt = htab->root.srelplt;
8614 }
8615 else
8616 {
8617 plt = htab->root.iplt;
8618 gotplt = htab->root.igotplt;
8619 relplt = htab->root.irelplt;
8620 }
8621
8622 /* Get the index in the procedure linkage table which
8623 corresponds to this symbol. This is the index of this symbol
8624 in all the symbols for which we are making plt entries. The
8625 first entry in the procedure linkage table is reserved.
8626
8627 Get the offset into the .got table of the entry that
8628 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
8629 bytes. The first three are reserved for the dynamic linker.
8630
8631 For static executables, we don't reserve anything. */
8632
8633 if (plt == htab->root.splt)
8634 {
8635 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8636 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8637 }
8638 else
8639 {
8640 plt_index = h->plt.offset / htab->plt_entry_size;
8641 got_offset = plt_index * GOT_ENTRY_SIZE;
8642 }
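/* Worked example (figures illustrative; the real sizes come from
   HTAB): with a 32-byte PLT header and 16-byte small PLT entries,
   the second PLT entry has h->plt.offset == 48, giving plt_index == 1
   and, in the dynamic case, got_offset == (1 + 3) * GOT_ENTRY_SIZE,
   i.e. the slot immediately after the three reserved for the dynamic
   linker. */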
8643
8644 plt_entry = plt->contents + h->plt.offset;
8645 plt_entry_address = plt->output_section->vma
8646 + plt->output_offset + h->plt.offset;
8647 gotplt_entry_address = gotplt->output_section->vma +
8648 gotplt->output_offset + got_offset;
8649
8650 /* Copy in the boiler-plate for the PLTn entry. */
8651 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
8652
8653 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8654 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8655 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8656 plt_entry,
8657 PG (gotplt_entry_address) -
8658 PG (plt_entry_address));
8659
8660 /* Fill in the lo12 bits for the load from the pltgot. */
8661 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8662 plt_entry + 4,
8663 PG_OFFSET (gotplt_entry_address));
8664
8665 /* Fill in the lo12 bits for the add from the pltgot entry. */
8666 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8667 plt_entry + 8,
8668 PG_OFFSET (gotplt_entry_address));
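/* PG () keeps the 4KiB page base of its argument and PG_OFFSET ()
   keeps the low 12 bits, matching the ADRP / LDR / ADD split filled
   in above. */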
8669
8670 /* All the GOTPLT Entries are essentially initialized to PLT0. */
8671 bfd_put_NN (output_bfd,
8672 plt->output_section->vma + plt->output_offset,
8673 gotplt->contents + got_offset);
8674
8675 rela.r_offset = gotplt_entry_address;
8676
8677 if (h->dynindx == -1
8678 || ((bfd_link_executable (info)
8679 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8680 && h->def_regular
8681 && h->type == STT_GNU_IFUNC))
8682 {
8683 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8684 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8685 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8686 rela.r_addend = (h->root.u.def.value
8687 + h->root.u.def.section->output_section->vma
8688 + h->root.u.def.section->output_offset);
8689 }
8690 else
8691 {
8692 /* Fill in the entry in the .rela.plt section. */
8693 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8694 rela.r_addend = 0;
8695 }
8696
8697 /* Compute the relocation entry to use based on the PLT index and do
8698 not adjust reloc_count. The reloc_count has already been adjusted
8699 to account for this entry. */
8700 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8701 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8702 }
8703
8704 /* Size sections even though they're not dynamic. We use it to set up
8705 _TLS_MODULE_BASE_, if needed. */
8706
8707 static bfd_boolean
8708 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8709 struct bfd_link_info *info)
8710 {
8711 asection *tls_sec;
8712
8713 if (bfd_link_relocatable (info))
8714 return TRUE;
8715
8716 tls_sec = elf_hash_table (info)->tls_sec;
8717
8718 if (tls_sec)
8719 {
8720 struct elf_link_hash_entry *tlsbase;
8721
8722 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8723 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8724
8725 if (tlsbase)
8726 {
8727 struct bfd_link_hash_entry *h = NULL;
8728 const struct elf_backend_data *bed =
8729 get_elf_backend_data (output_bfd);
8730
8731 if (!(_bfd_generic_link_add_one_symbol
8732 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8733 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8734 return FALSE;
8735
8736 tlsbase->type = STT_TLS;
8737 tlsbase = (struct elf_link_hash_entry *) h;
8738 tlsbase->def_regular = 1;
8739 tlsbase->other = STV_HIDDEN;
8740 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8741 }
8742 }
8743
8744 return TRUE;
8745 }
8746
8747 /* Finish up dynamic symbol handling. We set the contents of various
8748 dynamic sections here. */
8749 static bfd_boolean
8750 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8751 struct bfd_link_info *info,
8752 struct elf_link_hash_entry *h,
8753 Elf_Internal_Sym *sym)
8754 {
8755 struct elf_aarch64_link_hash_table *htab;
8756 htab = elf_aarch64_hash_table (info);
8757
8758 if (h->plt.offset != (bfd_vma) - 1)
8759 {
8760 asection *plt, *gotplt, *relplt;
8761
8762 /* This symbol has an entry in the procedure linkage table. Set
8763 it up. */
8764
8765 /* When building a static executable, use .iplt, .igot.plt and
8766 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8767 if (htab->root.splt != NULL)
8768 {
8769 plt = htab->root.splt;
8770 gotplt = htab->root.sgotplt;
8771 relplt = htab->root.srelplt;
8772 }
8773 else
8774 {
8775 plt = htab->root.iplt;
8776 gotplt = htab->root.igotplt;
8777 relplt = htab->root.irelplt;
8778 }
8779
8780 /* This symbol has an entry in the procedure linkage table. Set
8781 it up. */
8782 if ((h->dynindx == -1
8783 && !((h->forced_local || bfd_link_executable (info))
8784 && h->def_regular
8785 && h->type == STT_GNU_IFUNC))
8786 || plt == NULL
8787 || gotplt == NULL
8788 || relplt == NULL)
8789 abort ();
8790
8791 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8792 if (!h->def_regular)
8793 {
8794 /* Mark the symbol as undefined, rather than as defined in
8795 the .plt section. */
8796 sym->st_shndx = SHN_UNDEF;
8797 /* If the symbol is weak we need to clear the value.
8798 Otherwise, the PLT entry would provide a definition for
8799 the symbol even if the symbol wasn't defined anywhere,
8800 and so the symbol would never be NULL. Leave the value if
8801 there were any relocations where pointer equality matters
8802 (this is a clue for the dynamic linker, to make function
8803 pointer comparisons work between an application and shared
8804 library). */
8805 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8806 sym->st_value = 0;
8807 }
8808 }
8809
8810 if (h->got.offset != (bfd_vma) - 1
8811 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8812 {
8813 Elf_Internal_Rela rela;
8814 bfd_byte *loc;
8815
8816 /* This symbol has an entry in the global offset table. Set it
8817 up. */
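/* Three cases follow: a locally-defined IFUNC (the GOT slot gets either a
   GLOB_DAT reloc or, in a non-shared link, the PLT entry address directly
   with no dynamic reloc); a locally-bound symbol in PIC output (an
   R_AARCH64_RELATIVE reloc carrying the link-time value as the addend);
   and the general case (a GLOB_DAT reloc with the GOT slot zeroed for the
   loader to fill in). */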
8818 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8819 abort ();
8820
8821 rela.r_offset = (htab->root.sgot->output_section->vma
8822 + htab->root.sgot->output_offset
8823 + (h->got.offset & ~(bfd_vma) 1));
8824
8825 if (h->def_regular
8826 && h->type == STT_GNU_IFUNC)
8827 {
8828 if (bfd_link_pic (info))
8829 {
8830 /* Generate R_AARCH64_GLOB_DAT. */
8831 goto do_glob_dat;
8832 }
8833 else
8834 {
8835 asection *plt;
8836
8837 if (!h->pointer_equality_needed)
8838 abort ();
8839
8840 /* For a non-shared object, we can't use .got.plt, which
8841 contains the real function address, if we need pointer
8842 equality. We load the GOT entry with the PLT entry address. */
8843 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8844 bfd_put_NN (output_bfd, (plt->output_section->vma
8845 + plt->output_offset
8846 + h->plt.offset),
8847 htab->root.sgot->contents
8848 + (h->got.offset & ~(bfd_vma) 1));
8849 return TRUE;
8850 }
8851 }
8852 else if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
8853 {
8854 if (!h->def_regular)
8855 return FALSE;
8856
8857 BFD_ASSERT ((h->got.offset & 1) != 0);
8858 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8859 rela.r_addend = (h->root.u.def.value
8860 + h->root.u.def.section->output_section->vma
8861 + h->root.u.def.section->output_offset);
8862 }
8863 else
8864 {
8865 do_glob_dat:
8866 BFD_ASSERT ((h->got.offset & 1) == 0);
8867 bfd_put_NN (output_bfd, (bfd_vma) 0,
8868 htab->root.sgot->contents + h->got.offset);
8869 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8870 rela.r_addend = 0;
8871 }
8872
8873 loc = htab->root.srelgot->contents;
8874 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8875 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8876 }
8877
8878 if (h->needs_copy)
8879 {
8880 Elf_Internal_Rela rela;
8881 bfd_byte *loc;
8882
8883 /* This symbol needs a copy reloc. Set it up. */
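/* An R_AARCH64_COPY relocation asks the dynamic linker to copy the
   symbol's initial contents from the shared object into the space
   reserved for it in the executable (typically .dynbss), so that the
   executable's direct references resolve to that copy. */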
8884
8885 if (h->dynindx == -1
8886 || (h->root.type != bfd_link_hash_defined
8887 && h->root.type != bfd_link_hash_defweak)
8888 || htab->srelbss == NULL)
8889 abort ();
8890
8891 rela.r_offset = (h->root.u.def.value
8892 + h->root.u.def.section->output_section->vma
8893 + h->root.u.def.section->output_offset);
8894 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8895 rela.r_addend = 0;
8896 loc = htab->srelbss->contents;
8897 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8898 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8899 }
8900
8901 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8902 be NULL for local symbols. */
8903 if (sym != NULL
8904 && (h == elf_hash_table (info)->hdynamic
8905 || h == elf_hash_table (info)->hgot))
8906 sym->st_shndx = SHN_ABS;
8907
8908 return TRUE;
8909 }
8910
8911 /* Finish up local dynamic symbol handling. We set the contents of
8912 various dynamic sections here. */
8913
8914 static bfd_boolean
8915 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8916 {
8917 struct elf_link_hash_entry *h
8918 = (struct elf_link_hash_entry *) *slot;
8919 struct bfd_link_info *info
8920 = (struct bfd_link_info *) inf;
8921
8922 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8923 info, h, NULL);
8924 }
8925
8926 static void
8927 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8928 struct elf_aarch64_link_hash_table
8929 *htab)
8930 {
8931 /* Fill in PLT0. FIXME:RR Note this doesn't distinguish between
8932 small and large PLTs and at the moment just generates
8933 the small PLT. */
8934
8935 /* PLT0 of the small PLT looks like this in ELF64 -
8936 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8937 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8938 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8939 // symbol resolver
8940 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8941 // GOTPLT entry for this.
8942 br x17
8943 PLT0 will be slightly different in ELF32 due to the different GOT
8944 entry size.
8945 */
8946 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8947 bfd_vma plt_base;
8948
8949
8950 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8951 PLT_ENTRY_SIZE);
8952 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8953 PLT_ENTRY_SIZE;
8954
8955 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8956 + htab->root.sgotplt->output_offset
8957 + GOT_ENTRY_SIZE * 2);
8958
8959 plt_base = htab->root.splt->output_section->vma +
8960 htab->root.splt->output_offset;
8961
8962 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8963 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
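/* PG and PG_OFFSET (defined earlier in this file) split an address into
   its 4KiB page base and its low 12 bits respectively, mirroring the
   ADRP / LDR #:lo12: / ADD #:lo12: split used in the PLT stub above. */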
8964 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8965 htab->root.splt->contents + 4,
8966 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8967
8968 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8969 htab->root.splt->contents + 8,
8970 PG_OFFSET (plt_got_2nd_ent));
8971
8972 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8973 htab->root.splt->contents + 12,
8974 PG_OFFSET (plt_got_2nd_ent));
8975 }
8976
8977 static bfd_boolean
8978 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8979 struct bfd_link_info *info)
8980 {
8981 struct elf_aarch64_link_hash_table *htab;
8982 bfd *dynobj;
8983 asection *sdyn;
8984
8985 htab = elf_aarch64_hash_table (info);
8986 dynobj = htab->root.dynobj;
8987 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8988
8989 if (htab->root.dynamic_sections_created)
8990 {
8991 ElfNN_External_Dyn *dyncon, *dynconend;
8992
8993 if (sdyn == NULL || htab->root.sgot == NULL)
8994 abort ();
8995
8996 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8997 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8998 for (; dyncon < dynconend; dyncon++)
8999 {
9000 Elf_Internal_Dyn dyn;
9001 asection *s;
9002
9003 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
9004
9005 switch (dyn.d_tag)
9006 {
9007 default:
9008 continue;
9009
9010 case DT_PLTGOT:
9011 s = htab->root.sgotplt;
9012 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
9013 break;
9014
9015 case DT_JMPREL:
9016 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
9017 break;
9018
9019 case DT_PLTRELSZ:
9020 s = htab->root.srelplt;
9021 dyn.d_un.d_val = s->size;
9022 break;
9023
9024 case DT_RELASZ:
9025 /* The procedure linkage table relocs (DT_JMPREL) should
9026 not be included in the overall relocs (DT_RELA).
9027 Therefore, we override the DT_RELASZ entry here to
9028 make it not include the JMPREL relocs. Since the
9029 linker script arranges for .rela.plt to follow all
9030 other relocation sections, we don't have to worry
9031 about changing the DT_RELA entry. */
9032 if (htab->root.srelplt != NULL)
9033 {
9034 s = htab->root.srelplt;
9035 dyn.d_un.d_val -= s->size;
9036 }
9037 break;
9038
9039 case DT_TLSDESC_PLT:
9040 s = htab->root.splt;
9041 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9042 + htab->tlsdesc_plt;
9043 break;
9044
9045 case DT_TLSDESC_GOT:
9046 s = htab->root.sgot;
9047 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
9048 + htab->dt_tlsdesc_got;
9049 break;
9050 }
9051
9052 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
9053 }
9054
9055 }
9056
9057 /* Fill in the special first entry in the procedure linkage table. */
9058 if (htab->root.splt && htab->root.splt->size > 0)
9059 {
9060 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
9061
9062 elf_section_data (htab->root.splt->output_section)->
9063 this_hdr.sh_entsize = htab->plt_entry_size;
9064
9065
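/* When lazy TLS descriptor resolution is needed, emit the special TLSDESC
   PLT entry from elfNN_aarch64_tlsdesc_small_plt_entry. The fixups below
   patch it to form the addresses of the reserved DT_TLSDESC_GOT slot
   (cleared to zero here; the dynamic linker is expected to fill it with
   its lazy TLSDESC resolver at load time) and of .got.plt, and to load
   the resolver address from that slot before branching to it. */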
9066 if (htab->tlsdesc_plt)
9067 {
9068 bfd_put_NN (output_bfd, (bfd_vma) 0,
9069 htab->root.sgot->contents + htab->dt_tlsdesc_got);
9070
9071 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
9072 elfNN_aarch64_tlsdesc_small_plt_entry,
9073 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
9074
9075 {
9076 bfd_vma adrp1_addr =
9077 htab->root.splt->output_section->vma
9078 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
9079
9080 bfd_vma adrp2_addr = adrp1_addr + 4;
9081
9082 bfd_vma got_addr =
9083 htab->root.sgot->output_section->vma
9084 + htab->root.sgot->output_offset;
9085
9086 bfd_vma pltgot_addr =
9087 htab->root.sgotplt->output_section->vma
9088 + htab->root.sgotplt->output_offset;
9089
9090 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
9091
9092 bfd_byte *plt_entry =
9093 htab->root.splt->contents + htab->tlsdesc_plt;
9094
9095 /* adrp x2, DT_TLSDESC_GOT */
9096 elf_aarch64_update_plt_entry (output_bfd,
9097 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9098 plt_entry + 4,
9099 (PG (dt_tlsdesc_got)
9100 - PG (adrp1_addr)));
9101
9102 /* adrp x3, 0 */
9103 elf_aarch64_update_plt_entry (output_bfd,
9104 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
9105 plt_entry + 8,
9106 (PG (pltgot_addr)
9107 - PG (adrp2_addr)));
9108
9109 /* ldr x2, [x2, #0] */
9110 elf_aarch64_update_plt_entry (output_bfd,
9111 BFD_RELOC_AARCH64_LDSTNN_LO12,
9112 plt_entry + 12,
9113 PG_OFFSET (dt_tlsdesc_got));
9114
9115 /* add x3, x3, 0 */
9116 elf_aarch64_update_plt_entry (output_bfd,
9117 BFD_RELOC_AARCH64_ADD_LO12,
9118 plt_entry + 16,
9119 PG_OFFSET (pltgot_addr));
9120 }
9121 }
9122 }
9123
9124 if (htab->root.sgotplt)
9125 {
9126 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
9127 {
9128 (*_bfd_error_handler)
9129 (_("discarded output section: `%A'"), htab->root.sgotplt);
9130 return FALSE;
9131 }
9132
9133 /* Fill in the first three entries in the global offset table. */
9134 if (htab->root.sgotplt->size > 0)
9135 {
9136 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
9137
9138 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
9139 bfd_put_NN (output_bfd,
9140 (bfd_vma) 0,
9141 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
9142 bfd_put_NN (output_bfd,
9143 (bfd_vma) 0,
9144 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
9145 }
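/* The three reserved GOT.PLT words above are written as zero; under the
   usual ELF convention the dynamic linker stores its link-map pointer in
   GOT.PLT[1] and the lazy-resolution trampoline address in GOT.PLT[2],
   which PLT0 loads and branches through (see
   elfNN_aarch64_init_small_plt0_entry). */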
9146
9147 if (htab->root.sgot)
9148 {
9149 if (htab->root.sgot->size > 0)
9150 {
9151 bfd_vma addr =
9152 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
9153 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
9154 }
9155 }
9156
9157 elf_section_data (htab->root.sgotplt->output_section)->
9158 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
9159 }
9160
9161 if (htab->root.sgot && htab->root.sgot->size > 0)
9162 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
9163 = GOT_ENTRY_SIZE;
9164
9165 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
9166 htab_traverse (htab->loc_hash_table,
9167 elfNN_aarch64_finish_local_dynamic_symbol,
9168 info);
9169
9170 return TRUE;
9171 }
9172
9173 /* Return the address of the Ith PLT stub in section PLT, for the
9174 relocation REL, or (bfd_vma) -1 if it should not be included. */
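/* For example, I == 0 names the stub immediately after the
   PLT_ENTRY_SIZE-byte PLT0 header; later stubs follow at
   PLT_SMALL_ENTRY_SIZE intervals. */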
9175
9176 static bfd_vma
9177 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
9178 const arelent *rel ATTRIBUTE_UNUSED)
9179 {
9180 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
9181 }
9182
9183
9184 /* We use this so we can override certain functions
9185 (though currently we don't). */
9186
9187 const struct elf_size_info elfNN_aarch64_size_info =
9188 {
9189 sizeof (ElfNN_External_Ehdr),
9190 sizeof (ElfNN_External_Phdr),
9191 sizeof (ElfNN_External_Shdr),
9192 sizeof (ElfNN_External_Rel),
9193 sizeof (ElfNN_External_Rela),
9194 sizeof (ElfNN_External_Sym),
9195 sizeof (ElfNN_External_Dyn),
9196 sizeof (Elf_External_Note),
9197 4, /* Hash table entry size. */
9198 1, /* Internal relocs per external relocs. */
9199 ARCH_SIZE, /* Arch size. */
9200 LOG_FILE_ALIGN, /* Log_file_align. */
9201 ELFCLASSNN, EV_CURRENT,
9202 bfd_elfNN_write_out_phdrs,
9203 bfd_elfNN_write_shdrs_and_ehdr,
9204 bfd_elfNN_checksum_contents,
9205 bfd_elfNN_write_relocs,
9206 bfd_elfNN_swap_symbol_in,
9207 bfd_elfNN_swap_symbol_out,
9208 bfd_elfNN_slurp_reloc_table,
9209 bfd_elfNN_slurp_symbol_table,
9210 bfd_elfNN_swap_dyn_in,
9211 bfd_elfNN_swap_dyn_out,
9212 bfd_elfNN_swap_reloc_in,
9213 bfd_elfNN_swap_reloc_out,
9214 bfd_elfNN_swap_reloca_in,
9215 bfd_elfNN_swap_reloca_out
9216 };
9217
9218 #define ELF_ARCH bfd_arch_aarch64
9219 #define ELF_MACHINE_CODE EM_AARCH64
9220 #define ELF_MAXPAGESIZE 0x10000
9221 #define ELF_MINPAGESIZE 0x1000
9222 #define ELF_COMMONPAGESIZE 0x1000
9223
9224 #define bfd_elfNN_close_and_cleanup \
9225 elfNN_aarch64_close_and_cleanup
9226
9227 #define bfd_elfNN_bfd_free_cached_info \
9228 elfNN_aarch64_bfd_free_cached_info
9229
9230 #define bfd_elfNN_bfd_is_target_special_symbol \
9231 elfNN_aarch64_is_target_special_symbol
9232
9233 #define bfd_elfNN_bfd_link_hash_table_create \
9234 elfNN_aarch64_link_hash_table_create
9235
9236 #define bfd_elfNN_bfd_merge_private_bfd_data \
9237 elfNN_aarch64_merge_private_bfd_data
9238
9239 #define bfd_elfNN_bfd_print_private_bfd_data \
9240 elfNN_aarch64_print_private_bfd_data
9241
9242 #define bfd_elfNN_bfd_reloc_type_lookup \
9243 elfNN_aarch64_reloc_type_lookup
9244
9245 #define bfd_elfNN_bfd_reloc_name_lookup \
9246 elfNN_aarch64_reloc_name_lookup
9247
9248 #define bfd_elfNN_bfd_set_private_flags \
9249 elfNN_aarch64_set_private_flags
9250
9251 #define bfd_elfNN_find_inliner_info \
9252 elfNN_aarch64_find_inliner_info
9253
9254 #define bfd_elfNN_find_nearest_line \
9255 elfNN_aarch64_find_nearest_line
9256
9257 #define bfd_elfNN_mkobject \
9258 elfNN_aarch64_mkobject
9259
9260 #define bfd_elfNN_new_section_hook \
9261 elfNN_aarch64_new_section_hook
9262
9263 #define elf_backend_adjust_dynamic_symbol \
9264 elfNN_aarch64_adjust_dynamic_symbol
9265
9266 #define elf_backend_always_size_sections \
9267 elfNN_aarch64_always_size_sections
9268
9269 #define elf_backend_check_relocs \
9270 elfNN_aarch64_check_relocs
9271
9272 #define elf_backend_copy_indirect_symbol \
9273 elfNN_aarch64_copy_indirect_symbol
9274
9275 /* Create .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
9276 to them in our hash table. */
9277 #define elf_backend_create_dynamic_sections \
9278 elfNN_aarch64_create_dynamic_sections
9279
9280 #define elf_backend_init_index_section \
9281 _bfd_elf_init_2_index_sections
9282
9283 #define elf_backend_finish_dynamic_sections \
9284 elfNN_aarch64_finish_dynamic_sections
9285
9286 #define elf_backend_finish_dynamic_symbol \
9287 elfNN_aarch64_finish_dynamic_symbol
9288
9289 #define elf_backend_gc_sweep_hook \
9290 elfNN_aarch64_gc_sweep_hook
9291
9292 #define elf_backend_object_p \
9293 elfNN_aarch64_object_p
9294
9295 #define elf_backend_output_arch_local_syms \
9296 elfNN_aarch64_output_arch_local_syms
9297
9298 #define elf_backend_plt_sym_val \
9299 elfNN_aarch64_plt_sym_val
9300
9301 #define elf_backend_post_process_headers \
9302 elfNN_aarch64_post_process_headers
9303
9304 #define elf_backend_relocate_section \
9305 elfNN_aarch64_relocate_section
9306
9307 #define elf_backend_reloc_type_class \
9308 elfNN_aarch64_reloc_type_class
9309
9310 #define elf_backend_section_from_shdr \
9311 elfNN_aarch64_section_from_shdr
9312
9313 #define elf_backend_size_dynamic_sections \
9314 elfNN_aarch64_size_dynamic_sections
9315
9316 #define elf_backend_size_info \
9317 elfNN_aarch64_size_info
9318
9319 #define elf_backend_write_section \
9320 elfNN_aarch64_write_section
9321
9322 #define elf_backend_can_refcount 1
9323 #define elf_backend_can_gc_sections 1
9324 #define elf_backend_plt_readonly 1
9325 #define elf_backend_want_got_plt 1
9326 #define elf_backend_want_plt_sym 0
9327 #define elf_backend_may_use_rel_p 0
9328 #define elf_backend_may_use_rela_p 1
9329 #define elf_backend_default_use_rela_p 1
9330 #define elf_backend_rela_normal 1
9331 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
9332 #define elf_backend_default_execstack 0
9333 #define elf_backend_extern_protected_data 1
9334
9335 #undef elf_backend_obj_attrs_section
9336 #define elf_backend_obj_attrs_section ".ARM.attributes"
9337
9338 #include "elfNN-target.h"
9339
9340 /* CloudABI support. */
9341
9342 #undef TARGET_LITTLE_SYM
9343 #define TARGET_LITTLE_SYM aarch64_elfNN_le_cloudabi_vec
9344 #undef TARGET_LITTLE_NAME
9345 #define TARGET_LITTLE_NAME "elfNN-littleaarch64-cloudabi"
9346 #undef TARGET_BIG_SYM
9347 #define TARGET_BIG_SYM aarch64_elfNN_be_cloudabi_vec
9348 #undef TARGET_BIG_NAME
9349 #define TARGET_BIG_NAME "elfNN-bigaarch64-cloudabi"
9350
9351 #undef ELF_OSABI
9352 #define ELF_OSABI ELFOSABI_CLOUDABI
9353
9354 #undef elfNN_bed
9355 #define elfNN_bed elfNN_aarch64_cloudabi_bed
9356
9357 #include "elfNN-target.h"