1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2017 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.c, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adr(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x10000000; }
108
109 static bool
110 is_adrp(const Insntype insn)
111 { return (insn & 0x9F000000) == 0x90000000; }
112
113 static bool
114 is_mrs_tpidr_el0(const Insntype insn)
115 { return (insn & 0xFFFFFFE0) == 0xd53bd040; }
116
117 static unsigned int
118 aarch64_rm(const Insntype insn)
119 { return aarch64_bits(insn, 16, 5); }
120
121 static unsigned int
122 aarch64_rn(const Insntype insn)
123 { return aarch64_bits(insn, 5, 5); }
124
125 static unsigned int
126 aarch64_rd(const Insntype insn)
127 { return aarch64_bits(insn, 0, 5); }
128
129 static unsigned int
130 aarch64_rt(const Insntype insn)
131 { return aarch64_bits(insn, 0, 5); }
132
133 static unsigned int
134 aarch64_rt2(const Insntype insn)
135 { return aarch64_bits(insn, 10, 5); }
136
137 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
138 static Insntype
139 aarch64_adr_encode_imm(Insntype adr, int imm21)
140 {
141 gold_assert(is_adr(adr));
142 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
143 const int mask19 = (1 << 19) - 1;
144 const int mask2 = 3;
145 adr &= ~((mask19 << 5) | (mask2 << 29));
146 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
147 return adr;
148 }
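  // For illustration: the 21-bit ADR immediate is split into immlo =
  // imm21[1:0] at insn bits [30:29] and immhi = imm21[20:2] at insn bits
  // [23:5].  E.g. for imm21 = 0x1000 (4096 bytes) the encoding step above
  // ORs in (0 << 29) | (0x400 << 5), i.e. 0x8000.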
149
150 // Retrieve encoded adrp 33-bit signed imm value. This value is obtained by
151 // multiplying the 21-bit signed imm encoded in the insn by 4k (page size) and
152 // sign-extending to 64 bits, resulting in [-4G, 4G) with the 12 LSBs being 0.
153 static int64_t
154 aarch64_adrp_decode_imm(const Insntype adrp)
155 {
156 const int mask19 = (1 << 19) - 1;
157 const int mask2 = 3;
158 gold_assert(is_adrp(adrp));
159 // 21-bit imm encoded in adrp.
160 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
161 // Retrieve msb of 21-bit-signed imm for sign extension.
162 uint64_t msbt = (imm >> 20) & 1;
163 // Real value is imm multiplied by 4k. Value now has 33-bit information.
164 int64_t value = imm << 12;
165 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
166 // with value.
167 return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
168 }
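  // Worked example for the sign extension above: imm = 0x1fffff (imm21 == -1)
  // gives msbt == 1 and value == 0x1fffff000; ((1 << 32) - 1) << 33 sets bits
  // [63:33], so the result is 0xfffffffffffff000 == -4096, one page below.
  // When msbt == 0 the term is (1 << 32) << 33, whose only set bit shifts out
  // of the 64-bit value, leaving just the positive 33-bit result.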
169
170 static bool
171 aarch64_b(const Insntype insn)
172 { return (insn & 0xFC000000) == 0x14000000; }
173
174 static bool
175 aarch64_bl(const Insntype insn)
176 { return (insn & 0xFC000000) == 0x94000000; }
177
178 static bool
179 aarch64_blr(const Insntype insn)
180 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
181
182 static bool
183 aarch64_br(const Insntype insn)
184 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
185
186 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
187 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.
188 static bool
189 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
190
191 static bool
192 aarch64_ldst(Insntype insn)
193 { return (insn & 0x0a000000) == 0x08000000; }
194
195 static bool
196 aarch64_ldst_ex(Insntype insn)
197 { return (insn & 0x3f000000) == 0x08000000; }
198
199 static bool
200 aarch64_ldst_pcrel(Insntype insn)
201 { return (insn & 0x3b000000) == 0x18000000; }
202
203 static bool
204 aarch64_ldst_nap(Insntype insn)
205 { return (insn & 0x3b800000) == 0x28000000; }
206
207 static bool
208 aarch64_ldstp_pi(Insntype insn)
209 { return (insn & 0x3b800000) == 0x28800000; }
210
211 static bool
212 aarch64_ldstp_o(Insntype insn)
213 { return (insn & 0x3b800000) == 0x29000000; }
214
215 static bool
216 aarch64_ldstp_pre(Insntype insn)
217 { return (insn & 0x3b800000) == 0x29800000; }
218
219 static bool
220 aarch64_ldst_ui(Insntype insn)
221 { return (insn & 0x3b200c00) == 0x38000000; }
222
223 static bool
224 aarch64_ldst_piimm(Insntype insn)
225 { return (insn & 0x3b200c00) == 0x38000400; }
226
227 static bool
228 aarch64_ldst_u(Insntype insn)
229 { return (insn & 0x3b200c00) == 0x38000800; }
230
231 static bool
232 aarch64_ldst_preimm(Insntype insn)
233 { return (insn & 0x3b200c00) == 0x38000c00; }
234
235 static bool
236 aarch64_ldst_ro(Insntype insn)
237 { return (insn & 0x3b200c00) == 0x38200800; }
238
239 static bool
240 aarch64_ldst_uimm(Insntype insn)
241 { return (insn & 0x3b000000) == 0x39000000; }
242
243 static bool
244 aarch64_ldst_simd_m(Insntype insn)
245 { return (insn & 0xbfbf0000) == 0x0c000000; }
246
247 static bool
248 aarch64_ldst_simd_m_pi(Insntype insn)
249 { return (insn & 0xbfa00000) == 0x0c800000; }
250
251 static bool
252 aarch64_ldst_simd_s(Insntype insn)
253 { return (insn & 0xbf9f0000) == 0x0d000000; }
254
255 static bool
256 aarch64_ldst_simd_s_pi(Insntype insn)
257 { return (insn & 0xbf800000) == 0x0d800000; }
258
259 // Classify INSN as a load/store. Return true if INSN is a LD/ST
260 // instruction, otherwise return false. For scalar LD/ST instructions PAIR
261 // is set to false, RT is returned and RT2 is set equal to RT. For LD/ST
262 // pair instructions PAIR is set to true and both RT and RT2 are returned.
263 static bool
264 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
265 bool *pair, bool *load)
266 {
267 uint32_t opcode;
268 unsigned int r;
269 uint32_t opc = 0;
270 uint32_t v = 0;
271 uint32_t opc_v = 0;
272
273 /* Bail out quickly if INSN doesn't fall into the load-store
274 encoding space. */
275 if (!aarch64_ldst (insn))
276 return false;
277
278 *pair = false;
279 *load = false;
280 if (aarch64_ldst_ex (insn))
281 {
282 *rt = aarch64_rt (insn);
283 *rt2 = *rt;
284 if (aarch64_bit (insn, 21) == 1)
285 {
286 *pair = true;
287 *rt2 = aarch64_rt2 (insn);
288 }
289 *load = aarch64_ld (insn);
290 return true;
291 }
292 else if (aarch64_ldst_nap (insn)
293 || aarch64_ldstp_pi (insn)
294 || aarch64_ldstp_o (insn)
295 || aarch64_ldstp_pre (insn))
296 {
297 *pair = true;
298 *rt = aarch64_rt (insn);
299 *rt2 = aarch64_rt2 (insn);
300 *load = aarch64_ld (insn);
301 return true;
302 }
303 else if (aarch64_ldst_pcrel (insn)
304 || aarch64_ldst_ui (insn)
305 || aarch64_ldst_piimm (insn)
306 || aarch64_ldst_u (insn)
307 || aarch64_ldst_preimm (insn)
308 || aarch64_ldst_ro (insn)
309 || aarch64_ldst_uimm (insn))
310 {
311 *rt = aarch64_rt (insn);
312 *rt2 = *rt;
313 if (aarch64_ldst_pcrel (insn))
314 *load = true;
315 opc = aarch64_bits (insn, 22, 2);
316 v = aarch64_bit (insn, 26);
317 opc_v = opc | (v << 2);
318 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
319 || opc_v == 5 || opc_v == 7);
320 return true;
321 }
322 else if (aarch64_ldst_simd_m (insn)
323 || aarch64_ldst_simd_m_pi (insn))
324 {
325 *rt = aarch64_rt (insn);
326 *load = aarch64_bit (insn, 22);
327 opcode = (insn >> 12) & 0xf;
328 switch (opcode)
329 {
330 case 0:
331 case 2:
332 *rt2 = *rt + 3;
333 break;
334
335 case 4:
336 case 6:
337 *rt2 = *rt + 2;
338 break;
339
340 case 7:
341 *rt2 = *rt;
342 break;
343
344 case 8:
345 case 10:
346 *rt2 = *rt + 1;
347 break;
348
349 default:
350 return false;
351 }
352 return true;
353 }
354 else if (aarch64_ldst_simd_s (insn)
355 || aarch64_ldst_simd_s_pi (insn))
356 {
357 *rt = aarch64_rt (insn);
358 r = (insn >> 21) & 1;
359 *load = aarch64_bit (insn, 22);
360 opcode = (insn >> 13) & 0x7;
361 switch (opcode)
362 {
363 case 0:
364 case 2:
365 case 4:
366 *rt2 = *rt + r;
367 break;
368
369 case 1:
370 case 3:
371 case 5:
372 *rt2 = *rt + (r == 0 ? 2 : 3);
373 break;
374
375 case 6:
376 *rt2 = *rt + r;
377 break;
378
379 case 7:
380 *rt2 = *rt + (r == 0 ? 2 : 3);
381 break;
382
383 default:
384 return false;
385 }
386 return true;
387 }
388 return false;
389 } // End of "aarch64_mem_op_p".
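  // A minimal usage sketch (hypothetical caller), for illustration:
  //
  //   unsigned int rt, rt2;
  //   bool pair, load;
  //   if (AArch64_insn_utilities<big_endian>::aarch64_mem_op_p(
  //         insn, &rt, &rt2, &pair, &load) && load)
  //     {
  //       // INSN is a load; rt (and rt2, when pair is true) are the
  //       // transfer registers the erratum scanners care about.
  //     }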
390
391 // Return true if INSN is a mac (multiply-accumulate class) insn.
392 static bool
393 aarch64_mac(Insntype insn)
394 { return (insn & 0xff000000) == 0x9b000000; }
395
396 // Return true if INSN is multiply-accumulate.
397 // (This is similar to the implementation in elfnn-aarch64.c.)
398 static bool
399 aarch64_mlxl(Insntype insn)
400 {
401 uint32_t op31 = aarch64_op31(insn);
402 if (aarch64_mac(insn)
403 && (op31 == 0 || op31 == 1 || op31 == 5)
404 /* Exclude MUL instructions which are encoded as a multiple-accumulate
405 with RA = XZR. */
406 && aarch64_ra(insn) != AARCH64_ZR)
407 {
408 return true;
409 }
410 return false;
411 }
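  // For illustration: "madd x0, x1, x2, x3" encodes as 0x9b020c20 - a mac
  // insn with op31 == 0 and ra == 3, so aarch64_mlxl returns true.  The alias
  // "mul x0, x1, x2" encodes as 0x9b027c20 with ra == 31 (XZR), so it is
  // excluded, matching the erratum 835769 conditions.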
412 }; // End of "AArch64_insn_utilities".
413
414
415 // Insn length in bytes.
416
417 template<bool big_endian>
418 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
419
420
421 // Zero register encoding - 31.
422
423 template<bool big_endian>
424 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
425
426
427 // Output_data_got_aarch64 class.
428
429 template<int size, bool big_endian>
430 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
431 {
432 public:
433 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
434 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
435 : Output_data_got<size, big_endian>(),
436 symbol_table_(symtab), layout_(layout)
437 { }
438
439 // Add a static reloc for the GOT entry at OFFSET. GSYM is a global
440 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
441 // applied in a static link.
442 void
443 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
444 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
445
446
447 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
448 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
449 // relocation that needs to be applied in a static link.
450 void
451 add_static_reloc(unsigned int got_offset, unsigned int r_type,
452 Sized_relobj_file<size, big_endian>* relobj,
453 unsigned int index)
454 {
455 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
456 index));
457 }
458
459
460 protected:
461 // Write out the GOT table.
462 void
463 do_write(Output_file* of) {
464 // The first entry in the GOT is the address of the .dynamic section.
465 gold_assert(this->data_size() >= size / 8);
466 Output_section* dynamic = this->layout_->dynamic_section();
467 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
468 this->replace_constant(0, dynamic_addr);
469 Output_data_got<size, big_endian>::do_write(of);
470
471 // Handle static relocs.
472 if (this->static_relocs_.empty())
473 return;
474
475 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
476
477 gold_assert(parameters->doing_static_link());
478 const off_t offset = this->offset();
479 const section_size_type oview_size =
480 convert_to_section_size_type(this->data_size());
481 unsigned char* const oview = of->get_output_view(offset, oview_size);
482
483 Output_segment* tls_segment = this->layout_->tls_segment();
484 gold_assert(tls_segment != NULL);
485
486 AArch64_address aligned_tcb_address =
487 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
488 tls_segment->maximum_alignment());
489
490 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
491 {
492 Static_reloc& reloc(this->static_relocs_[i]);
493 AArch64_address value;
494
495 if (!reloc.symbol_is_global())
496 {
497 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
498 const Symbol_value<size>* psymval =
499 reloc.relobj()->local_symbol(reloc.index());
500
501 // We are doing static linking. Issue an error and skip this
502 // relocation if the symbol is undefined or in a discarded section.
503 bool is_ordinary;
504 unsigned int shndx = psymval->input_shndx(&is_ordinary);
505 if ((shndx == elfcpp::SHN_UNDEF)
506 || (is_ordinary
507 && shndx != elfcpp::SHN_UNDEF
508 && !object->is_section_included(shndx)
509 && !this->symbol_table_->is_section_folded(object, shndx)))
510 {
511 gold_error(_("undefined or discarded local symbol %u from "
512 " object %s in GOT"),
513 reloc.index(), reloc.relobj()->name().c_str());
514 continue;
515 }
516 value = psymval->value(object, 0);
517 }
518 else
519 {
520 const Symbol* gsym = reloc.symbol();
521 gold_assert(gsym != NULL);
522 if (gsym->is_forwarder())
523 gsym = this->symbol_table_->resolve_forwards(gsym);
524
525 // We are doing static linking. Issue an error and skip this
526 // relocation if the symbol is undefined or in a discarded section,
527 // unless it is a weakly undefined symbol.
528 if ((gsym->is_defined_in_discarded_section()
529 || gsym->is_undefined())
530 && !gsym->is_weak_undefined())
531 {
532 gold_error(_("undefined or discarded symbol %s in GOT"),
533 gsym->name());
534 continue;
535 }
536
537 if (!gsym->is_weak_undefined())
538 {
539 const Sized_symbol<size>* sym =
540 static_cast<const Sized_symbol<size>*>(gsym);
541 value = sym->value();
542 }
543 else
544 value = 0;
545 }
546
547 unsigned got_offset = reloc.got_offset();
548 gold_assert(got_offset < oview_size);
549
550 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
551 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
552 Valtype x;
553 switch (reloc.r_type())
554 {
555 case elfcpp::R_AARCH64_TLS_DTPREL64:
556 x = value;
557 break;
558 case elfcpp::R_AARCH64_TLS_TPREL64:
559 x = value + aligned_tcb_address;
560 break;
561 default:
562 gold_unreachable();
563 }
564 elfcpp::Swap<size, big_endian>::writeval(wv, x);
565 }
566
567 of->write_output_view(offset, oview_size, oview);
568 }
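  // Background for the TPREL/DTPREL cases above: AArch64 uses TLS variant 1,
  // where the thread pointer points at the TCB and the first TLS block
  // follows it.  A static R_AARCH64_TLS_TPREL64 entry therefore stores the
  // symbol value plus the TCB size rounded up to the TLS segment's maximum
  // alignment, while R_AARCH64_TLS_DTPREL64 stores the offset within the
  // module's TLS block unchanged.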
569
570 private:
571 // Symbol table of the output object.
572 Symbol_table* symbol_table_;
573 // A pointer to the Layout class, so that we can find the .dynamic
574 // section when we write out the GOT section.
575 Layout* layout_;
576
577 // This class represents dynamic relocations that need to be applied by
578 // gold because we are using TLS relocations in a static link.
579 class Static_reloc
580 {
581 public:
582 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
583 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
584 { this->u_.global.symbol = gsym; }
585
586 Static_reloc(unsigned int got_offset, unsigned int r_type,
587 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
588 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
589 {
590 this->u_.local.relobj = relobj;
591 this->u_.local.index = index;
592 }
593
594 // Return the GOT offset.
595 unsigned int
596 got_offset() const
597 { return this->got_offset_; }
598
599 // Relocation type.
600 unsigned int
601 r_type() const
602 { return this->r_type_; }
603
604 // Whether the symbol is global or not.
605 bool
606 symbol_is_global() const
607 { return this->symbol_is_global_; }
608
609 // For a relocation against a global symbol, the global symbol.
610 Symbol*
611 symbol() const
612 {
613 gold_assert(this->symbol_is_global_);
614 return this->u_.global.symbol;
615 }
616
617 // For a relocation against a local symbol, the defining object.
618 Sized_relobj_file<size, big_endian>*
619 relobj() const
620 {
621 gold_assert(!this->symbol_is_global_);
622 return this->u_.local.relobj;
623 }
624
625 // For a relocation against a local symbol, the local symbol index.
626 unsigned int
627 index() const
628 {
629 gold_assert(!this->symbol_is_global_);
630 return this->u_.local.index;
631 }
632
633 private:
634 // GOT offset of the entry to which this relocation is applied.
635 unsigned int got_offset_;
636 // Type of relocation.
637 unsigned int r_type_;
638 // Whether this relocation is against a global symbol.
639 bool symbol_is_global_;
640 // A global or local symbol.
641 union
642 {
643 struct
644 {
645 // For a global symbol, the symbol itself.
646 Symbol* symbol;
647 } global;
648 struct
649 {
650 // For a local symbol, the object defining the symbol.
651 Sized_relobj_file<size, big_endian>* relobj;
652 // For a local symbol, the symbol index.
653 unsigned int index;
654 } local;
655 } u_;
656 }; // End of inner class Static_reloc
657
658 std::vector<Static_reloc> static_relocs_;
659 }; // End of Output_data_got_aarch64
660
661
662 template<int size, bool big_endian>
663 class AArch64_input_section;
664
665
666 template<int size, bool big_endian>
667 class AArch64_output_section;
668
669
670 template<int size, bool big_endian>
671 class AArch64_relobj;
672
673
674 // Stub type enum constants.
675
676 enum
677 {
678 ST_NONE = 0,
679
680 // Using adrp/add pair, 4 insns (including alignment) without mem access,
681 // the fastest stub. This has a limited jump distance, which is tested by
682 // aarch64_valid_for_adrp_p.
683 ST_ADRP_BRANCH = 1,
684
685 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
686 // unlimited in jump distance.
687 ST_LONG_BRANCH_ABS = 2,
688
689 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
690 // mem access, the slowest kind. Only used when the output is position independent.
691 ST_LONG_BRANCH_PCREL = 3,
692
693 // Stub for erratum 843419 handling.
694 ST_E_843419 = 4,
695
696 // Stub for erratum 835769 handling.
697 ST_E_835769 = 5,
698
699 // Number of total stub types.
700 ST_NUMBER = 6
701 };
702
703
704 // Struct that wraps insns for a particular stub. All stub templates are
705 // created/initialized as constants by Stub_template_repertoire.
706
707 template<bool big_endian>
708 struct Stub_template
709 {
710 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
711 const int insn_num;
712 };
713
714
715 // Simple singleton class that creates/initializes/stores all types of stub
716 // templates.
717
718 template<bool big_endian>
719 class Stub_template_repertoire
720 {
721 public:
722 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
723
724 // Single static method to get stub template for a given stub type.
725 static const Stub_template<big_endian>*
726 get_stub_template(int type)
727 {
728 static Stub_template_repertoire<big_endian> singleton;
729 return singleton.stub_templates_[type];
730 }
731
732 private:
733 // Constructor - creates/initializes all stub templates.
734 Stub_template_repertoire();
735 ~Stub_template_repertoire()
736 { }
737
738 // Disallowing copy ctor and copy assignment operator.
739 Stub_template_repertoire(Stub_template_repertoire&);
740 Stub_template_repertoire& operator=(Stub_template_repertoire&);
741
742 // Data that stores all insn templates.
743 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
744 }; // End of "class Stub_template_repertoire".
745
746
747 // Constructor - creates/initializes all stub templates.
748
749 template<bool big_endian>
750 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
751 {
752 // Insn array definitions.
753 const static Insntype ST_NONE_INSNS[] = {};
754
755 const static Insntype ST_ADRP_BRANCH_INSNS[] =
756 {
757 0x90000010, /* adrp ip0, X */
758 /* ADR_PREL_PG_HI21(X) */
759 0x91000210, /* add ip0, ip0, :lo12:X */
760 /* ADD_ABS_LO12_NC(X) */
761 0xd61f0200, /* br ip0 */
762 0x00000000, /* alignment padding */
763 };
764
765 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
766 {
767 0x58000050, /* ldr ip0, 0x8 */
768 0xd61f0200, /* br ip0 */
769 0x00000000, /* address field */
770 0x00000000, /* address field */
771 };
772
773 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
774 {
775 0x58000090, /* ldr ip0, 0x10 */
776 0x10000011, /* adr ip1, #0 */
777 0x8b110210, /* add ip0, ip0, ip1 */
778 0xd61f0200, /* br ip0 */
779 0x00000000, /* address field */
780 0x00000000, /* address field */
781 0x00000000, /* alignment padding */
782 0x00000000, /* alignment padding */
783 };
784
785 const static Insntype ST_E_843419_INSNS[] =
786 {
787 0x00000000, /* Placeholder for erratum insn. */
788 0x14000000, /* b <label> */
789 };
790
791 // ST_E_835769 has the same stub template as ST_E_843419
792 // but we reproduce the array here so that the sizeof
793 // expressions in install_insn_template will work.
794 const static Insntype ST_E_835769_INSNS[] =
795 {
796 0x00000000, /* Placeholder for erratum insn. */
797 0x14000000, /* b <label> */
798 };
799
800 #define install_insn_template(T) \
801 const static Stub_template<big_endian> template_##T = { \
802 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
803 this->stub_templates_[T] = &template_##T
804
805 install_insn_template(ST_NONE);
806 install_insn_template(ST_ADRP_BRANCH);
807 install_insn_template(ST_LONG_BRANCH_ABS);
808 install_insn_template(ST_LONG_BRANCH_PCREL);
809 install_insn_template(ST_E_843419);
810 install_insn_template(ST_E_835769);
811
812 #undef install_insn_template
813 }
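// A small usage sketch (hypothetical caller), for illustration:
//
//   const Stub_template<big_endian>* tpl =
//     Stub_template_repertoire<big_endian>::get_stub_template(ST_ADRP_BRANCH);
//   // tpl->insn_num == 4 and tpl->insns[2] == 0xd61f0200 (br ip0).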
814
815
816 // Base class for stubs.
817
818 template<int size, bool big_endian>
819 class Stub_base
820 {
821 public:
822 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
823 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
824
825 static const AArch64_address invalid_address =
826 static_cast<AArch64_address>(-1);
827
828 static const section_offset_type invalid_offset =
829 static_cast<section_offset_type>(-1);
830
831 Stub_base(int type)
832 : destination_address_(invalid_address),
833 offset_(invalid_offset),
834 type_(type)
835 {}
836
837 ~Stub_base()
838 {}
839
840 // Get stub type.
841 int
842 type() const
843 { return this->type_; }
844
845 // Get stub template that provides stub insn information.
846 const Stub_template<big_endian>*
847 stub_template() const
848 {
849 return Stub_template_repertoire<big_endian>::
850 get_stub_template(this->type());
851 }
852
853 // Get destination address.
854 AArch64_address
855 destination_address() const
856 {
857 gold_assert(this->destination_address_ != this->invalid_address);
858 return this->destination_address_;
859 }
860
861 // Set destination address.
862 void
863 set_destination_address(AArch64_address address)
864 {
865 gold_assert(address != this->invalid_address);
866 this->destination_address_ = address;
867 }
868
869 // Reset the destination address.
870 void
871 reset_destination_address()
872 { this->destination_address_ = this->invalid_address; }
873
874 // Get offset of code stub. For Reloc_stub, it is the offset from the
875 // beginning of its containing stub table; for Erratum_stub, it is the offset
876 // from the end of reloc_stubs.
877 section_offset_type
878 offset() const
879 {
880 gold_assert(this->offset_ != this->invalid_offset);
881 return this->offset_;
882 }
883
884 // Set stub offset.
885 void
886 set_offset(section_offset_type offset)
887 { this->offset_ = offset; }
888
889 // Return the stub insn.
890 const Insntype*
891 insns() const
892 { return this->stub_template()->insns; }
893
894 // Return num of stub insns.
895 unsigned int
896 insn_num() const
897 { return this->stub_template()->insn_num; }
898
899 // Get size of the stub.
900 int
901 stub_size() const
902 {
903 return this->insn_num() *
904 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
905 }
906
907 // Write stub to output file.
908 void
909 write(unsigned char* view, section_size_type view_size)
910 { this->do_write(view, view_size); }
911
912 protected:
913 // Abstract method to be implemented by sub-classes.
914 virtual void
915 do_write(unsigned char*, section_size_type) = 0;
916
917 private:
918 // The last insn of a stub is a jump to the destination insn. This field records
919 // the destination address.
920 AArch64_address destination_address_;
921 // The stub offset. Note this has different interpretations for a
922 // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
923 // beginning of the containing stub_table, whereas for Erratum_stub, this is
924 // the offset from the end of reloc_stubs.
925 section_offset_type offset_;
926 // Stub type.
927 const int type_;
928 }; // End of "Stub_base".
929
930
931 // Erratum stub class. An erratum stub differs from a reloc stub in that for
932 // each erratum occurrence, we generate an erratum stub. We never share erratum
933 // stubs, whereas for reloc stubs, different branch insns share a single reloc
934 // stub as long as the branch targets are the same. (More to the point, reloc
935 // stubs can be shared because they're used to reach a specific target, whereas
936 // erratum stubs branch back to the original control flow.)
937
938 template<int size, bool big_endian>
939 class Erratum_stub : public Stub_base<size, big_endian>
940 {
941 public:
942 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
943 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
944 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
945 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
946
947 static const int STUB_ADDR_ALIGN;
948
949 static const Insntype invalid_insn = static_cast<Insntype>(-1);
950
951 Erratum_stub(The_aarch64_relobj* relobj, int type,
952 unsigned shndx, unsigned int sh_offset)
953 : Stub_base<size, big_endian>(type), relobj_(relobj),
954 shndx_(shndx), sh_offset_(sh_offset),
955 erratum_insn_(invalid_insn),
956 erratum_address_(this->invalid_address)
957 {}
958
959 ~Erratum_stub() {}
960
961 // Return the object that contains the erratum.
962 The_aarch64_relobj*
963 relobj()
964 { return this->relobj_; }
965
966 // Get section index of the erratum.
967 unsigned int
968 shndx() const
969 { return this->shndx_; }
970
971 // Get section offset of the erratum.
972 unsigned int
973 sh_offset() const
974 { return this->sh_offset_; }
975
976 // Get the erratum insn. This is the insn located at the erratum address.
977 Insntype
978 erratum_insn() const
979 {
980 gold_assert(this->erratum_insn_ != this->invalid_insn);
981 return this->erratum_insn_;
982 }
983
984 // Set the insn that the erratum happens to.
985 void
986 set_erratum_insn(Insntype insn)
987 { this->erratum_insn_ = insn; }
988
989 // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
990 // relocation spot. In that case the erratum_insn_ recorded during scanning
991 // is no longer the one we want to write out to the stub, so update
992 // erratum_insn_ with the relocated version. Also note that in this case xn
993 // must not be "PC", so it is safe to move the erratum insn from its original
994 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
995 // insn, which cannot be a relocation spot (an assertion enforces this).
996 void
997 update_erratum_insn(Insntype insn)
998 {
999 gold_assert(this->erratum_insn_ != this->invalid_insn);
1000 switch (this->type())
1001 {
1002 case ST_E_843419:
1003 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
1004 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
1005 gold_assert(Insn_utilities::aarch64_rd(insn) ==
1006 Insn_utilities::aarch64_rd(this->erratum_insn()));
1007 gold_assert(Insn_utilities::aarch64_rn(insn) ==
1008 Insn_utilities::aarch64_rn(this->erratum_insn()));
1009 // Update plain ld/st insn with relocated insn.
1010 this->erratum_insn_ = insn;
1011 break;
1012 case ST_E_835769:
1013 gold_assert(insn == this->erratum_insn());
1014 break;
1015 default:
1016 gold_unreachable();
1017 }
1018 }
1019
1020
1021 // Return the address where the erratum fix must be applied.
1022 AArch64_address
1023 erratum_address() const
1024 {
1025 gold_assert(this->erratum_address_ != this->invalid_address);
1026 return this->erratum_address_;
1027 }
1028
1029 // Set the address where the erratum fix must be applied.
1030 void
1031 set_erratum_address(AArch64_address addr)
1032 { this->erratum_address_ = addr; }
1033
1034 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
1035 // sh_offset). We do not include 'type' in the calculation, because there is
1036 // at most one stub type at (obj, shndx, sh_offset).
1037 bool
1038 operator<(const Erratum_stub<size, big_endian>& k) const
1039 {
1040 if (this == &k)
1041 return false;
1042 // We group stubs by relobj.
1043 if (this->relobj_ != k.relobj_)
1044 return this->relobj_ < k.relobj_;
1045 // Then by section index.
1046 if (this->shndx_ != k.shndx_)
1047 return this->shndx_ < k.shndx_;
1048 // Lastly by section offset.
1049 return this->sh_offset_ < k.sh_offset_;
1050 }
1051
1052 protected:
1053 virtual void
1054 do_write(unsigned char*, section_size_type);
1055
1056 private:
1057 // The object that needs to be fixed.
1058 The_aarch64_relobj* relobj_;
1059 // The shndx in the object that needs to be fixed.
1060 const unsigned int shndx_;
1061 // The section offset in the object that needs to be fixed.
1062 const unsigned int sh_offset_;
1063 // The insn to be fixed.
1064 Insntype erratum_insn_;
1065 // The address of the above insn.
1066 AArch64_address erratum_address_;
1067 }; // End of "Erratum_stub".
1068
1069
1070 // Erratum subclass to wrap additional info needed by 843419. In fixing this
1071 // erratum, we may choose to replace 'adrp' with 'adr', in which case we need
1072 // adrp's code position (two or three insns before the erratum insn itself).
1073
1074 template<int size, bool big_endian>
1075 class E843419_stub : public Erratum_stub<size, big_endian>
1076 {
1077 public:
1078 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
1079
1080 E843419_stub(AArch64_relobj<size, big_endian>* relobj,
1081 unsigned int shndx, unsigned int sh_offset,
1082 unsigned int adrp_sh_offset)
1083 : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
1084 adrp_sh_offset_(adrp_sh_offset)
1085 {}
1086
1087 unsigned int
1088 adrp_sh_offset() const
1089 { return this->adrp_sh_offset_; }
1090
1091 private:
1092 // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because
1093 // we can obtain it from its parent.)
1094 const unsigned int adrp_sh_offset_;
1095 };
1096
1097
1098 template<int size, bool big_endian>
1099 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1100
1101 // Comparator used in set definition.
1102 template<int size, bool big_endian>
1103 struct Erratum_stub_less
1104 {
1105 bool
1106 operator()(const Erratum_stub<size, big_endian>* s1,
1107 const Erratum_stub<size, big_endian>* s2) const
1108 { return *s1 < *s2; }
1109 };
1110
1111 // Erratum_stub implementation for writing stub to output file.
1112
1113 template<int size, bool big_endian>
1114 void
1115 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1116 {
1117 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1118 const Insntype* insns = this->insns();
1119 uint32_t num_insns = this->insn_num();
1120 Insntype* ip = reinterpret_cast<Insntype*>(view);
1121 // For current implemented erratum 843419 and 835769, the first insn in the
1122 // stub is always a copy of the problematic insn (in 843419, the mem access
1123 // insn, in 835769, the mac insn), followed by a jump-back.
1124 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1125 for (uint32_t i = 1; i < num_insns; ++i)
1126 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1127 }
1128
1129
1130 // Reloc stub class.
1131
1132 template<int size, bool big_endian>
1133 class Reloc_stub : public Stub_base<size, big_endian>
1134 {
1135 public:
1136 typedef Reloc_stub<size, big_endian> This;
1137 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1138
1139 // Branch range. This is used to calculate the section group size, as well as to
1140 // determine whether a stub is needed.
1141 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1142 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1143
1144 // Constants used to determine if an offset fits in the adrp instruction
1145 // encoding.
1146 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1147 static const int MIN_ADRP_IMM = -(1 << 20);
1148
1149 static const int BYTES_PER_INSN = 4;
1150 static const int STUB_ADDR_ALIGN;
1151
1152 // Determine whether the offset fits in the jump/branch instruction.
1153 static bool
1154 aarch64_valid_branch_offset_p(int64_t offset)
1155 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1156
1157 // Determine whether the offset fits in the adrp immediate field.
1158 static bool
1159 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1160 {
1161 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1162 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1163 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1164 }
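  // For illustration: MAX_BRANCH_OFFSET == ((1 << 25) - 1) << 2 == 0x7fffffc
  // (just under +128 MiB) and MIN_BRANCH_OFFSET == -(1 << 27) == -128 MiB,
  // i.e. the signed 26-bit word offset of B/BL scaled by 4.  The adrp check
  // instead works in 4 KiB pages, giving roughly a +/-4 GiB reach between the
  // stub location and its destination.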
1165
1166 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1167 // needed.
1168 static int
1169 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1170 AArch64_address target);
1171
1172 Reloc_stub(int type)
1173 : Stub_base<size, big_endian>(type)
1174 { }
1175
1176 ~Reloc_stub()
1177 { }
1178
1179 // The key class used to index the stub instance in the stub table's stub map.
1180 class Key
1181 {
1182 public:
1183 Key(int type, const Symbol* symbol, const Relobj* relobj,
1184 unsigned int r_sym, int32_t addend)
1185 : type_(type), addend_(addend)
1186 {
1187 if (symbol != NULL)
1188 {
1189 this->r_sym_ = Reloc_stub::invalid_index;
1190 this->u_.symbol = symbol;
1191 }
1192 else
1193 {
1194 gold_assert(relobj != NULL && r_sym != invalid_index);
1195 this->r_sym_ = r_sym;
1196 this->u_.relobj = relobj;
1197 }
1198 }
1199
1200 ~Key()
1201 { }
1202
1203 // Return stub type.
1204 int
1205 type() const
1206 { return this->type_; }
1207
1208 // Return the local symbol index or invalid_index.
1209 unsigned int
1210 r_sym() const
1211 { return this->r_sym_; }
1212
1213 // Return the symbol if there is one.
1214 const Symbol*
1215 symbol() const
1216 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1217
1218 // Return the relobj if there is one.
1219 const Relobj*
1220 relobj() const
1221 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1222
1223 // Whether this equals to another key k.
1224 bool
1225 eq(const Key& k) const
1226 {
1227 return ((this->type_ == k.type_)
1228 && (this->r_sym_ == k.r_sym_)
1229 && ((this->r_sym_ != Reloc_stub::invalid_index)
1230 ? (this->u_.relobj == k.u_.relobj)
1231 : (this->u_.symbol == k.u_.symbol))
1232 && (this->addend_ == k.addend_));
1233 }
1234
1235 // Return a hash value.
1236 size_t
1237 hash_value() const
1238 {
1239 size_t name_hash_value = gold::string_hash<char>(
1240 (this->r_sym_ != Reloc_stub::invalid_index)
1241 ? this->u_.relobj->name().c_str()
1242 : this->u_.symbol->name());
1243 // Only the low two bits of the stub type feed the hash.
1244 size_t stub_type_hash_value = 0x03 & this->type_;
1245 return (name_hash_value
1246 ^ stub_type_hash_value
1247 ^ ((this->r_sym_ & 0x3fff) << 2)
1248 ^ ((this->addend_ & 0xffff) << 16));
1249 }
1250
1251 // Functors for STL associative containers.
1252 struct hash
1253 {
1254 size_t
1255 operator()(const Key& k) const
1256 { return k.hash_value(); }
1257 };
1258
1259 struct equal_to
1260 {
1261 bool
1262 operator()(const Key& k1, const Key& k2) const
1263 { return k1.eq(k2); }
1264 };
1265
1266 private:
1267 // Stub type.
1268 const int type_;
1269 // If this is a local symbol, this is the index in the defining object.
1270 // Otherwise, it is invalid_index for a global symbol.
1271 unsigned int r_sym_;
1272 // If r_sym_ is an invalid index, this points to a global symbol.
1273 // Otherwise, it points to a relobj. We used the unsized and target
1274 // independent Symbol and Relobj classes instead of Sized_symbol<32> and
1275 // AArch64_relobj, in order to avoid making the stub class a template
1276 // as most of the stub machinery is endianness-neutral. However, it
1277 // may require a bit of casting done by users of this class.
1278 union
1279 {
1280 const Symbol* symbol;
1281 const Relobj* relobj;
1282 } u_;
1283 // Addend associated with a reloc.
1284 int32_t addend_;
1285 }; // End of inner class Reloc_stub::Key
1286
1287 protected:
1288 // This may be overridden in the child class.
1289 virtual void
1290 do_write(unsigned char*, section_size_type);
1291
1292 private:
1293 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1294 }; // End of Reloc_stub
1295
1296 template<int size, bool big_endian>
1297 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1298
1299 // Write data to output file.
1300
1301 template<int size, bool big_endian>
1302 void
1303 Reloc_stub<size, big_endian>::
1304 do_write(unsigned char* view, section_size_type)
1305 {
1306 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1307 const uint32_t* insns = this->insns();
1308 uint32_t num_insns = this->insn_num();
1309 Insntype* ip = reinterpret_cast<Insntype*>(view);
1310 for (uint32_t i = 0; i < num_insns; ++i)
1311 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1312 }
1313
1314
1315 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1316 // needed.
1317
1318 template<int size, bool big_endian>
1319 inline int
1320 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1321 unsigned int r_type, AArch64_address location, AArch64_address dest)
1322 {
1323 int64_t branch_offset = 0;
1324 switch(r_type)
1325 {
1326 case elfcpp::R_AARCH64_CALL26:
1327 case elfcpp::R_AARCH64_JUMP26:
1328 branch_offset = dest - location;
1329 break;
1330 default:
1331 gold_unreachable();
1332 }
1333
1334 if (aarch64_valid_branch_offset_p(branch_offset))
1335 return ST_NONE;
1336
1337 if (aarch64_valid_for_adrp_p(location, dest))
1338 return ST_ADRP_BRANCH;
1339
1340 // Always use PC-relative addressing in case of -shared or -pie.
1341 if (parameters->options().output_is_position_independent())
1342 return ST_LONG_BRANCH_PCREL;
1343
1344 // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL.
1345 // But it is only applicable when the output is neither -shared nor -pie.
1346 return ST_LONG_BRANCH_ABS;
1347 }
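// For illustration: a branch at 0x400000 targeting 0x10400000 has an offset
// of 0x10000000 (256 MiB), beyond the +/-128 MiB range of B/BL, so a stub is
// needed.  The page delta (0x10000 pages) fits the adrp immediate, so
// stub_type_for_reloc returns ST_ADRP_BRANCH; only when the adrp range is
// also exceeded does the position independence of the output decide between
// ST_LONG_BRANCH_PCREL and ST_LONG_BRANCH_ABS.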
1348
1349 // A class to hold stubs for the AArch64 target.
1350
1351 template<int size, bool big_endian>
1352 class Stub_table : public Output_data
1353 {
1354 public:
1355 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1356 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1357 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1358 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1359 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1360 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1361 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1362 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1363 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1364 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1365 typedef Stub_table<size, big_endian> The_stub_table;
1366 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1367 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1368 Reloc_stub_map;
1369 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1370 typedef Relocate_info<size, big_endian> The_relocate_info;
1371
1372 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1373 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1374
1375 Stub_table(The_aarch64_input_section* owner)
1376 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1377 erratum_stubs_size_(0), prev_data_size_(0)
1378 { }
1379
1380 ~Stub_table()
1381 { }
1382
1383 The_aarch64_input_section*
1384 owner() const
1385 { return owner_; }
1386
1387 // Whether this stub table is empty.
1388 bool
1389 empty() const
1390 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1391
1392 // Return the current data size.
1393 off_t
1394 current_data_size() const
1395 { return this->current_data_size_for_child(); }
1396
1397 // Add a STUB using KEY. The caller is responsible for avoiding addition
1398 // if a STUB with the same key has already been added.
1399 void
1400 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1401
1402 // Add an erratum stub into the erratum stub set. The set is ordered by
1403 // (relobj, shndx, sh_offset).
1404 void
1405 add_erratum_stub(The_erratum_stub* stub);
1406
1407 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1408 The_erratum_stub*
1409 find_erratum_stub(The_aarch64_relobj* a64relobj,
1410 unsigned int shndx, unsigned int sh_offset);
1411
1412 // Find all the errata for a given input section. The return value is a pair
1413 // of iterators [begin, end).
1414 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1415 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1416 unsigned int shndx);
1417
1418 // Compute the erratum stub address.
1419 AArch64_address
1420 erratum_stub_address(The_erratum_stub* stub) const
1421 {
1422 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1423 The_erratum_stub::STUB_ADDR_ALIGN);
1424 r += stub->offset();
1425 return r;
1426 }
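  // For illustration, the stub table is laid out as
  //
  //   [ reloc stubs... ][ pad to STUB_ADDR_ALIGN ][ erratum stubs... ]
  //
  // so an erratum stub's offset() is measured from the end of the (aligned)
  // reloc stub region rather than from the start of the table.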
1427
1428 // Finalize stubs. No-op here, just for completeness.
1429 void
1430 finalize_stubs()
1431 { }
1432
1433 // Look up a relocation stub using KEY. Return NULL if there is none.
1434 The_reloc_stub*
1435 find_reloc_stub(The_reloc_stub_key& key)
1436 {
1437 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1438 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1439 }
1440
1441 // Relocate stubs in this stub table.
1442 void
1443 relocate_stubs(const The_relocate_info*,
1444 The_target_aarch64*,
1445 Output_section*,
1446 unsigned char*,
1447 AArch64_address,
1448 section_size_type);
1449
1450 // Update data size at the end of a relaxation pass. Return true if data size
1451 // is different from that of the previous relaxation pass.
1452 bool
1453 update_data_size_changed_p()
1454 {
1455 // No addralign changed here.
1456 off_t s = align_address(this->reloc_stubs_size_,
1457 The_erratum_stub::STUB_ADDR_ALIGN)
1458 + this->erratum_stubs_size_;
1459 bool changed = (s != this->prev_data_size_);
1460 this->prev_data_size_ = s;
1461 return changed;
1462 }
1463
1464 protected:
1465 // Write out section contents.
1466 void
1467 do_write(Output_file*);
1468
1469 // Return the required alignment.
1470 uint64_t
1471 do_addralign() const
1472 {
1473 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1474 The_erratum_stub::STUB_ADDR_ALIGN);
1475 }
1476
1477 // Reset address and file offset.
1478 void
1479 do_reset_address_and_file_offset()
1480 { this->set_current_data_size_for_child(this->prev_data_size_); }
1481
1482 // Set final data size.
1483 void
1484 set_final_data_size()
1485 { this->set_data_size(this->current_data_size()); }
1486
1487 private:
1488 // Relocate one stub.
1489 void
1490 relocate_stub(The_reloc_stub*,
1491 const The_relocate_info*,
1492 The_target_aarch64*,
1493 Output_section*,
1494 unsigned char*,
1495 AArch64_address,
1496 section_size_type);
1497
1498 private:
1499 // Owner of this stub table.
1500 The_aarch64_input_section* owner_;
1501 // The relocation stubs.
1502 Reloc_stub_map reloc_stubs_;
1503 // The erratum stubs.
1504 Erratum_stub_set erratum_stubs_;
1505 // Size of reloc stubs.
1506 off_t reloc_stubs_size_;
1507 // Size of erratum stubs.
1508 off_t erratum_stubs_size_;
1509 // Data size of this stub table in the previous relaxation pass.
1510 off_t prev_data_size_;
1511 }; // End of Stub_table
1512
1513
1514 // Add an erratum stub into the erratum stub set. The set is ordered by
1515 // (relobj, shndx, sh_offset).
1516
1517 template<int size, bool big_endian>
1518 void
1519 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1520 {
1521 std::pair<Erratum_stub_set_iter, bool> ret =
1522 this->erratum_stubs_.insert(stub);
1523 gold_assert(ret.second);
1524 this->erratum_stubs_size_ = align_address(
1525 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1526 stub->set_offset(this->erratum_stubs_size_);
1527 this->erratum_stubs_size_ += stub->stub_size();
1528 }
1529
1530
1531 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1532
1533 template<int size, bool big_endian>
1534 Erratum_stub<size, big_endian>*
1535 Stub_table<size, big_endian>::find_erratum_stub(
1536 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1537 {
1538 // A dummy object used as key to search in the set.
1539 The_erratum_stub key(a64relobj, ST_NONE,
1540 shndx, sh_offset);
1541 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1542 if (i != this->erratum_stubs_.end())
1543 {
1544 The_erratum_stub* stub(*i);
1545 gold_assert(stub->erratum_insn() != 0);
1546 return stub;
1547 }
1548 return NULL;
1549 }
1550
1551
1552 // Find all the errata for a given input section. The return value is a pair of
1553 // iterators [begin, end).
1554
1555 template<int size, bool big_endian>
1556 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1557 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1558 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1559 The_aarch64_relobj* a64relobj, unsigned int shndx)
1560 {
1561 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1562 Erratum_stub_set_iter start, end;
1563 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1564 start = this->erratum_stubs_.lower_bound(&low_key);
1565 if (start == this->erratum_stubs_.end())
1566 return Result_pair(this->erratum_stubs_.end(),
1567 this->erratum_stubs_.end());
1568 end = start;
1569 while (end != this->erratum_stubs_.end() &&
1570 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1571 ++end;
1572 return Result_pair(start, end);
1573 }
1574
1575
1576 // Add a STUB using KEY. The caller is responsible for avoiding addition
1577 // if a STUB with the same key has already been added.
1578
1579 template<int size, bool big_endian>
1580 void
1581 Stub_table<size, big_endian>::add_reloc_stub(
1582 The_reloc_stub* stub, const The_reloc_stub_key& key)
1583 {
1584 gold_assert(stub->type() == key.type());
1585 this->reloc_stubs_[key] = stub;
1586
1587 // Assign stub offset early. We can do this because we never remove
1588 // reloc stubs and they are in the beginning of the stub table.
1589 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1590 The_reloc_stub::STUB_ADDR_ALIGN);
1591 stub->set_offset(this->reloc_stubs_size_);
1592 this->reloc_stubs_size_ += stub->stub_size();
1593 }
1594
1595
1596 // Relocate all stubs in this stub table.
1597
1598 template<int size, bool big_endian>
1599 void
1600 Stub_table<size, big_endian>::
1601 relocate_stubs(const The_relocate_info* relinfo,
1602 The_target_aarch64* target_aarch64,
1603 Output_section* output_section,
1604 unsigned char* view,
1605 AArch64_address address,
1606 section_size_type view_size)
1607 {
1608 // "view_size" is the total size of the stub_table.
1609 gold_assert(address == this->address() &&
1610 view_size == static_cast<section_size_type>(this->data_size()));
1611 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1612 p != this->reloc_stubs_.end(); ++p)
1613 relocate_stub(p->second, relinfo, target_aarch64, output_section,
1614 view, address, view_size);
1615
1616 // Just for convenience.
1617 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1618
1619 // Now 'relocate' erratum stubs.
1620 for(Erratum_stub_set_iter i = this->erratum_stubs_.begin();
1621 i != this->erratum_stubs_.end(); ++i)
1622 {
1623 AArch64_address stub_address = this->erratum_stub_address(*i);
1624 // The address of "b" in the stub that is to be "relocated".
1625 AArch64_address stub_b_insn_address;
1626 // Branch offset that is to be filled in "b" insn.
1627 int b_offset = 0;
1628 switch ((*i)->type())
1629 {
1630 case ST_E_843419:
1631 case ST_E_835769:
1632 // The 1st insn of the erratum could be a relocation spot,
1633 // in this case we need to fix it with
1634 // "(*i)->erratum_insn()".
1635 elfcpp::Swap<32, big_endian>::writeval(
1636 view + (stub_address - this->address()),
1637 (*i)->erratum_insn());
1638 // For the erratum, the 2nd insn is a b-insn to be patched
1639 // (relocated).
1640 stub_b_insn_address = stub_address + 1 * BPI;
1641 b_offset = (*i)->destination_address() - stub_b_insn_address;
1642 AArch64_relocate_functions<size, big_endian>::construct_b(
1643 view + (stub_b_insn_address - this->address()),
1644 ((unsigned int)(b_offset)) & 0xfffffff);
1645 break;
1646 default:
1647 gold_unreachable();
1648 break;
1649 }
1650 }
1651 }
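// Note on the 0xfffffff mask used above when constructing the "b" insn: B
// encodes a signed 26-bit word offset (imm26), i.e. a 28-bit byte offset with
// the low two bits zero, so keeping the low 28 bits of b_offset is what
// construct_b needs to encode the jump back to the stub's recorded
// destination.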
1652
1653
1654 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
1655
1656 template<int size, bool big_endian>
1657 void
1658 Stub_table<size, big_endian>::
1659 relocate_stub(The_reloc_stub* stub,
1660 const The_relocate_info* relinfo,
1661 The_target_aarch64* target_aarch64,
1662 Output_section* output_section,
1663 unsigned char* view,
1664 AArch64_address address,
1665 section_size_type view_size)
1666 {
1667 // "offset" is the offset from the beginning of the stub_table.
1668 section_size_type offset = stub->offset();
1669 section_size_type stub_size = stub->stub_size();
1670 // "view_size" is the total size of the stub_table.
1671 gold_assert(offset + stub_size <= view_size);
1672
1673 target_aarch64->relocate_stub(stub, relinfo, output_section,
1674 view + offset, address + offset, view_size);
1675 }
1676
1677
1678 // Write out the stubs to file.
1679
1680 template<int size, bool big_endian>
1681 void
1682 Stub_table<size, big_endian>::do_write(Output_file* of)
1683 {
1684 off_t offset = this->offset();
1685 const section_size_type oview_size =
1686 convert_to_section_size_type(this->data_size());
1687 unsigned char* const oview = of->get_output_view(offset, oview_size);
1688
1689 // Write relocation stubs.
1690 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1691 p != this->reloc_stubs_.end(); ++p)
1692 {
1693 The_reloc_stub* stub = p->second;
1694 AArch64_address address = this->address() + stub->offset();
1695 gold_assert(address ==
1696 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1697 stub->write(oview + stub->offset(), stub->stub_size());
1698 }
1699
1700 // Write erratum stubs.
1701 unsigned int erratum_stub_start_offset =
1702 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1703 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1704 p != this->erratum_stubs_.end(); ++p)
1705 {
1706 The_erratum_stub* stub(*p);
1707 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1708 stub->stub_size());
1709 }
1710
1711 of->write_output_view(this->offset(), oview_size, oview);
1712 }
1713
1714
1715 // AArch64_relobj class.
1716
1717 template<int size, bool big_endian>
1718 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1719 {
1720 public:
1721 typedef AArch64_relobj<size, big_endian> This;
1722 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1723 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1724 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1725 typedef Stub_table<size, big_endian> The_stub_table;
1726 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1727 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1728 typedef std::vector<The_stub_table*> Stub_table_list;
1729 static const AArch64_address invalid_address =
1730 static_cast<AArch64_address>(-1);
1731
1732 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1733 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1734 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1735 stub_tables_()
1736 { }
1737
1738 ~AArch64_relobj()
1739 { }
1740
1741 // Return the stub table of the SHNDX-th section if there is one.
1742 The_stub_table*
1743 stub_table(unsigned int shndx) const
1744 {
1745 gold_assert(shndx < this->stub_tables_.size());
1746 return this->stub_tables_[shndx];
1747 }
1748
1749 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1750 void
1751 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1752 {
1753 gold_assert(shndx < this->stub_tables_.size());
1754 this->stub_tables_[shndx] = stub_table;
1755 }
1756
1757 // Entry point for errata scanning.
1758 void
1759 scan_errata(unsigned int shndx,
1760 const elfcpp::Shdr<size, big_endian>&,
1761 Output_section*, const Symbol_table*,
1762 The_target_aarch64*);
1763
1764 // Scan all relocation sections for stub generation.
1765 void
1766 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1767 const Layout*);
1768
1769 // Whether a section is a scannable text section.
1770 bool
1771 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1772 const Output_section*, const Symbol_table*);
1773
1774 // Convert regular input section with index SHNDX to a relaxed section.
1775 void
1776 convert_input_section_to_relaxed_section(unsigned shndx)
1777 {
1778 // The stubs have relocations and we need to process them after writing
1779 // out the stubs. So relocation must now follow section writes.
1780 this->set_section_offset(shndx, -1ULL);
1781 this->set_relocs_must_follow_section_writes();
1782 }
1783
1784 // Structure for recording a mapping symbol's position.
1785 struct Mapping_symbol_position
1786 {
1787 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1788 shndx_(shndx), offset_(offset)
1789 {}
1790
1791 // "<" comparator used in ordered_map container.
1792 bool
1793 operator<(const Mapping_symbol_position& p) const
1794 {
1795 return (this->shndx_ < p.shndx_
1796 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1797 }
1798
1799 // Section index.
1800 unsigned int shndx_;
1801
1802 // Section offset.
1803 AArch64_address offset_;
1804 };
1805
1806 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1807
1808 protected:
1809 // Post constructor setup.
1810 void
1811 do_setup()
1812 {
1813 // Call parent's setup method.
1814 Sized_relobj_file<size, big_endian>::do_setup();
1815
1816 // Initialize look-up tables.
1817 this->stub_tables_.resize(this->shnum());
1818 }
1819
1820 virtual void
1821 do_relocate_sections(
1822 const Symbol_table* symtab, const Layout* layout,
1823 const unsigned char* pshdrs, Output_file* of,
1824 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1825
1826 // Count local symbols and (optionally) record mapping info.
1827 virtual void
1828 do_count_local_symbols(Stringpool_template<char>*,
1829 Stringpool_template<char>*);
1830
1831 private:
1832 // Fix all errata in the object.
1833 void
1834 fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);
1835
1836 // Try to fix erratum 843419 in an optimized way. Return true if patch is
1837 // applied.
1838 bool
1839 try_fix_erratum_843419_optimized(
1840 The_erratum_stub*,
1841 typename Sized_relobj_file<size, big_endian>::View_size&);
1842
1843 // Whether a section needs to be scanned for relocation stubs.
1844 bool
1845 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1846 const Relobj::Output_sections&,
1847 const Symbol_table*, const unsigned char*);
1848
1849 // List of stub tables.
1850 Stub_table_list stub_tables_;
1851
1852 // Mapping symbol information sorted by (section index, section_offset).
1853 Mapping_symbol_info mapping_symbol_info_;
1854 }; // End of AArch64_relobj
1855
1856
1857 // Override to record mapping symbol information.
1858 template<int size, bool big_endian>
1859 void
1860 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1861 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1862 {
1863 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1864
1865   // Only erratum-fixing work needs mapping symbols, so skip this
1866   // time-consuming processing when we are not fixing errata.
1867 if (!parameters->options().fix_cortex_a53_843419()
1868 && !parameters->options().fix_cortex_a53_835769())
1869 return;
1870
1871 const unsigned int loccount = this->local_symbol_count();
1872 if (loccount == 0)
1873 return;
1874
1875 // Read the symbol table section header.
1876 const unsigned int symtab_shndx = this->symtab_shndx();
1877 elfcpp::Shdr<size, big_endian>
1878 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1879 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1880
1881 // Read the local symbols.
1882   const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1883 gold_assert(loccount == symtabshdr.get_sh_info());
1884 off_t locsize = loccount * sym_size;
1885 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1886 locsize, true, true);
1887
1888 // For mapping symbol processing, we need to read the symbol names.
1889 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1890 if (strtab_shndx >= this->shnum())
1891 {
1892 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1893 return;
1894 }
1895
1896 elfcpp::Shdr<size, big_endian>
1897 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1898 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1899 {
1900 this->error(_("symbol table name section has wrong type: %u"),
1901 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1902 return;
1903 }
1904
1905 const char* pnames =
1906 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1907 strtabshdr.get_sh_size(),
1908 false, false));
1909
1910 // Skip the first dummy symbol.
1911 psyms += sym_size;
1912 typename Sized_relobj_file<size, big_endian>::Local_values*
1913 plocal_values = this->local_values();
1914 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1915 {
1916 elfcpp::Sym<size, big_endian> sym(psyms);
1917 Symbol_value<size>& lv((*plocal_values)[i]);
1918 AArch64_address input_value = lv.input_value();
1919
1920 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1921 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1922 // symbols.
1923 // Mapping symbols could be one of the following 4 forms -
1924 // a) $x
1925 // b) $x.<any...>
1926 // c) $d
1927 // d) $d.<any...>
1928 const char* sym_name = pnames + sym.get_st_name();
1929 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1930 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1931 {
1932 bool is_ordinary;
1933 unsigned int input_shndx =
1934 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1935 gold_assert(is_ordinary);
1936
1937 Mapping_symbol_position msp(input_shndx, input_value);
1938 // Insert mapping_symbol_info into map whose ordering is defined by
1939 // (shndx, offset_within_section).
1940 this->mapping_symbol_info_[msp] = sym_name[1];
1941 }
1942 }
1943 }
1944
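// Editor's illustrative sketch, not part of gold: the mapping-symbol name
// test used in do_count_local_symbols above, factored into a standalone
// predicate. The helper name is hypothetical and exists only for exposition;
// it accepts "$x" or "$d", optionally followed by a "."-suffix, matching the
// mapping-symbol forms listed above.
static inline bool
example_is_aarch64_mapping_symbol_name(const char* sym_name)
{
  return (sym_name[0] == '$'
          && (sym_name[1] == 'x' || sym_name[1] == 'd')
          && (sym_name[2] == '\0' || sym_name[2] == '.'));
}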
1945
1946 // Fix all errata in the object.
1947
1948 template<int size, bool big_endian>
1949 void
1950 AArch64_relobj<size, big_endian>::fix_errata(
1951 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1952 {
1953 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1954 unsigned int shnum = this->shnum();
1955 for (unsigned int i = 1; i < shnum; ++i)
1956 {
1957 The_stub_table* stub_table = this->stub_table(i);
1958 if (!stub_table)
1959 continue;
1960 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1961 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1962 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1963 while (p != end)
1964 {
1965 The_erratum_stub* stub = *p;
1966 typename Sized_relobj_file<size, big_endian>::View_size&
1967 pview((*pviews)[i]);
1968
1969 // Double check data before fix.
1970 gold_assert(pview.address + stub->sh_offset()
1971 == stub->erratum_address());
1972
1973 // Update previously recorded erratum insn with relocated
1974 // version.
1975 Insntype* ip =
1976 reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
1977 Insntype insn_to_fix = ip[0];
1978 stub->update_erratum_insn(insn_to_fix);
1979
1980 // First try to see if erratum is 843419 and if it can be fixed
1981 // without using branch-to-stub.
1982 if (!try_fix_erratum_843419_optimized(stub, pview))
1983 {
1984 // Replace the erratum insn with a branch-to-stub.
1985 AArch64_address stub_address =
1986 stub_table->erratum_stub_address(stub);
1987 unsigned int b_offset = stub_address - stub->erratum_address();
1988 AArch64_relocate_functions<size, big_endian>::construct_b(
1989 pview.view + stub->sh_offset(), b_offset & 0xfffffff);
1990 }
1991 ++p;
1992 }
1993 }
1994 }
1995
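// Editor's illustrative sketch, not part of gold: the unconditional B
// instruction used for the branch-to-stub above encodes a 26-bit signed word
// offset, i.e. a byte displacement within +/-128MB, and the 0xfffffff mask
// above keeps the low 28 bits that the encoder consumes. The helper name is
// hypothetical and exists only for exposition.
static inline bool
example_b_byte_offset_in_range(int64_t byte_offset)
{
  // A 26-bit signed immediate scaled by 4 gives a 28-bit signed byte offset.
  return (-(static_cast<int64_t>(1) << 27) <= byte_offset
          && byte_offset < (static_cast<int64_t>(1) << 27));
}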
1996
1997 // This is an optimization for erratum 843419. The erratum sequence must begin
1998 // with 'adrp'; when the final value calculated by the adrp fits in an adr, we
1999 // can simply replace the 'adrp' with an 'adr', saving 2 jumps per occurrence.
2000 // (Note, however, that in this case we do not delete the erratum stub (it is
2001 // too late to do so); it is merely generated without ever being called.)
2002
2003 template<int size, bool big_endian>
2004 bool
2005 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
2006 The_erratum_stub* stub,
2007 typename Sized_relobj_file<size, big_endian>::View_size& pview)
2008 {
2009 if (stub->type() != ST_E_843419)
2010 return false;
2011
2012 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2013 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2014 E843419_stub<size, big_endian>* e843419_stub =
2015 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2016 AArch64_address pc = pview.address + e843419_stub->adrp_sh_offset();
2017   unsigned int adrp_offset = e843419_stub->adrp_sh_offset();
2018 Insntype* adrp_view = reinterpret_cast<Insntype*>(pview.view + adrp_offset);
2019 Insntype adrp_insn = adrp_view[0];
2020
2021   // If the instruction at adrp_sh_offset is "mrs R, tpidr_el0", it may come
2022   // from IE -> LE relaxation etc. As a side effect of that TLS relaxation the
2023   // ADRP has been turned into an MRS, so there is no erratum risk any more.
2024   // Therefore, we return true to avoid emitting an unnecessary branch-to-stub.
2025 if (Insn_utilities::is_mrs_tpidr_el0(adrp_insn))
2026 return true;
2027
2028 // If the instruction at adrp_sh_offset is not ADRP and the instruction before
2029 // it is "mrs R, tpidr_el0", it may come from LD -> LE relaxation etc.
2030 // Like the above case, there is no erratum risk any more, we can safely
2031 // return true.
2032 if (!Insn_utilities::is_adrp(adrp_insn) && adrp_offset)
2033 {
2034 Insntype* prev_view
2035 = reinterpret_cast<Insntype*>(pview.view + adrp_offset - 4);
2036 Insntype prev_insn = prev_view[0];
2037
2038 if (Insn_utilities::is_mrs_tpidr_el0(prev_insn))
2039 return true;
2040 }
2041
2042   // If we reach here, the first instruction must be ADRP.
2043 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2044 // Get adrp 33-bit signed imm value.
2045 int64_t adrp_imm = Insn_utilities::
2046 aarch64_adrp_decode_imm(adrp_insn);
2047 // adrp - final value transferred to target register is calculated as:
2048 // PC[11:0] = Zeros(12)
2049 // adrp_dest_value = PC + adrp_imm;
2050 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2051   // adr - final value transferred to target register is calculated as:
2052 // PC + adr_imm
2053 // So we have:
2054 // PC + adr_imm = adrp_dest_value
2055 // ==>
2056 // adr_imm = adrp_dest_value - PC
2057 int64_t adr_imm = adrp_dest_value - pc;
2058 // Check if imm fits in adr (21-bit signed).
2059 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2060 {
2061 // Convert 'adrp' into 'adr'.
2062 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2063 adr_insn = Insn_utilities::
2064 aarch64_adr_encode_imm(adr_insn, adr_imm);
2065 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2066 return true;
2067 }
2068 return false;
2069 }
2070
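// Editor's illustrative sketch, not part of gold: a standalone version of the
// range test used by try_fix_erratum_843419_optimized above. Given the PC of
// the original adrp and the page-aligned value it computes, an adr can only
// replace it when the required immediate fits in adr's 21-bit signed field.
// The helper name is hypothetical and exists only for exposition.
static inline bool
example_adr_can_reach(int64_t pc, int64_t adrp_dest_value)
{
  // adr computes PC + imm21, so the required immediate is the distance
  // from the current PC to the destination.
  int64_t adr_imm = adrp_dest_value - pc;
  // adr encodes a 21-bit signed immediate: [-2^20, 2^20).
  return -(1 << 20) <= adr_imm && adr_imm < (1 << 20);
}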
2071
2072 // Relocate sections.
2073
2074 template<int size, bool big_endian>
2075 void
2076 AArch64_relobj<size, big_endian>::do_relocate_sections(
2077 const Symbol_table* symtab, const Layout* layout,
2078 const unsigned char* pshdrs, Output_file* of,
2079 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2080 {
2081 // Relocate the section data.
2082 this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
2083 1, this->shnum() - 1);
2084
2085 // We do not generate stubs if doing a relocatable link.
2086 if (parameters->options().relocatable())
2087 return;
2088
2089 if (parameters->options().fix_cortex_a53_843419()
2090 || parameters->options().fix_cortex_a53_835769())
2091 this->fix_errata(pviews);
2092
2093 Relocate_info<size, big_endian> relinfo;
2094 relinfo.symtab = symtab;
2095 relinfo.layout = layout;
2096 relinfo.object = this;
2097
2098 // Relocate stub tables.
2099 unsigned int shnum = this->shnum();
2100 The_target_aarch64* target = The_target_aarch64::current_target();
2101
2102 for (unsigned int i = 1; i < shnum; ++i)
2103 {
2104 The_aarch64_input_section* aarch64_input_section =
2105 target->find_aarch64_input_section(this, i);
2106 if (aarch64_input_section != NULL
2107 && aarch64_input_section->is_stub_table_owner()
2108 && !aarch64_input_section->stub_table()->empty())
2109 {
2110 Output_section* os = this->output_section(i);
2111 gold_assert(os != NULL);
2112
2113 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2114 relinfo.reloc_shdr = NULL;
2115 relinfo.data_shndx = i;
2116 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2117
2118 typename Sized_relobj_file<size, big_endian>::View_size&
2119 view_struct = (*pviews)[i];
2120 gold_assert(view_struct.view != NULL);
2121
2122 The_stub_table* stub_table = aarch64_input_section->stub_table();
2123 off_t offset = stub_table->address() - view_struct.address;
2124 unsigned char* view = view_struct.view + offset;
2125 AArch64_address address = stub_table->address();
2126 section_size_type view_size = stub_table->data_size();
2127 stub_table->relocate_stubs(&relinfo, target, os, view, address,
2128 view_size);
2129 }
2130 }
2131 }
2132
2133
2134 // Determine if an input section is scannable for stub processing. SHDR is
2135 // the header of the section and SHNDX is the section index. OS is the output
2136 // section for the input section and SYMTAB is the global symbol table used to
2137 // look up ICF information.
2138
2139 template<int size, bool big_endian>
2140 bool
2141 AArch64_relobj<size, big_endian>::text_section_is_scannable(
2142 const elfcpp::Shdr<size, big_endian>& text_shdr,
2143 unsigned int text_shndx,
2144 const Output_section* os,
2145 const Symbol_table* symtab)
2146 {
2147   // Skip any empty sections, unallocated sections, or sections whose
2148   // type is not SHT_PROGBITS.
2149 if (text_shdr.get_sh_size() == 0
2150 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
2151 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2152 return false;
2153
2154 // Skip any discarded or ICF'ed sections.
2155 if (os == NULL || symtab->is_section_folded(this, text_shndx))
2156 return false;
2157
2158 // Skip exception frame.
2159 if (strcmp(os->name(), ".eh_frame") == 0)
2160     return false;
2161
2162 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
2163 os->find_relaxed_input_section(this, text_shndx) != NULL);
2164
2165 return true;
2166 }
2167
2168
2169 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2170 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2171
2172 template<int size, bool big_endian>
2173 bool
2174 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2175 const elfcpp::Shdr<size, big_endian>& shdr,
2176 const Relobj::Output_sections& out_sections,
2177 const Symbol_table* symtab,
2178 const unsigned char* pshdrs)
2179 {
2180 unsigned int sh_type = shdr.get_sh_type();
2181 if (sh_type != elfcpp::SHT_RELA)
2182 return false;
2183
2184 // Ignore empty section.
2185 off_t sh_size = shdr.get_sh_size();
2186 if (sh_size == 0)
2187 return false;
2188
2189 // Ignore reloc section with unexpected symbol table. The
2190 // error will be reported in the final link.
2191 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2192 return false;
2193
2194 gold_assert(sh_type == elfcpp::SHT_RELA);
2195 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2196
2197 // Ignore reloc section with unexpected entsize or uneven size.
2198 // The error will be reported in the final link.
2199 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2200 return false;
2201
2202 // Ignore reloc section with bad info. This error will be
2203 // reported in the final link.
2204 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2205 if (text_shndx >= this->shnum())
2206 return false;
2207
2208 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2209 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2210 text_shndx * shdr_size);
2211 return this->text_section_is_scannable(text_shdr, text_shndx,
2212 out_sections[text_shndx], symtab);
2213 }
2214
2215
2216 // Scan section SHNDX for errata 843419 and 835769.
2217
2218 template<int size, bool big_endian>
2219 void
2220 AArch64_relobj<size, big_endian>::scan_errata(
2221 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2222 Output_section* os, const Symbol_table* symtab,
2223 The_target_aarch64* target)
2224 {
2225 if (shdr.get_sh_size() == 0
2226 || (shdr.get_sh_flags() &
2227 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2228 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2229 return;
2230
2231 if (!os || symtab->is_section_folded(this, shndx)) return;
2232
2233 AArch64_address output_offset = this->get_output_section_offset(shndx);
2234 AArch64_address output_address;
2235 if (output_offset != invalid_address)
2236 output_address = os->address() + output_offset;
2237 else
2238 {
2239 const Output_relaxed_input_section* poris =
2240 os->find_relaxed_input_section(this, shndx);
2241 if (!poris) return;
2242 output_address = poris->address();
2243 }
2244
2245 section_size_type input_view_size = 0;
2246 const unsigned char* input_view =
2247 this->section_contents(shndx, &input_view_size, false);
2248
2249 Mapping_symbol_position section_start(shndx, 0);
2250 // Find the first mapping symbol record within section shndx.
2251 typename Mapping_symbol_info::const_iterator p =
2252 this->mapping_symbol_info_.lower_bound(section_start);
2253 while (p != this->mapping_symbol_info_.end() &&
2254 p->first.shndx_ == shndx)
2255 {
2256 typename Mapping_symbol_info::const_iterator prev = p;
2257 ++p;
2258 if (prev->second == 'x')
2259 {
2260 section_size_type span_start =
2261 convert_to_section_size_type(prev->first.offset_);
2262 section_size_type span_end;
2263 if (p != this->mapping_symbol_info_.end()
2264 && p->first.shndx_ == shndx)
2265 span_end = convert_to_section_size_type(p->first.offset_);
2266 else
2267 span_end = convert_to_section_size_type(shdr.get_sh_size());
2268
2269 // Here we do not share the scanning code of both errata. For 843419,
2270 // only the last few insns of each page are examined, which is fast,
2271 // whereas, for 835769, every insn pair needs to be checked.
2272
2273 if (parameters->options().fix_cortex_a53_843419())
2274 target->scan_erratum_843419_span(
2275 this, shndx, span_start, span_end,
2276 const_cast<unsigned char*>(input_view), output_address);
2277
2278 if (parameters->options().fix_cortex_a53_835769())
2279 target->scan_erratum_835769_span(
2280 this, shndx, span_start, span_end,
2281 const_cast<unsigned char*>(input_view), output_address);
2282 }
2283 }
2284 }
2285
2286
2287 // Scan relocations for stub generation.
2288
2289 template<int size, bool big_endian>
2290 void
2291 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2292 The_target_aarch64* target,
2293 const Symbol_table* symtab,
2294 const Layout* layout)
2295 {
2296 unsigned int shnum = this->shnum();
2297 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2298
2299 // Read the section headers.
2300 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2301 shnum * shdr_size,
2302 true, true);
2303
2304 // To speed up processing, we set up hash tables for fast lookup of
2305 // input offsets to output addresses.
2306 this->initialize_input_to_output_maps();
2307
2308 const Relobj::Output_sections& out_sections(this->output_sections());
2309
2310 Relocate_info<size, big_endian> relinfo;
2311 relinfo.symtab = symtab;
2312 relinfo.layout = layout;
2313 relinfo.object = this;
2314
2315 // Do relocation stubs scanning.
2316 const unsigned char* p = pshdrs + shdr_size;
2317 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2318 {
2319 const elfcpp::Shdr<size, big_endian> shdr(p);
2320 if (parameters->options().fix_cortex_a53_843419()
2321 || parameters->options().fix_cortex_a53_835769())
2322 scan_errata(i, shdr, out_sections[i], symtab, target);
2323 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2324 pshdrs))
2325 {
2326 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2327 AArch64_address output_offset =
2328 this->get_output_section_offset(index);
2329 AArch64_address output_address;
2330 if (output_offset != invalid_address)
2331 {
2332 output_address = out_sections[index]->address() + output_offset;
2333 }
2334 else
2335 {
2336 // Currently this only happens for a relaxed section.
2337 const Output_relaxed_input_section* poris =
2338 out_sections[index]->find_relaxed_input_section(this, index);
2339 gold_assert(poris != NULL);
2340 output_address = poris->address();
2341 }
2342
2343 // Get the relocations.
2344 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2345 shdr.get_sh_size(),
2346 true, false);
2347
2348 // Get the section contents.
2349 section_size_type input_view_size = 0;
2350 const unsigned char* input_view =
2351 this->section_contents(index, &input_view_size, false);
2352
2353 relinfo.reloc_shndx = i;
2354 relinfo.data_shndx = index;
2355 unsigned int sh_type = shdr.get_sh_type();
2356 unsigned int reloc_size;
2357 gold_assert (sh_type == elfcpp::SHT_RELA);
2358 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2359
2360 Output_section* os = out_sections[index];
2361 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2362 shdr.get_sh_size() / reloc_size,
2363 os,
2364 output_offset == invalid_address,
2365 input_view, output_address,
2366 input_view_size);
2367 }
2368 }
2369 }
2370
2371
2372 // A class to wrap an ordinary input section containing executable code.
2373
2374 template<int size, bool big_endian>
2375 class AArch64_input_section : public Output_relaxed_input_section
2376 {
2377 public:
2378 typedef Stub_table<size, big_endian> The_stub_table;
2379
2380 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2381 : Output_relaxed_input_section(relobj, shndx, 1),
2382 stub_table_(NULL),
2383 original_contents_(NULL), original_size_(0),
2384 original_addralign_(1)
2385 { }
2386
2387 ~AArch64_input_section()
2388 { delete[] this->original_contents_; }
2389
2390 // Initialize.
2391 void
2392 init();
2393
2394 // Set the stub_table.
2395 void
2396 set_stub_table(The_stub_table* st)
2397 { this->stub_table_ = st; }
2398
2399 // Whether this is a stub table owner.
2400 bool
2401 is_stub_table_owner() const
2402 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2403
2404 // Return the original size of the section.
2405 uint32_t
2406 original_size() const
2407 { return this->original_size_; }
2408
2409 // Return the stub table.
2410 The_stub_table*
2411 stub_table()
2412 { return stub_table_; }
2413
2414 protected:
2415 // Write out this input section.
2416 void
2417 do_write(Output_file*);
2418
2419 // Return required alignment of this.
2420 uint64_t
2421 do_addralign() const
2422 {
2423 if (this->is_stub_table_owner())
2424 return std::max(this->stub_table_->addralign(),
2425 static_cast<uint64_t>(this->original_addralign_));
2426 else
2427 return this->original_addralign_;
2428 }
2429
2430 // Finalize data size.
2431 void
2432 set_final_data_size();
2433
2434 // Reset address and file offset.
2435 void
2436 do_reset_address_and_file_offset();
2437
2438 // Output offset.
2439 bool
2440 do_output_offset(const Relobj* object, unsigned int shndx,
2441 section_offset_type offset,
2442 section_offset_type* poutput) const
2443 {
2444 if ((object == this->relobj())
2445 && (shndx == this->shndx())
2446 && (offset >= 0)
2447 && (offset <=
2448 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2449 {
2450 *poutput = offset;
2451 return true;
2452 }
2453 else
2454 return false;
2455 }
2456
2457 private:
2458 // Copying is not allowed.
2459 AArch64_input_section(const AArch64_input_section&);
2460 AArch64_input_section& operator=(const AArch64_input_section&);
2461
2462 // The relocation stubs.
2463 The_stub_table* stub_table_;
2464 // Original section contents. We have to make a copy here since the file
2465 // containing the original section may not be locked when we need to access
2466 // the contents.
2467 unsigned char* original_contents_;
2468 // Section size of the original input section.
2469 uint32_t original_size_;
2470 // Address alignment of the original input section.
2471 uint32_t original_addralign_;
2472 }; // End of AArch64_input_section
2473
2474
2475 // Finalize data size.
2476
2477 template<int size, bool big_endian>
2478 void
2479 AArch64_input_section<size, big_endian>::set_final_data_size()
2480 {
2481 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2482
2483 if (this->is_stub_table_owner())
2484 {
2485 this->stub_table_->finalize_data_size();
2486 off = align_address(off, this->stub_table_->addralign());
2487 off += this->stub_table_->data_size();
2488 }
2489 this->set_data_size(off);
2490 }
2491
2492
2493 // Reset address and file offset.
2494
2495 template<int size, bool big_endian>
2496 void
2497 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2498 {
2499 // Size of the original input section contents.
2500 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2501
2502 // If this is a stub table owner, account for the stub table size.
2503 if (this->is_stub_table_owner())
2504 {
2505 The_stub_table* stub_table = this->stub_table_;
2506
2507 // Reset the stub table's address and file offset. The
2508 // current data size for child will be updated after that.
2509 stub_table_->reset_address_and_file_offset();
2510 off = align_address(off, stub_table_->addralign());
2511 off += stub_table->current_data_size();
2512 }
2513
2514 this->set_current_data_size(off);
2515 }
2516
2517
2518 // Initialize an AArch64_input_section.
2519
2520 template<int size, bool big_endian>
2521 void
2522 AArch64_input_section<size, big_endian>::init()
2523 {
2524 Relobj* relobj = this->relobj();
2525 unsigned int shndx = this->shndx();
2526
2527 // We have to cache original size, alignment and contents to avoid locking
2528 // the original file.
2529 this->original_addralign_ =
2530 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2531
2532 // This is not efficient but we expect only a small number of relaxed
2533 // input sections for stubs.
2534 section_size_type section_size;
2535 const unsigned char* section_contents =
2536 relobj->section_contents(shndx, &section_size, false);
2537 this->original_size_ =
2538 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2539
2540 gold_assert(this->original_contents_ == NULL);
2541 this->original_contents_ = new unsigned char[section_size];
2542 memcpy(this->original_contents_, section_contents, section_size);
2543
2544 // We want to make this look like the original input section after
2545 // output sections are finalized.
2546 Output_section* os = relobj->output_section(shndx);
2547 off_t offset = relobj->output_section_offset(shndx);
2548 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2549 this->set_address(os->address() + offset);
2550 this->set_file_offset(os->offset() + offset);
2551 this->set_current_data_size(this->original_size_);
2552 this->finalize_data_size();
2553 }
2554
2555
2556 // Write data to output file.
2557
2558 template<int size, bool big_endian>
2559 void
2560 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2561 {
2562 // We have to write out the original section content.
2563 gold_assert(this->original_contents_ != NULL);
2564 of->write(this->offset(), this->original_contents_,
2565 this->original_size_);
2566
2567 // If this owns a stub table and it is not empty, write it.
2568 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2569 this->stub_table_->write(of);
2570 }
2571
2572
2573 // AArch64 output section class. This is defined mainly to add a number of
2574 // stub generation methods.
2575
2576 template<int size, bool big_endian>
2577 class AArch64_output_section : public Output_section
2578 {
2579 public:
2580 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2581 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2582 typedef Stub_table<size, big_endian> The_stub_table;
2583 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2584
2585 public:
2586 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2587 elfcpp::Elf_Xword flags)
2588 : Output_section(name, type, flags)
2589 { }
2590
2591 ~AArch64_output_section() {}
2592
2593 // Group input sections for stub generation.
2594 void
2595 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2596 const Task*);
2597
2598 private:
2599 typedef Output_section::Input_section Input_section;
2600 typedef Output_section::Input_section_list Input_section_list;
2601
2602 // Create a stub group.
2603 void
2604 create_stub_group(Input_section_list::const_iterator,
2605 Input_section_list::const_iterator,
2606 Input_section_list::const_iterator,
2607 The_target_aarch64*,
2608 std::vector<Output_relaxed_input_section*>&,
2609 const Task*);
2610 }; // End of AArch64_output_section
2611
2612
2613 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2614 // the input section that will be the owner of the stub table.
2615
2616 template<int size, bool big_endian> void
2617 AArch64_output_section<size, big_endian>::create_stub_group(
2618 Input_section_list::const_iterator first,
2619 Input_section_list::const_iterator last,
2620 Input_section_list::const_iterator owner,
2621 The_target_aarch64* target,
2622 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2623 const Task* task)
2624 {
2625 // Currently we convert ordinary input sections into relaxed sections only
2626 // at this point.
2627 The_aarch64_input_section* input_section;
2628 if (owner->is_relaxed_input_section())
2629 gold_unreachable();
2630 else
2631 {
2632 gold_assert(owner->is_input_section());
2633 // Create a new relaxed input section. We need to lock the original
2634 // file.
2635 Task_lock_obj<Object> tl(task, owner->relobj());
2636 input_section =
2637 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2638 new_relaxed_sections.push_back(input_section);
2639 }
2640
2641 // Create a stub table.
2642 The_stub_table* stub_table =
2643 target->new_stub_table(input_section);
2644
2645 input_section->set_stub_table(stub_table);
2646
2647 Input_section_list::const_iterator p = first;
2648 // Look for input sections or relaxed input sections in [first ... last].
2649 do
2650 {
2651 if (p->is_input_section() || p->is_relaxed_input_section())
2652 {
2653           // The stub table information for input sections lives
2654           // in their objects.
2655 The_aarch64_relobj* aarch64_relobj =
2656 static_cast<The_aarch64_relobj*>(p->relobj());
2657 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2658 }
2659 }
2660 while (p++ != last);
2661 }
2662
2663
2664 // Group input sections for stub generation. GROUP_SIZE is roughly the limit on
2665 // stub group size. We grow a stub group by adding input sections until the size
2666 // is just below GROUP_SIZE. The last input section will be converted into a stub
2667 // table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add input sections
2668 // after the stub table, effectively doubling the group size.
2669 //
2670 // This is similar to the group_sections() function in elf32-arm.c but is
2671 // implemented differently.
2672
2673 template<int size, bool big_endian>
2674 void AArch64_output_section<size, big_endian>::group_sections(
2675 section_size_type group_size,
2676 bool stubs_always_after_branch,
2677 Target_aarch64<size, big_endian>* target,
2678 const Task* task)
2679 {
2680 typedef enum
2681 {
2682 NO_GROUP,
2683 FINDING_STUB_SECTION,
2684 HAS_STUB_SECTION
2685 } State;
2686
2687 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2688
2689 State state = NO_GROUP;
2690 section_size_type off = 0;
2691 section_size_type group_begin_offset = 0;
2692 section_size_type group_end_offset = 0;
2693 section_size_type stub_table_end_offset = 0;
2694 Input_section_list::const_iterator group_begin =
2695 this->input_sections().end();
2696 Input_section_list::const_iterator stub_table =
2697 this->input_sections().end();
2698 Input_section_list::const_iterator group_end = this->input_sections().end();
2699 for (Input_section_list::const_iterator p = this->input_sections().begin();
2700 p != this->input_sections().end();
2701 ++p)
2702 {
2703 section_size_type section_begin_offset =
2704 align_address(off, p->addralign());
2705 section_size_type section_end_offset =
2706 section_begin_offset + p->data_size();
2707
2708 // Check to see if we should group the previously seen sections.
2709 switch (state)
2710 {
2711 case NO_GROUP:
2712 break;
2713
2714 case FINDING_STUB_SECTION:
2715 // Adding this section makes the group larger than GROUP_SIZE.
2716 if (section_end_offset - group_begin_offset >= group_size)
2717 {
2718 if (stubs_always_after_branch)
2719 {
2720 gold_assert(group_end != this->input_sections().end());
2721 this->create_stub_group(group_begin, group_end, group_end,
2722 target, new_relaxed_sections,
2723 task);
2724 state = NO_GROUP;
2725 }
2726 else
2727 {
2728 // Input sections up to stub_group_size bytes after the stub
2729 // table can be handled by it too.
2730 state = HAS_STUB_SECTION;
2731 stub_table = group_end;
2732 stub_table_end_offset = group_end_offset;
2733 }
2734 }
2735 break;
2736
2737 case HAS_STUB_SECTION:
2738 // Adding this section makes the post stub-section group larger
2739 // than GROUP_SIZE.
2740 gold_unreachable();
2741 // NOT SUPPORTED YET. For completeness only.
2742 if (section_end_offset - stub_table_end_offset >= group_size)
2743 {
2744 gold_assert(group_end != this->input_sections().end());
2745 this->create_stub_group(group_begin, group_end, stub_table,
2746 target, new_relaxed_sections, task);
2747 state = NO_GROUP;
2748 }
2749 break;
2750
2751 default:
2752 gold_unreachable();
2753 }
2754
2755 // If we see an input section and currently there is no group, start
2756 // a new one. Skip any empty sections. We look at the data size
2757 // instead of calling p->relobj()->section_size() to avoid locking.
2758 if ((p->is_input_section() || p->is_relaxed_input_section())
2759 && (p->data_size() != 0))
2760 {
2761 if (state == NO_GROUP)
2762 {
2763 state = FINDING_STUB_SECTION;
2764 group_begin = p;
2765 group_begin_offset = section_begin_offset;
2766 }
2767
2768 // Keep track of the last input section seen.
2769 group_end = p;
2770 group_end_offset = section_end_offset;
2771 }
2772
2773 off = section_end_offset;
2774 }
2775
2776 // Create a stub group for any ungrouped sections.
2777 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2778 {
2779 gold_assert(group_end != this->input_sections().end());
2780 this->create_stub_group(group_begin, group_end,
2781 (state == FINDING_STUB_SECTION
2782 ? group_end
2783 : stub_table),
2784 target, new_relaxed_sections, task);
2785 }
2786
2787 if (!new_relaxed_sections.empty())
2788 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2789
2790   // Update the section offsets.
2791 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2792 {
2793 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2794 new_relaxed_sections[i]->relobj());
2795 unsigned int shndx = new_relaxed_sections[i]->shndx();
2796 // Tell AArch64_relobj that this input section is converted.
2797 relobj->convert_input_section_to_relaxed_section(shndx);
2798 }
2799 } // End of AArch64_output_section::group_sections
2800
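// Editor's illustrative sketch, not part of gold: the core grouping decision
// made in group_sections above. A group stops growing once appending the next
// input section would make it span GROUP_SIZE bytes or more, measured from the
// start of the group's first section. The helper name is hypothetical and
// exists only for exposition.
static inline bool
example_stub_group_is_full(section_size_type group_begin_offset,
                           section_size_type next_section_end_offset,
                           section_size_type group_size)
{
  return next_section_end_offset - group_begin_offset >= group_size;
}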
2801
2802 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2803
2804
2805 // The aarch64 target class.
2806 // See the ABI at
2807 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2808 template<int size, bool big_endian>
2809 class Target_aarch64 : public Sized_target<size, big_endian>
2810 {
2811 public:
2812 typedef Target_aarch64<size, big_endian> This;
2813 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2814 Reloc_section;
2815 typedef Relocate_info<size, big_endian> The_relocate_info;
2816 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2817 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2818 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2819 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2820 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2821 typedef Stub_table<size, big_endian> The_stub_table;
2822 typedef std::vector<The_stub_table*> Stub_table_list;
2823 typedef typename Stub_table_list::iterator Stub_table_iterator;
2824 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2825 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2826 typedef Unordered_map<Section_id,
2827 AArch64_input_section<size, big_endian>*,
2828 Section_id_hash> AArch64_input_section_map;
2829 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2830 const static int TCB_SIZE = size / 8 * 2;
2831
2832 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2833 : Sized_target<size, big_endian>(info),
2834 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2835 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2836 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2837 got_mod_index_offset_(-1U),
2838 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2839 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2840 { }
2841
2842 // Scan the relocations to determine unreferenced sections for
2843 // garbage collection.
2844 void
2845 gc_process_relocs(Symbol_table* symtab,
2846 Layout* layout,
2847 Sized_relobj_file<size, big_endian>* object,
2848 unsigned int data_shndx,
2849 unsigned int sh_type,
2850 const unsigned char* prelocs,
2851 size_t reloc_count,
2852 Output_section* output_section,
2853 bool needs_special_offset_handling,
2854 size_t local_symbol_count,
2855 const unsigned char* plocal_symbols);
2856
2857 // Scan the relocations to look for symbol adjustments.
2858 void
2859 scan_relocs(Symbol_table* symtab,
2860 Layout* layout,
2861 Sized_relobj_file<size, big_endian>* object,
2862 unsigned int data_shndx,
2863 unsigned int sh_type,
2864 const unsigned char* prelocs,
2865 size_t reloc_count,
2866 Output_section* output_section,
2867 bool needs_special_offset_handling,
2868 size_t local_symbol_count,
2869 const unsigned char* plocal_symbols);
2870
2871 // Finalize the sections.
2872 void
2873 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2874
2875   // Return the value to use for a dynamic symbol which requires special
2876   // treatment.
2877 uint64_t
2878 do_dynsym_value(const Symbol*) const;
2879
2880 // Relocate a section.
2881 void
2882 relocate_section(const Relocate_info<size, big_endian>*,
2883 unsigned int sh_type,
2884 const unsigned char* prelocs,
2885 size_t reloc_count,
2886 Output_section* output_section,
2887 bool needs_special_offset_handling,
2888 unsigned char* view,
2889 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2890 section_size_type view_size,
2891 const Reloc_symbol_changes*);
2892
2893 // Scan the relocs during a relocatable link.
2894 void
2895 scan_relocatable_relocs(Symbol_table* symtab,
2896 Layout* layout,
2897 Sized_relobj_file<size, big_endian>* object,
2898 unsigned int data_shndx,
2899 unsigned int sh_type,
2900 const unsigned char* prelocs,
2901 size_t reloc_count,
2902 Output_section* output_section,
2903 bool needs_special_offset_handling,
2904 size_t local_symbol_count,
2905 const unsigned char* plocal_symbols,
2906 Relocatable_relocs*);
2907
2908 // Scan the relocs for --emit-relocs.
2909 void
2910 emit_relocs_scan(Symbol_table* symtab,
2911 Layout* layout,
2912 Sized_relobj_file<size, big_endian>* object,
2913 unsigned int data_shndx,
2914 unsigned int sh_type,
2915 const unsigned char* prelocs,
2916 size_t reloc_count,
2917 Output_section* output_section,
2918 bool needs_special_offset_handling,
2919 size_t local_symbol_count,
2920 const unsigned char* plocal_syms,
2921 Relocatable_relocs* rr);
2922
2923 // Relocate a section during a relocatable link.
2924 void
2925 relocate_relocs(
2926 const Relocate_info<size, big_endian>*,
2927 unsigned int sh_type,
2928 const unsigned char* prelocs,
2929 size_t reloc_count,
2930 Output_section* output_section,
2931 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2932 unsigned char* view,
2933 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2934 section_size_type view_size,
2935 unsigned char* reloc_view,
2936 section_size_type reloc_view_size);
2937
2938 // Return the symbol index to use for a target specific relocation.
2939 // The only target specific relocation is R_AARCH64_TLSDESC for a
2940 // local symbol, which is an absolute reloc.
2941 unsigned int
2942 do_reloc_symbol_index(void*, unsigned int r_type) const
2943 {
2944 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
2945 return 0;
2946 }
2947
2948 // Return the addend to use for a target specific relocation.
2949 uint64_t
2950 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
2951
2952 // Return the PLT section.
2953 uint64_t
2954 do_plt_address_for_global(const Symbol* gsym) const
2955 { return this->plt_section()->address_for_global(gsym); }
2956
2957 uint64_t
2958 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
2959 { return this->plt_section()->address_for_local(relobj, symndx); }
2960
2961   // This function should be defined in targets that can use relocation
2962   // types to determine whether a function's pointer is taken (implemented
2963   // in local_reloc_may_be_function_pointer and
2964   // global_reloc_may_be_function_pointer). ICF uses this in safe mode to only
2965   // fold those functions whose pointer is definitely not taken.
2966 bool
2967 do_can_check_for_function_pointers() const
2968 { return true; }
2969
2970 // Return the number of entries in the PLT.
2971 unsigned int
2972 plt_entry_count() const;
2973
2974   // Return the offset of the first non-reserved PLT entry.
2975 unsigned int
2976 first_plt_entry_offset() const;
2977
2978 // Return the size of each PLT entry.
2979 unsigned int
2980 plt_entry_size() const;
2981
2982 // Create a stub table.
2983 The_stub_table*
2984 new_stub_table(The_aarch64_input_section*);
2985
2986 // Create an aarch64 input section.
2987 The_aarch64_input_section*
2988 new_aarch64_input_section(Relobj*, unsigned int);
2989
2990 // Find an aarch64 input section instance for a given OBJ and SHNDX.
2991 The_aarch64_input_section*
2992 find_aarch64_input_section(Relobj*, unsigned int) const;
2993
2994 // Return the thread control block size.
2995 unsigned int
2996 tcb_size() const { return This::TCB_SIZE; }
2997
2998 // Scan a section for stub generation.
2999 void
3000 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
3001 const unsigned char*, size_t, Output_section*,
3002 bool, const unsigned char*,
3003 Address,
3004 section_size_type);
3005
3006 // Scan a relocation section for stub.
3007 template<int sh_type>
3008 void
3009 scan_reloc_section_for_stubs(
3010 const The_relocate_info* relinfo,
3011 const unsigned char* prelocs,
3012 size_t reloc_count,
3013 Output_section* output_section,
3014 bool needs_special_offset_handling,
3015 const unsigned char* view,
3016 Address view_address,
3017 section_size_type);
3018
3019 // Relocate a single stub.
3020 void
3021 relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
3022 Output_section*, unsigned char*, Address,
3023 section_size_type);
3024
3025 // Get the default AArch64 target.
3026 static This*
3027 current_target()
3028 {
3029 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
3030 && parameters->target().get_size() == size
3031 && parameters->target().is_big_endian() == big_endian);
3032 return static_cast<This*>(parameters->sized_target<size, big_endian>());
3033 }
3034
3035
3036 // Scan erratum 843419 for a part of a section.
3037 void
3038 scan_erratum_843419_span(
3039 AArch64_relobj<size, big_endian>*,
3040 unsigned int,
3041 const section_size_type,
3042 const section_size_type,
3043 unsigned char*,
3044 Address);
3045
3046 // Scan erratum 835769 for a part of a section.
3047 void
3048 scan_erratum_835769_span(
3049 AArch64_relobj<size, big_endian>*,
3050 unsigned int,
3051 const section_size_type,
3052 const section_size_type,
3053 unsigned char*,
3054 Address);
3055
3056 protected:
3057 void
3058 do_select_as_default_target()
3059 {
3060 gold_assert(aarch64_reloc_property_table == NULL);
3061 aarch64_reloc_property_table = new AArch64_reloc_property_table();
3062 }
3063
3064 // Add a new reloc argument, returning the index in the vector.
3065 size_t
3066 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
3067 unsigned int r_sym)
3068 {
3069 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
3070 return this->tlsdesc_reloc_info_.size() - 1;
3071 }
3072
3073 virtual Output_data_plt_aarch64<size, big_endian>*
3074 do_make_data_plt(Layout* layout,
3075 Output_data_got_aarch64<size, big_endian>* got,
3076 Output_data_space* got_plt,
3077 Output_data_space* got_irelative)
3078 {
3079 return new Output_data_plt_aarch64_standard<size, big_endian>(
3080 layout, got, got_plt, got_irelative);
3081 }
3082
3083
3084 // do_make_elf_object to override the same function in the base class.
3085 Object*
3086 do_make_elf_object(const std::string&, Input_file*, off_t,
3087 const elfcpp::Ehdr<size, big_endian>&);
3088
3089 Output_data_plt_aarch64<size, big_endian>*
3090 make_data_plt(Layout* layout,
3091 Output_data_got_aarch64<size, big_endian>* got,
3092 Output_data_space* got_plt,
3093 Output_data_space* got_irelative)
3094 {
3095 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
3096 }
3097
3098   // We only need to generate stubs, and hence perform relaxation, if we are
3099   // not doing a relocatable link.
3100 virtual bool
3101 do_may_relax() const
3102 { return !parameters->options().relocatable(); }
3103
3104 // Relaxation hook. This is where we do stub generation.
3105 virtual bool
3106 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
3107
3108 void
3109 group_sections(Layout* layout,
3110 section_size_type group_size,
3111 bool stubs_always_after_branch,
3112 const Task* task);
3113
3114 void
3115 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
3116 const Sized_symbol<size>*, unsigned int,
3117 const Symbol_value<size>*,
3118 typename elfcpp::Elf_types<size>::Elf_Swxword,
3119 Address Elf_Addr);
3120
3121 // Make an output section.
3122 Output_section*
3123 do_make_output_section(const char* name, elfcpp::Elf_Word type,
3124 elfcpp::Elf_Xword flags)
3125 { return new The_aarch64_output_section(name, type, flags); }
3126
3127 private:
3128 // The class which scans relocations.
3129 class Scan
3130 {
3131 public:
3132 Scan()
3133 : issued_non_pic_error_(false)
3134 { }
3135
3136 inline void
3137 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3138 Sized_relobj_file<size, big_endian>* object,
3139 unsigned int data_shndx,
3140 Output_section* output_section,
3141 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3142 const elfcpp::Sym<size, big_endian>& lsym,
3143 bool is_discarded);
3144
3145 inline void
3146 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3147 Sized_relobj_file<size, big_endian>* object,
3148 unsigned int data_shndx,
3149 Output_section* output_section,
3150 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3151 Symbol* gsym);
3152
3153 inline bool
3154 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3155 Target_aarch64<size, big_endian>* ,
3156 Sized_relobj_file<size, big_endian>* ,
3157 unsigned int ,
3158 Output_section* ,
3159 const elfcpp::Rela<size, big_endian>& ,
3160 unsigned int r_type,
3161 const elfcpp::Sym<size, big_endian>&);
3162
3163 inline bool
3164 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3165 Target_aarch64<size, big_endian>* ,
3166 Sized_relobj_file<size, big_endian>* ,
3167 unsigned int ,
3168 Output_section* ,
3169 const elfcpp::Rela<size, big_endian>& ,
3170 unsigned int r_type,
3171 Symbol* gsym);
3172
3173 private:
3174 static void
3175 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
3176 unsigned int r_type);
3177
3178 static void
3179 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3180 unsigned int r_type, Symbol*);
3181
3182 inline bool
3183 possible_function_pointer_reloc(unsigned int r_type);
3184
3185 void
3186 check_non_pic(Relobj*, unsigned int r_type);
3187
3188 bool
3189 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3190 unsigned int r_type);
3191
3192 // Whether we have issued an error about a non-PIC compilation.
3193 bool issued_non_pic_error_;
3194 };
3195
3196 // The class which implements relocation.
3197 class Relocate
3198 {
3199 public:
3200 Relocate()
3201 : skip_call_tls_get_addr_(false)
3202 { }
3203
3204 ~Relocate()
3205 { }
3206
3207 // Do a relocation. Return false if the caller should not issue
3208 // any warnings about this relocation.
3209 inline bool
3210 relocate(const Relocate_info<size, big_endian>*, unsigned int,
3211 Target_aarch64*, Output_section*, size_t, const unsigned char*,
3212 const Sized_symbol<size>*, const Symbol_value<size>*,
3213 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3214 section_size_type);
3215
3216 private:
3217 inline typename AArch64_relocate_functions<size, big_endian>::Status
3218 relocate_tls(const Relocate_info<size, big_endian>*,
3219 Target_aarch64<size, big_endian>*,
3220 size_t,
3221 const elfcpp::Rela<size, big_endian>&,
3222 unsigned int r_type, const Sized_symbol<size>*,
3223 const Symbol_value<size>*,
3224 unsigned char*,
3225 typename elfcpp::Elf_types<size>::Elf_Addr);
3226
3227 inline typename AArch64_relocate_functions<size, big_endian>::Status
3228 tls_gd_to_le(
3229 const Relocate_info<size, big_endian>*,
3230 Target_aarch64<size, big_endian>*,
3231 const elfcpp::Rela<size, big_endian>&,
3232 unsigned int,
3233 unsigned char*,
3234 const Symbol_value<size>*);
3235
3236 inline typename AArch64_relocate_functions<size, big_endian>::Status
3237 tls_ld_to_le(
3238 const Relocate_info<size, big_endian>*,
3239 Target_aarch64<size, big_endian>*,
3240 const elfcpp::Rela<size, big_endian>&,
3241 unsigned int,
3242 unsigned char*,
3243 const Symbol_value<size>*);
3244
3245 inline typename AArch64_relocate_functions<size, big_endian>::Status
3246 tls_ie_to_le(
3247 const Relocate_info<size, big_endian>*,
3248 Target_aarch64<size, big_endian>*,
3249 const elfcpp::Rela<size, big_endian>&,
3250 unsigned int,
3251 unsigned char*,
3252 const Symbol_value<size>*);
3253
3254 inline typename AArch64_relocate_functions<size, big_endian>::Status
3255 tls_desc_gd_to_le(
3256 const Relocate_info<size, big_endian>*,
3257 Target_aarch64<size, big_endian>*,
3258 const elfcpp::Rela<size, big_endian>&,
3259 unsigned int,
3260 unsigned char*,
3261 const Symbol_value<size>*);
3262
3263 inline typename AArch64_relocate_functions<size, big_endian>::Status
3264 tls_desc_gd_to_ie(
3265 const Relocate_info<size, big_endian>*,
3266 Target_aarch64<size, big_endian>*,
3267 const elfcpp::Rela<size, big_endian>&,
3268 unsigned int,
3269 unsigned char*,
3270 const Symbol_value<size>*,
3271 typename elfcpp::Elf_types<size>::Elf_Addr,
3272 typename elfcpp::Elf_types<size>::Elf_Addr);
3273
3274 bool skip_call_tls_get_addr_;
3275
3276 }; // End of class Relocate
3277
3278 // Adjust TLS relocation type based on the options and whether this
3279 // is a local symbol.
3280 static tls::Tls_optimization
3281 optimize_tls_reloc(bool is_final, int r_type);
3282
3283 // Get the GOT section, creating it if necessary.
3284 Output_data_got_aarch64<size, big_endian>*
3285 got_section(Symbol_table*, Layout*);
3286
3287 // Get the GOT PLT section.
3288 Output_data_space*
3289 got_plt_section() const
3290 {
3291 gold_assert(this->got_plt_ != NULL);
3292 return this->got_plt_;
3293 }
3294
3295 // Get the GOT section for TLSDESC entries.
3296 Output_data_got<size, big_endian>*
3297 got_tlsdesc_section() const
3298 {
3299 gold_assert(this->got_tlsdesc_ != NULL);
3300 return this->got_tlsdesc_;
3301 }
3302
3303 // Create the PLT section.
3304 void
3305 make_plt_section(Symbol_table* symtab, Layout* layout);
3306
3307 // Create a PLT entry for a global symbol.
3308 void
3309 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3310
3311 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3312 void
3313 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3314 Sized_relobj_file<size, big_endian>* relobj,
3315 unsigned int local_sym_index);
3316
3317 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3318 void
3319 define_tls_base_symbol(Symbol_table*, Layout*);
3320
3321 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3322 void
3323 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3324
3325 // Create a GOT entry for the TLS module index.
3326 unsigned int
3327 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3328 Sized_relobj_file<size, big_endian>* object);
3329
3330 // Get the PLT section.
3331 Output_data_plt_aarch64<size, big_endian>*
3332 plt_section() const
3333 {
3334 gold_assert(this->plt_ != NULL);
3335 return this->plt_;
3336 }
3337
3338 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
3339 // ST_E_843419, we need an additional field for adrp offset.
3340 void create_erratum_stub(
3341 AArch64_relobj<size, big_endian>* relobj,
3342 unsigned int shndx,
3343 section_size_type erratum_insn_offset,
3344 Address erratum_address,
3345 typename Insn_utilities::Insntype erratum_insn,
3346 int erratum_type,
3347 unsigned int e843419_adrp_offset=0);
3348
3349 // Return whether this is a 3-insn erratum sequence.
3350 bool is_erratum_843419_sequence(
3351 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3352 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3353 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3354
3355   // Return whether this is an 835769 sequence.
3356   // (Implemented similarly to elfnn-aarch64.c.)
3357 bool is_erratum_835769_sequence(
3358 typename elfcpp::Swap<32,big_endian>::Valtype,
3359 typename elfcpp::Swap<32,big_endian>::Valtype);
3360
3361 // Get the dynamic reloc section, creating it if necessary.
3362 Reloc_section*
3363 rela_dyn_section(Layout*);
3364
3365 // Get the section to use for TLSDESC relocations.
3366 Reloc_section*
3367 rela_tlsdesc_section(Layout*) const;
3368
3369 // Get the section to use for IRELATIVE relocations.
3370 Reloc_section*
3371 rela_irelative_section(Layout*);
3372
3373 // Add a potential copy relocation.
3374 void
3375 copy_reloc(Symbol_table* symtab, Layout* layout,
3376 Sized_relobj_file<size, big_endian>* object,
3377 unsigned int shndx, Output_section* output_section,
3378 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3379 {
3380 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
3381 this->copy_relocs_.copy_reloc(symtab, layout,
3382 symtab->get_sized_symbol<size>(sym),
3383 object, shndx, output_section,
3384 r_type, reloc.get_r_offset(),
3385 reloc.get_r_addend(),
3386 this->rela_dyn_section(layout));
3387 }
3388
3389 // Information about this specific target which we pass to the
3390 // general Target structure.
3391 static const Target::Target_info aarch64_info;
3392
3393 // The types of GOT entries needed for this platform.
3394 // These values are exposed to the ABI in an incremental link.
3395 // Do not renumber existing values without changing the version
3396 // number of the .gnu_incremental_inputs section.
3397 enum Got_type
3398 {
3399 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3400 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3401 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3402 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3403 };
3404
3405 // This type is used as the argument to the target specific
3406 // relocation routines. The only target specific reloc is
3407   // R_AARCH64_TLSDESC against a local symbol.
3408 struct Tlsdesc_info
3409 {
3410 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3411 unsigned int a_r_sym)
3412 : object(a_object), r_sym(a_r_sym)
3413 { }
3414
3415 // The object in which the local symbol is defined.
3416 Sized_relobj_file<size, big_endian>* object;
3417 // The local symbol index in the object.
3418 unsigned int r_sym;
3419 };
3420
3421 // The GOT section.
3422 Output_data_got_aarch64<size, big_endian>* got_;
3423 // The PLT section.
3424 Output_data_plt_aarch64<size, big_endian>* plt_;
3425 // The GOT PLT section.
3426 Output_data_space* got_plt_;
3427 // The GOT section for IRELATIVE relocations.
3428 Output_data_space* got_irelative_;
3429 // The GOT section for TLSDESC relocations.
3430 Output_data_got<size, big_endian>* got_tlsdesc_;
3431 // The _GLOBAL_OFFSET_TABLE_ symbol.
3432 Symbol* global_offset_table_;
3433 // The dynamic reloc section.
3434 Reloc_section* rela_dyn_;
3435 // The section to use for IRELATIVE relocs.
3436 Reloc_section* rela_irelative_;
3437 // Relocs saved to avoid a COPY reloc.
3438 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3439 // Offset of the GOT entry for the TLS module index.
3440 unsigned int got_mod_index_offset_;
3441 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3442 // specific relocation. Here we store the object and local symbol
3443 // index for the relocation.
3444 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3445 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3446 bool tls_base_symbol_defined_;
3447   // List of stub tables.
3448 Stub_table_list stub_tables_;
3449   // Actual stub group size.
3450 section_size_type stub_group_size_;
3451 AArch64_input_section_map aarch64_input_section_map_;
3452 }; // End of Target_aarch64
3453
3454
3455 template<>
3456 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3457 {
3458 64, // size
3459 false, // is_big_endian
3460 elfcpp::EM_AARCH64, // machine_code
3461 false, // has_make_symbol
3462 false, // has_resolve
3463 false, // has_code_fill
3464 true, // is_default_stack_executable
3465 true, // can_icf_inline_merge_sections
3466 '\0', // wrap_char
3467 "/lib/ld.so.1", // program interpreter
3468 0x400000, // default_text_segment_address
3469 0x10000, // abi_pagesize (overridable by -z max-page-size)
3470 0x1000, // common_pagesize (overridable by -z common-page-size)
3471 false, // isolate_execinstr
3472 0, // rosegment_gap
3473 elfcpp::SHN_UNDEF, // small_common_shndx
3474 elfcpp::SHN_UNDEF, // large_common_shndx
3475 0, // small_common_section_flags
3476 0, // large_common_section_flags
3477 NULL, // attributes_section
3478 NULL, // attributes_vendor
3479 "_start", // entry_symbol_name
3480 32, // hash_entry_size
3481 };
3482
3483 template<>
3484 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3485 {
3486 32, // size
3487 false, // is_big_endian
3488 elfcpp::EM_AARCH64, // machine_code
3489 false, // has_make_symbol
3490 false, // has_resolve
3491 false, // has_code_fill
3492 true, // is_default_stack_executable
3493 false, // can_icf_inline_merge_sections
3494 '\0', // wrap_char
3495 "/lib/ld.so.1", // program interpreter
3496 0x400000, // default_text_segment_address
3497 0x10000, // abi_pagesize (overridable by -z max-page-size)
3498 0x1000, // common_pagesize (overridable by -z common-page-size)
3499 false, // isolate_execinstr
3500 0, // rosegment_gap
3501 elfcpp::SHN_UNDEF, // small_common_shndx
3502 elfcpp::SHN_UNDEF, // large_common_shndx
3503 0, // small_common_section_flags
3504 0, // large_common_section_flags
3505 NULL, // attributes_section
3506 NULL, // attributes_vendor
3507 "_start", // entry_symbol_name
3508 32, // hash_entry_size
3509 };
3510
3511 template<>
3512 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3513 {
3514 64, // size
3515 true, // is_big_endian
3516 elfcpp::EM_AARCH64, // machine_code
3517 false, // has_make_symbol
3518 false, // has_resolve
3519 false, // has_code_fill
3520 true, // is_default_stack_executable
3521 true, // can_icf_inline_merge_sections
3522 '\0', // wrap_char
3523 "/lib/ld.so.1", // program interpreter
3524 0x400000, // default_text_segment_address
3525 0x10000, // abi_pagesize (overridable by -z max-page-size)
3526 0x1000, // common_pagesize (overridable by -z common-page-size)
3527 false, // isolate_execinstr
3528 0, // rosegment_gap
3529 elfcpp::SHN_UNDEF, // small_common_shndx
3530 elfcpp::SHN_UNDEF, // large_common_shndx
3531 0, // small_common_section_flags
3532 0, // large_common_section_flags
3533 NULL, // attributes_section
3534 NULL, // attributes_vendor
3535 "_start", // entry_symbol_name
3536 32, // hash_entry_size
3537 };
3538
3539 template<>
3540 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3541 {
3542 32, // size
3543 true, // is_big_endian
3544 elfcpp::EM_AARCH64, // machine_code
3545 false, // has_make_symbol
3546 false, // has_resolve
3547 false, // has_code_fill
3548 true, // is_default_stack_executable
3549 false, // can_icf_inline_merge_sections
3550 '\0', // wrap_char
3551 "/lib/ld.so.1", // program interpreter
3552 0x400000, // default_text_segment_address
3553 0x10000, // abi_pagesize (overridable by -z max-page-size)
3554 0x1000, // common_pagesize (overridable by -z common-page-size)
3555 false, // isolate_execinstr
3556 0, // rosegment_gap
3557 elfcpp::SHN_UNDEF, // small_common_shndx
3558 elfcpp::SHN_UNDEF, // large_common_shndx
3559 0, // small_common_section_flags
3560 0, // large_common_section_flags
3561 NULL, // attributes_section
3562 NULL, // attributes_vendor
3563 "_start", // entry_symbol_name
3564 32, // hash_entry_size
3565 };
3566
3567 // Get the GOT section, creating it if necessary.
3568
3569 template<int size, bool big_endian>
3570 Output_data_got_aarch64<size, big_endian>*
3571 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3572 Layout* layout)
3573 {
3574 if (this->got_ == NULL)
3575 {
3576 gold_assert(symtab != NULL && layout != NULL);
3577
3578 // When using -z now, we can treat .got.plt as a relro section.
3579 // Without -z now, it is modified after program startup by lazy
3580 // PLT relocations.
3581 bool is_got_plt_relro = parameters->options().now();
3582 Output_section_order got_order = (is_got_plt_relro
3583 ? ORDER_RELRO
3584 : ORDER_RELRO_LAST);
3585 Output_section_order got_plt_order = (is_got_plt_relro
3586 ? ORDER_RELRO
3587 : ORDER_NON_RELRO_FIRST);
3588
3589 // Layout of .got and .got.plt sections.
3590 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3591 // ...
3592 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3593 // .gotplt[1] reserved for ld.so (resolver)
3594 // .gotplt[2] reserved
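// .gotplt[3..] jump slot entries, followed by any IRELATIVE entries and
//              then any TLSDESC entries -- see the three
//              add_output_section_data(".got.plt", ...) calls below, which
//              all append to the same output section.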
3595
3596 // Generate .got section.
3597 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3598 layout);
3599 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3600 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3601 this->got_, got_order, true);
3602 // The first word of GOT is reserved for the address of .dynamic.
3603 // We put 0 here now. The value will be replaced later in
3604 // Output_data_got_aarch64::do_write.
3605 this->got_->add_constant(0);
3606
3607 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.  Its
3608 // value points to the start of .got even when there is a separate
3609 // .got.plt section.
3610 this->global_offset_table_ =
3611 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3612 Symbol_table::PREDEFINED,
3613 this->got_,
3614 0, 0, elfcpp::STT_OBJECT,
3615 elfcpp::STB_LOCAL,
3616 elfcpp::STV_HIDDEN, 0,
3617 false, false);
3618
3619 // Generate .got.plt section.
3620 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3621 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3622 (elfcpp::SHF_ALLOC
3623 | elfcpp::SHF_WRITE),
3624 this->got_plt_, got_plt_order,
3625 is_got_plt_relro);
3626
3627 // The first three entries are reserved.
3628 this->got_plt_->set_current_data_size(
3629 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3630
3631 // If there are any IRELATIVE relocations, they get GOT entries
3632 // in .got.plt after the jump slot entries.
3633 this->got_irelative_ = new Output_data_space(size / 8,
3634 "** GOT IRELATIVE PLT");
3635 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3636 (elfcpp::SHF_ALLOC
3637 | elfcpp::SHF_WRITE),
3638 this->got_irelative_,
3639 got_plt_order,
3640 is_got_plt_relro);
3641
3642 // If there are any TLSDESC relocations, they get GOT entries in
3643 // .got.plt after the jump slot and IRELATIVE entries.
3644 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3645 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3646 (elfcpp::SHF_ALLOC
3647 | elfcpp::SHF_WRITE),
3648 this->got_tlsdesc_,
3649 got_plt_order,
3650 is_got_plt_relro);
3651
3652 if (!is_got_plt_relro)
3653 {
3654 // Those bytes can go into the relro segment.
3655 layout->increase_relro(
3656 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3657 }
3658
3659 }
3660 return this->got_;
3661 }
3662
3663 // Get the dynamic reloc section, creating it if necessary.
3664
3665 template<int size, bool big_endian>
3666 typename Target_aarch64<size, big_endian>::Reloc_section*
3667 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3668 {
3669 if (this->rela_dyn_ == NULL)
3670 {
3671 gold_assert(layout != NULL);
3672 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3673 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3674 elfcpp::SHF_ALLOC, this->rela_dyn_,
3675 ORDER_DYNAMIC_RELOCS, false);
3676 }
3677 return this->rela_dyn_;
3678 }
3679
3680 // Get the section to use for IRELATIVE relocs, creating it if
3681 // necessary. These go in .rela.dyn, but only after all other dynamic
3682 // relocations. They need to follow the other dynamic relocations so
3683 // that they can refer to global variables initialized by those
3684 // relocs.
3685
3686 template<int size, bool big_endian>
3687 typename Target_aarch64<size, big_endian>::Reloc_section*
3688 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3689 {
3690 if (this->rela_irelative_ == NULL)
3691 {
3692 // Make sure we have already created the dynamic reloc section.
3693 this->rela_dyn_section(layout);
3694 this->rela_irelative_ = new Reloc_section(false);
3695 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3696 elfcpp::SHF_ALLOC, this->rela_irelative_,
3697 ORDER_DYNAMIC_RELOCS, false);
3698 gold_assert(this->rela_dyn_->output_section()
3699 == this->rela_irelative_->output_section());
3700 }
3701 return this->rela_irelative_;
3702 }
3703
3704
3705 // do_make_elf_object overrides the same function in the base class.  We need
3706 // a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3707 // store backend-specific information, hence we need our own ELF object
3708 // creation.
3709
3710 template<int size, bool big_endian>
3711 Object*
3712 Target_aarch64<size, big_endian>::do_make_elf_object(
3713 const std::string& name,
3714 Input_file* input_file,
3715 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3716 {
3717 int et = ehdr.get_e_type();
3718 // ET_EXEC files are valid input for --just-symbols/-R,
3719 // and we treat them as relocatable objects.
3720 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3721 return Sized_target<size, big_endian>::do_make_elf_object(
3722 name, input_file, offset, ehdr);
3723 else if (et == elfcpp::ET_REL)
3724 {
3725 AArch64_relobj<size, big_endian>* obj =
3726 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3727 obj->setup();
3728 return obj;
3729 }
3730 else if (et == elfcpp::ET_DYN)
3731 {
3732 // Keep base implementation.
3733 Sized_dynobj<size, big_endian>* obj =
3734 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3735 obj->setup();
3736 return obj;
3737 }
3738 else
3739 {
3740 gold_error(_("%s: unsupported ELF file type %d"),
3741 name.c_str(), et);
3742 return NULL;
3743 }
3744 }
3745
3746
3747 // Scan a relocation for stub generation.
3748
3749 template<int size, bool big_endian>
3750 void
3751 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3752 const Relocate_info<size, big_endian>* relinfo,
3753 unsigned int r_type,
3754 const Sized_symbol<size>* gsym,
3755 unsigned int r_sym,
3756 const Symbol_value<size>* psymval,
3757 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3758 Address address)
3759 {
3760 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3761 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3762
3763 Symbol_value<size> symval;
3764 if (gsym != NULL)
3765 {
3766 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3767 get_reloc_property(r_type);
3768 if (gsym->use_plt_offset(arp->reference_flags()))
3769 {
3770 // This uses a PLT, change the symbol value.
3771 symval.set_output_value(this->plt_section()->address()
3772 + gsym->plt_offset());
3773 psymval = &symval;
3774 }
3775 else if (gsym->is_undefined())
3776 {
3777 // There is no need to generate a stub if the original symbol is
3778 // undefined.
3779 gold_debug(DEBUG_TARGET,
3780 "stub: not creating a stub for undefined symbol %s in file %s",
3781 gsym->name(), aarch64_relobj->name().c_str());
3782 return;
3783 }
3784 }
3785
3786 // Get the symbol value.
3787 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3788
3789 // The destination for CALL26/JUMP26 is simply the symbol value plus the
3790 // addend; AArch64 branch offsets need no pipeline adjustment.
3791 Address destination = static_cast<Address>(-1);
3792 switch (r_type)
3793 {
3794 case elfcpp::R_AARCH64_CALL26:
3795 case elfcpp::R_AARCH64_JUMP26:
3796 destination = value + addend;
3797 break;
3798 default:
3799 gold_unreachable();
3800 }
3801
3802 int stub_type = The_reloc_stub::
3803 stub_type_for_reloc(r_type, address, destination);
3804 if (stub_type == ST_NONE)
3805 return;
3806
3807 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3808 gold_assert(stub_table != NULL);
3809
3810 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3811 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3812 if (stub == NULL)
3813 {
3814 stub = new The_reloc_stub(stub_type);
3815 stub_table->add_reloc_stub(stub, key);
3816 }
3817 stub->set_destination_address(destination);
3818 } // End of Target_aarch64::scan_reloc_for_stub
3819
3820
3821 // This function scans a relocation section for stub generation.
3822 // Only R_AARCH64_CALL26 and R_AARCH64_JUMP26 relocations are considered,
3823 // since only those branches may need a stub when the destination is out
3824 // of range.
3825
3826 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3827 // SHT_REL or SHT_RELA.
3828
3829 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3830 // of relocs. OUTPUT_SECTION is the output section.
3831 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3832 // mapped to output offsets.
3833
3834 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3835 // VIEW_SIZE is the size. These refer to the input section, unless
3836 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3837 // the output section.
3838
3839 template<int size, bool big_endian>
3840 template<int sh_type>
3841 void inline
3842 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3843 const Relocate_info<size, big_endian>* relinfo,
3844 const unsigned char* prelocs,
3845 size_t reloc_count,
3846 Output_section* /*output_section*/,
3847 bool /*needs_special_offset_handling*/,
3848 const unsigned char* /*view*/,
3849 Address view_address,
3850 section_size_type)
3851 {
3852 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3853
3854 const int reloc_size =
3855 Reloc_types<sh_type,size,big_endian>::reloc_size;
3856 AArch64_relobj<size, big_endian>* object =
3857 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3858 unsigned int local_count = object->local_symbol_count();
3859
3860 gold::Default_comdat_behavior default_comdat_behavior;
3861 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3862
3863 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3864 {
3865 Reltype reloc(prelocs);
3866 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3867 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3868 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3869 if (r_type != elfcpp::R_AARCH64_CALL26
3870 && r_type != elfcpp::R_AARCH64_JUMP26)
3871 continue;
3872
3873 section_offset_type offset =
3874 convert_to_section_size_type(reloc.get_r_offset());
3875
3876 // Get the addend.
3877 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3878 reloc.get_r_addend();
3879
3880 const Sized_symbol<size>* sym;
3881 Symbol_value<size> symval;
3882 const Symbol_value<size> *psymval;
3883 bool is_defined_in_discarded_section;
3884 unsigned int shndx;
3885 if (r_sym < local_count)
3886 {
3887 sym = NULL;
3888 psymval = object->local_symbol(r_sym);
3889
3890 // If the local symbol belongs to a section we are discarding,
3891 // and that section is a debug section, try to find the
3892 // corresponding kept section and map this symbol to its
3893 // counterpart in the kept section. The symbol must not
3894 // correspond to a section we are folding.
3895 bool is_ordinary;
3896 shndx = psymval->input_shndx(&is_ordinary);
3897 is_defined_in_discarded_section =
3898 (is_ordinary
3899 && shndx != elfcpp::SHN_UNDEF
3900 && !object->is_section_included(shndx)
3901 && !relinfo->symtab->is_section_folded(object, shndx));
3902
3903 // We need to compute the would-be final value of this local
3904 // symbol.
3905 if (!is_defined_in_discarded_section)
3906 {
3907 typedef Sized_relobj_file<size, big_endian> ObjType;
3908 if (psymval->is_section_symbol())
3909 symval.set_is_section_symbol();
3910 typename ObjType::Compute_final_local_value_status status =
3911 object->compute_final_local_value(r_sym, psymval, &symval,
3912 relinfo->symtab);
3913 if (status == ObjType::CFLV_OK)
3914 {
3915 // Currently we cannot handle a branch to a target in
3916 // a merged section. If this is the case, issue an error
3917 // and also free the merge symbol value.
3918 if (!symval.has_output_value())
3919 {
3920 const std::string& section_name =
3921 object->section_name(shndx);
3922 object->error(_("cannot handle branch to local %u "
3923 "in a merged section %s"),
3924 r_sym, section_name.c_str());
3925 }
3926 psymval = &symval;
3927 }
3928 else
3929 {
3930 // We cannot determine the final value.
3931 continue;
3932 }
3933 }
3934 }
3935 else
3936 {
3937 const Symbol* gsym;
3938 gsym = object->global_symbol(r_sym);
3939 gold_assert(gsym != NULL);
3940 if (gsym->is_forwarder())
3941 gsym = relinfo->symtab->resolve_forwards(gsym);
3942
3943 sym = static_cast<const Sized_symbol<size>*>(gsym);
3944 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
3945 symval.set_output_symtab_index(sym->symtab_index());
3946 else
3947 symval.set_no_output_symtab_entry();
3948
3949 // We need to compute the would-be final value of this global
3950 // symbol.
3951 const Symbol_table* symtab = relinfo->symtab;
3952 const Sized_symbol<size>* sized_symbol =
3953 symtab->get_sized_symbol<size>(gsym);
3954 Symbol_table::Compute_final_value_status status;
3955 typename elfcpp::Elf_types<size>::Elf_Addr value =
3956 symtab->compute_final_value<size>(sized_symbol, &status);
3957
3958 // Skip this if the symbol has no output section.
3959 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
3960 continue;
3961 symval.set_output_value(value);
3962
3963 if (gsym->type() == elfcpp::STT_TLS)
3964 symval.set_is_tls_symbol();
3965 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
3966 symval.set_is_ifunc_symbol();
3967 psymval = &symval;
3968
3969 is_defined_in_discarded_section =
3970 (gsym->is_defined_in_discarded_section()
3971 && gsym->is_undefined());
3972 shndx = 0;
3973 }
3974
3975 Symbol_value<size> symval2;
3976 if (is_defined_in_discarded_section)
3977 {
3978 if (comdat_behavior == CB_UNDETERMINED)
3979 {
3980 std::string name = object->section_name(relinfo->data_shndx);
3981 comdat_behavior = default_comdat_behavior.get(name.c_str());
3982 }
3983 if (comdat_behavior == CB_PRETEND)
3984 {
3985 bool found;
3986 typename elfcpp::Elf_types<size>::Elf_Addr value =
3987 object->map_to_kept_section(shndx, &found);
3988 if (found)
3989 symval2.set_output_value(value + psymval->input_value());
3990 else
3991 symval2.set_output_value(0);
3992 }
3993 else
3994 {
3995 if (comdat_behavior == CB_WARNING)
3996 gold_warning_at_location(relinfo, i, offset,
3997 _("relocation refers to discarded "
3998 "section"));
3999 symval2.set_output_value(0);
4000 }
4001 symval2.set_no_output_symtab_entry();
4002 psymval = &symval2;
4003 }
4004
4005 // If the symbol is a section symbol, we don't know the actual type of
4006 // the destination.  Give up.
4007 if (psymval->is_section_symbol())
4008 continue;
4009
4010 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
4011 addend, view_address + offset);
4012 } // End of iterating relocs in a section
4013 } // End of Target_aarch64::scan_reloc_section_for_stubs
4014
4015
4016 // Scan an input section for stub generation.
4017
4018 template<int size, bool big_endian>
4019 void
4020 Target_aarch64<size, big_endian>::scan_section_for_stubs(
4021 const Relocate_info<size, big_endian>* relinfo,
4022 unsigned int sh_type,
4023 const unsigned char* prelocs,
4024 size_t reloc_count,
4025 Output_section* output_section,
4026 bool needs_special_offset_handling,
4027 const unsigned char* view,
4028 Address view_address,
4029 section_size_type view_size)
4030 {
4031 gold_assert(sh_type == elfcpp::SHT_RELA);
4032 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
4033 relinfo,
4034 prelocs,
4035 reloc_count,
4036 output_section,
4037 needs_special_offset_handling,
4038 view,
4039 view_address,
4040 view_size);
4041 }
4042
4043
4044 // Relocate a single stub.
4045
4046 template<int size, bool big_endian>
4047 void Target_aarch64<size, big_endian>::
4048 relocate_stub(The_reloc_stub* stub,
4049 const The_relocate_info*,
4050 Output_section*,
4051 unsigned char* view,
4052 Address address,
4053 section_size_type)
4054 {
4055 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
4056 typedef typename The_reloc_functions::Status The_reloc_functions_status;
4057 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
4058
4059 Insntype* ip = reinterpret_cast<Insntype*>(view);
4060 int insn_number = stub->insn_num();
4061 const uint32_t* insns = stub->insns();
4062 // Check that the insns really are the stub's insns.
4063 for (int i = 0; i < insn_number; ++i)
4064 {
4065 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
4066 gold_assert(((uint32_t)insn == insns[i]));
4067 }
4068
4069 Address dest = stub->destination_address();
4070
4071 switch(stub->type())
4072 {
4073 case ST_ADRP_BRANCH:
4074 {
4075 // 1st reloc is ADR_PREL_PG_HI21
4076 The_reloc_functions_status status =
4077 The_reloc_functions::adrp(view, dest, address);
4078 // An error should never arise in the above step. If so, please
4079 // check 'aarch64_valid_for_adrp_p'.
4080 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4081
4082 // 2nd reloc is ADD_ABS_LO12_NC
4083 const AArch64_reloc_property* arp =
4084 aarch64_reloc_property_table->get_reloc_property(
4085 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
4086 gold_assert(arp != NULL);
4087 status = The_reloc_functions::template
4088 rela_general<32>(view + 4, dest, 0, arp);
4089 // An error should never arise, it is an "_NC" relocation.
4090 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4091 }
4092 break;
4093
4094 case ST_LONG_BRANCH_ABS:
4095 // 1st reloc is R_AARCH64_PREL64, at offset 8
4096 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
4097 break;
4098
4099 case ST_LONG_BRANCH_PCREL:
4100 {
4101 // "PC" calculation is the 2nd insn in the stub.
4102 uint64_t offset = dest - (address + 4);
4103 // Offset is placed at offset 4 and 5.
4104 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
4105 }
4106 break;
4107
4108 default:
4109 gold_unreachable();
4110 }
4111 }
4112
4113
4114 // A class to handle the PLT data.
4115 // This is an abstract base class that handles most of the linker details
4116 // but does not know the actual contents of PLT entries. The derived
4117 // classes below fill in those details.
4118
4119 template<int size, bool big_endian>
4120 class Output_data_plt_aarch64 : public Output_section_data
4121 {
4122 public:
4123 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
4124 Reloc_section;
4125 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4126
4127 Output_data_plt_aarch64(Layout* layout,
4128 uint64_t addralign,
4129 Output_data_got_aarch64<size, big_endian>* got,
4130 Output_data_space* got_plt,
4131 Output_data_space* got_irelative)
4132 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
4133 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
4134 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
4135 { this->init(layout); }
4136
4137 // Initialize the PLT section.
4138 void
4139 init(Layout* layout);
4140
4141 // Add an entry to the PLT.
4142 void
4143 add_entry(Symbol_table*, Layout*, Symbol* gsym);
4144
4145 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
4146 unsigned int
4147 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
4148 Sized_relobj_file<size, big_endian>* relobj,
4149 unsigned int local_sym_index);
4150
4151 // Add the relocation for a PLT entry.
4152 void
4153 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
4154 unsigned int got_offset);
4155
4156 // Add the reserved TLSDESC_PLT entry to the PLT.
4157 void
4158 reserve_tlsdesc_entry(unsigned int got_offset)
4159 { this->tlsdesc_got_offset_ = got_offset; }
4160
4161 // Return true if a TLSDESC_PLT entry has been reserved.
4162 bool
4163 has_tlsdesc_entry() const
4164 { return this->tlsdesc_got_offset_ != -1U; }
4165
4166 // Return the GOT offset for the reserved TLSDESC_PLT entry.
4167 unsigned int
4168 get_tlsdesc_got_offset() const
4169 { return this->tlsdesc_got_offset_; }
4170
4171 // Return the PLT offset of the reserved TLSDESC_PLT entry.
4172 unsigned int
4173 get_tlsdesc_plt_offset() const
4174 {
4175 return (this->first_plt_entry_offset() +
4176 (this->count_ + this->irelative_count_)
4177 * this->get_plt_entry_size());
4178 }
4179
4180 // Return the .rela.plt section data.
4181 Reloc_section*
4182 rela_plt()
4183 { return this->rel_; }
4184
4185 // Return where the TLSDESC relocations should go.
4186 Reloc_section*
4187 rela_tlsdesc(Layout*);
4188
4189 // Return where the IRELATIVE relocations should go in the PLT
4190 // relocations.
4191 Reloc_section*
4192 rela_irelative(Symbol_table*, Layout*);
4193
4194 // Return whether we created a section for IRELATIVE relocations.
4195 bool
4196 has_irelative_section() const
4197 { return this->irelative_rel_ != NULL; }
4198
4199 // Return the number of PLT entries.
4200 unsigned int
4201 entry_count() const
4202 { return this->count_ + this->irelative_count_; }
4203
4204 // Return the offset of the first non-reserved PLT entry.
4205 unsigned int
4206 first_plt_entry_offset() const
4207 { return this->do_first_plt_entry_offset(); }
4208
4209 // Return the size of a PLT entry.
4210 unsigned int
4211 get_plt_entry_size() const
4212 { return this->do_get_plt_entry_size(); }
4213
4214 // Return the reserved tlsdesc entry size.
4215 unsigned int
4216 get_plt_tlsdesc_entry_size() const
4217 { return this->do_get_plt_tlsdesc_entry_size(); }
4218
4219 // Return the PLT address to use for a global symbol.
4220 uint64_t
4221 address_for_global(const Symbol*);
4222
4223 // Return the PLT address to use for a local symbol.
4224 uint64_t
4225 address_for_local(const Relobj*, unsigned int symndx);
4226
4227 protected:
4228 // Fill in the first PLT entry.
4229 void
4230 fill_first_plt_entry(unsigned char* pov,
4231 Address got_address,
4232 Address plt_address)
4233 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4234
4235 // Fill in a normal PLT entry.
4236 void
4237 fill_plt_entry(unsigned char* pov,
4238 Address got_address,
4239 Address plt_address,
4240 unsigned int got_offset,
4241 unsigned int plt_offset)
4242 {
4243 this->do_fill_plt_entry(pov, got_address, plt_address,
4244 got_offset, plt_offset);
4245 }
4246
4247 // Fill in the reserved TLSDESC PLT entry.
4248 void
4249 fill_tlsdesc_entry(unsigned char* pov,
4250 Address gotplt_address,
4251 Address plt_address,
4252 Address got_base,
4253 unsigned int tlsdesc_got_offset,
4254 unsigned int plt_offset)
4255 {
4256 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4257 tlsdesc_got_offset, plt_offset);
4258 }
4259
4260 virtual unsigned int
4261 do_first_plt_entry_offset() const = 0;
4262
4263 virtual unsigned int
4264 do_get_plt_entry_size() const = 0;
4265
4266 virtual unsigned int
4267 do_get_plt_tlsdesc_entry_size() const = 0;
4268
4269 virtual void
4270 do_fill_first_plt_entry(unsigned char* pov,
4271 Address got_addr,
4272 Address plt_addr) = 0;
4273
4274 virtual void
4275 do_fill_plt_entry(unsigned char* pov,
4276 Address got_address,
4277 Address plt_address,
4278 unsigned int got_offset,
4279 unsigned int plt_offset) = 0;
4280
4281 virtual void
4282 do_fill_tlsdesc_entry(unsigned char* pov,
4283 Address gotplt_address,
4284 Address plt_address,
4285 Address got_base,
4286 unsigned int tlsdesc_got_offset,
4287 unsigned int plt_offset) = 0;
4288
4289 void
4290 do_adjust_output_section(Output_section* os);
4291
4292 // Write to a map file.
4293 void
4294 do_print_to_mapfile(Mapfile* mapfile) const
4295 { mapfile->print_output_data(this, _("** PLT")); }
4296
4297 private:
4298 // Set the final size.
4299 void
4300 set_final_data_size();
4301
4302 // Write out the PLT data.
4303 void
4304 do_write(Output_file*);
4305
4306 // The reloc section.
4307 Reloc_section* rel_;
4308
4309 // The TLSDESC relocs, if necessary. These must follow the regular
4310 // PLT relocs.
4311 Reloc_section* tlsdesc_rel_;
4312
4313 // The IRELATIVE relocs, if necessary. These must follow the
4314 // regular PLT relocations.
4315 Reloc_section* irelative_rel_;
4316
4317 // The .got section.
4318 Output_data_got_aarch64<size, big_endian>* got_;
4319
4320 // The .got.plt section.
4321 Output_data_space* got_plt_;
4322
4323 // The part of the .got.plt section used for IRELATIVE relocs.
4324 Output_data_space* got_irelative_;
4325
4326 // The number of PLT entries.
4327 unsigned int count_;
4328
4329 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4330 // follow the regular PLT entries.
4331 unsigned int irelative_count_;
4332
4333 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4334 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4335 // indicates an offset is not allocated.
4336 unsigned int tlsdesc_got_offset_;
4337 };
4338
4339 // Initialize the PLT section.
4340
4341 template<int size, bool big_endian>
4342 void
4343 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4344 {
4345 this->rel_ = new Reloc_section(false);
4346 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4347 elfcpp::SHF_ALLOC, this->rel_,
4348 ORDER_DYNAMIC_PLT_RELOCS, false);
4349 }
4350
4351 template<int size, bool big_endian>
4352 void
4353 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4354 Output_section* os)
4355 {
4356 os->set_entsize(this->get_plt_entry_size());
4357 }
4358
4359 // Add an entry to the PLT.
4360
4361 template<int size, bool big_endian>
4362 void
4363 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4364 Layout* layout, Symbol* gsym)
4365 {
4366 gold_assert(!gsym->has_plt_offset());
4367
4368 unsigned int* pcount;
4369 unsigned int plt_reserved;
4370 Output_section_data_build* got;
4371
4372 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4373 && gsym->can_use_relative_reloc(false))
4374 {
4375 pcount = &this->irelative_count_;
4376 plt_reserved = 0;
4377 got = this->got_irelative_;
4378 }
4379 else
4380 {
4381 pcount = &this->count_;
4382 plt_reserved = this->first_plt_entry_offset();
4383 got = this->got_plt_;
4384 }
4385
4386 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4387 + plt_reserved);
4388
4389 ++*pcount;
4390
4391 section_offset_type got_offset = got->current_data_size();
4392
4393 // Every PLT entry needs a GOT entry which points back to the PLT
4394 // entry (this will be changed by the dynamic linker, normally
4395 // lazily when the function is called).
4396 got->set_current_data_size(got_offset + size / 8);
4397
4398 // Every PLT entry needs a reloc.
4399 this->add_relocation(symtab, layout, gsym, got_offset);
4400
4401 // Note that we don't need to save the symbol. The contents of the
4402 // PLT are independent of which symbols are used. The symbols only
4403 // appear in the relocations.
4404 }
4405
4406 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4407 // the PLT offset.
4408
4409 template<int size, bool big_endian>
4410 unsigned int
4411 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4412 Symbol_table* symtab,
4413 Layout* layout,
4414 Sized_relobj_file<size, big_endian>* relobj,
4415 unsigned int local_sym_index)
4416 {
4417 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4418 ++this->irelative_count_;
4419
4420 section_offset_type got_offset = this->got_irelative_->current_data_size();
4421
4422 // Every PLT entry needs a GOT entry which points back to the PLT
4423 // entry.
4424 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4425
4426 // Every PLT entry needs a reloc.
4427 Reloc_section* rela = this->rela_irelative(symtab, layout);
4428 rela->add_symbolless_local_addend(relobj, local_sym_index,
4429 elfcpp::R_AARCH64_IRELATIVE,
4430 this->got_irelative_, got_offset, 0);
4431
4432 return plt_offset;
4433 }
4434
4435 // Add the relocation for a PLT entry.
4436
4437 template<int size, bool big_endian>
4438 void
4439 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4440 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4441 {
4442 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4443 && gsym->can_use_relative_reloc(false))
4444 {
4445 Reloc_section* rela = this->rela_irelative(symtab, layout);
4446 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4447 this->got_irelative_, got_offset, 0);
4448 }
4449 else
4450 {
4451 gsym->set_needs_dynsym_entry();
4452 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4453 got_offset, 0);
4454 }
4455 }
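// Note on the two cases above: an STT_GNU_IFUNC symbol that can use a
// relative reloc gets an R_AARCH64_IRELATIVE entry in the IRELATIVE part of
// .rela.plt and needs no dynamic symbol table entry; every other symbol gets
// an R_AARCH64_JUMP_SLOT entry against its dynamic symbol.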
4456
4457 // Return where the TLSDESC relocations should go, creating it if
4458 // necessary. These follow the JUMP_SLOT relocations.
4459
4460 template<int size, bool big_endian>
4461 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4462 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4463 {
4464 if (this->tlsdesc_rel_ == NULL)
4465 {
4466 this->tlsdesc_rel_ = new Reloc_section(false);
4467 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4468 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4469 ORDER_DYNAMIC_PLT_RELOCS, false);
4470 gold_assert(this->tlsdesc_rel_->output_section()
4471 == this->rel_->output_section());
4472 }
4473 return this->tlsdesc_rel_;
4474 }
4475
4476 // Return where the IRELATIVE relocations should go in the PLT. These
4477 // follow the JUMP_SLOT and the TLSDESC relocations.
4478
4479 template<int size, bool big_endian>
4480 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4481 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4482 Layout* layout)
4483 {
4484 if (this->irelative_rel_ == NULL)
4485 {
4486 // Make sure we have a place for the TLSDESC relocations, in
4487 // case we see any later on.
4488 this->rela_tlsdesc(layout);
4489 this->irelative_rel_ = new Reloc_section(false);
4490 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4491 elfcpp::SHF_ALLOC, this->irelative_rel_,
4492 ORDER_DYNAMIC_PLT_RELOCS, false);
4493 gold_assert(this->irelative_rel_->output_section()
4494 == this->rel_->output_section());
4495
4496 if (parameters->doing_static_link())
4497 {
4498 // A statically linked executable will only have a .rela.plt
4499 // section to hold R_AARCH64_IRELATIVE relocs for
4500 // STT_GNU_IFUNC symbols. The library will use these
4501 // symbols to locate the IRELATIVE relocs at program startup
4502 // time.
4503 symtab->define_in_output_data("__rela_iplt_start", NULL,
4504 Symbol_table::PREDEFINED,
4505 this->irelative_rel_, 0, 0,
4506 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4507 elfcpp::STV_HIDDEN, 0, false, true);
4508 symtab->define_in_output_data("__rela_iplt_end", NULL,
4509 Symbol_table::PREDEFINED,
4510 this->irelative_rel_, 0, 0,
4511 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4512 elfcpp::STV_HIDDEN, 0, true, true);
4513 }
4514 }
4515 return this->irelative_rel_;
4516 }
4517
4518 // Return the PLT address to use for a global symbol.
4519
4520 template<int size, bool big_endian>
4521 uint64_t
4522 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4523 const Symbol* gsym)
4524 {
4525 uint64_t offset = 0;
4526 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4527 && gsym->can_use_relative_reloc(false))
4528 offset = (this->first_plt_entry_offset() +
4529 this->count_ * this->get_plt_entry_size());
4530 return this->address() + offset + gsym->plt_offset();
4531 }
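// Note that for an STT_GNU_IFUNC symbol resolvable with a relative reloc,
// gsym->plt_offset() is relative to the start of the IRELATIVE area
// (add_entry above passes plt_reserved == 0 in that case), hence the extra
// first_plt_entry_offset() + count_ * entry-size added here.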
4532
4533 // Return the PLT address to use for a local symbol. These are always
4534 // IRELATIVE relocs.
4535
4536 template<int size, bool big_endian>
4537 uint64_t
4538 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4539 const Relobj* object,
4540 unsigned int r_sym)
4541 {
4542 return (this->address()
4543 + this->first_plt_entry_offset()
4544 + this->count_ * this->get_plt_entry_size()
4545 + object->local_plt_offset(r_sym));
4546 }
4547
4548 // Set the final size.
4549
4550 template<int size, bool big_endian>
4551 void
4552 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4553 {
4554 unsigned int count = this->count_ + this->irelative_count_;
4555 unsigned int extra_size = 0;
4556 if (this->has_tlsdesc_entry())
4557 extra_size += this->get_plt_tlsdesc_entry_size();
4558 this->set_data_size(this->first_plt_entry_offset()
4559 + count * this->get_plt_entry_size()
4560 + extra_size);
4561 }
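// For reference, the .plt layout implied by the sizing above is:
//   [ reserved first PLT entry ]
//   [ count_ regular entries ][ irelative_count_ IFUNC entries ]
//   [ optional reserved TLSDESC entry ]
// which also matches get_tlsdesc_plt_offset() above.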
4562
4563 template<int size, bool big_endian>
4564 class Output_data_plt_aarch64_standard :
4565 public Output_data_plt_aarch64<size, big_endian>
4566 {
4567 public:
4568 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4569 Output_data_plt_aarch64_standard(
4570 Layout* layout,
4571 Output_data_got_aarch64<size, big_endian>* got,
4572 Output_data_space* got_plt,
4573 Output_data_space* got_irelative)
4574 : Output_data_plt_aarch64<size, big_endian>(layout,
4575 size == 32 ? 4 : 8,
4576 got, got_plt,
4577 got_irelative)
4578 { }
4579
4580 protected:
4581 // Return the offset of the first non-reserved PLT entry.
4582 virtual unsigned int
4583 do_first_plt_entry_offset() const
4584 { return this->first_plt_entry_size; }
4585
4586 // Return the size of a PLT entry
4587 virtual unsigned int
4588 do_get_plt_entry_size() const
4589 { return this->plt_entry_size; }
4590
4591 // Return the size of a tlsdesc entry
4592 virtual unsigned int
4593 do_get_plt_tlsdesc_entry_size() const
4594 { return this->plt_tlsdesc_entry_size; }
4595
4596 virtual void
4597 do_fill_first_plt_entry(unsigned char* pov,
4598 Address got_address,
4599 Address plt_address);
4600
4601 virtual void
4602 do_fill_plt_entry(unsigned char* pov,
4603 Address got_address,
4604 Address plt_address,
4605 unsigned int got_offset,
4606 unsigned int plt_offset);
4607
4608 virtual void
4609 do_fill_tlsdesc_entry(unsigned char* pov,
4610 Address gotplt_address,
4611 Address plt_address,
4612 Address got_base,
4613 unsigned int tlsdesc_got_offset,
4614 unsigned int plt_offset);
4615
4616 private:
4617 // The size of the first PLT entry.
4618 static const int first_plt_entry_size = 32;
4619 // The size of a regular PLT entry.
4620 static const int plt_entry_size = 16;
4621 // The size of the reserved TLSDESC PLT entry.
4622 static const int plt_tlsdesc_entry_size = 32;
4623 // Template for the first PLT entry.
4624 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4625 // Template for subsequent PLT entries.
4626 static const uint32_t plt_entry[plt_entry_size / 4];
4627 // The reserved TLSDESC entry in the PLT for an executable.
4628 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4629 };
4630
4631 // The first entry in the PLT for an executable.
4632
4633 template<>
4634 const uint32_t
4635 Output_data_plt_aarch64_standard<32, false>::
4636 first_plt_entry[first_plt_entry_size / 4] =
4637 {
4638 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4639 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4640 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4641 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4642 0xd61f0220, /* br x17 */
4643 0xd503201f, /* nop */
4644 0xd503201f, /* nop */
4645 0xd503201f, /* nop */
4646 };
4647
4648
4649 template<>
4650 const uint32_t
4651 Output_data_plt_aarch64_standard<32, true>::
4652 first_plt_entry[first_plt_entry_size / 4] =
4653 {
4654 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4655 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4656 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4657 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4658 0xd61f0220, /* br x17 */
4659 0xd503201f, /* nop */
4660 0xd503201f, /* nop */
4661 0xd503201f, /* nop */
4662 };
4663
4664
4665 template<>
4666 const uint32_t
4667 Output_data_plt_aarch64_standard<64, false>::
4668 first_plt_entry[first_plt_entry_size / 4] =
4669 {
4670 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4671 0x90000010, /* adrp x16, PLT_GOT+16 */
4672 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4673 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4674 0xd61f0220, /* br x17 */
4675 0xd503201f, /* nop */
4676 0xd503201f, /* nop */
4677 0xd503201f, /* nop */
4678 };
4679
4680
4681 template<>
4682 const uint32_t
4683 Output_data_plt_aarch64_standard<64, true>::
4684 first_plt_entry[first_plt_entry_size / 4] =
4685 {
4686 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4687 0x90000010, /* adrp x16, PLT_GOT+16 */
4688 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4689 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4690 0xd61f0220, /* br x17 */
4691 0xd503201f, /* nop */
4692 0xd503201f, /* nop */
4693 0xd503201f, /* nop */
4694 };
4695
4696
4697 template<>
4698 const uint32_t
4699 Output_data_plt_aarch64_standard<32, false>::
4700 plt_entry[plt_entry_size / 4] =
4701 {
4702 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4703 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4704 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4705 0xd61f0220, /* br x17. */
4706 };
4707
4708
4709 template<>
4710 const uint32_t
4711 Output_data_plt_aarch64_standard<32, true>::
4712 plt_entry[plt_entry_size / 4] =
4713 {
4714 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4715 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4716 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4717 0xd61f0220, /* br x17. */
4718 };
4719
4720
4721 template<>
4722 const uint32_t
4723 Output_data_plt_aarch64_standard<64, false>::
4724 plt_entry[plt_entry_size / 4] =
4725 {
4726 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4727 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4728 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4729 0xd61f0220, /* br x17. */
4730 };
4731
4732
4733 template<>
4734 const uint32_t
4735 Output_data_plt_aarch64_standard<64, true>::
4736 plt_entry[plt_entry_size / 4] =
4737 {
4738 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4739 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4740 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4741 0xd61f0220, /* br x17. */
4742 };
4743
4744
4745 template<int size, bool big_endian>
4746 void
4747 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4748 unsigned char* pov,
4749 Address got_address,
4750 Address plt_address)
4751 {
4752 // PLT0 of the small PLT looks like this in ELF64 -
4753 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4754 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4755 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4756 // symbol resolver
4757 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4758 // GOTPLT entry for this.
4759 // br x17
4760 // PLT0 will be slightly different in ELF32 due to different got entry
4761 // size.
4762 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4763 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4764
4765 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4766 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4767 // FIXME: This only works for 64bit
4768 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4769 gotplt_2nd_ent, plt_address + 4);
4770
4771 // Fill in R_AARCH64_LDST64_ABS_LO12
4772 elfcpp::Swap<32, big_endian>::writeval(
4773 pov + 8,
4774 ((this->first_plt_entry[2] & 0xffc003ff)
4775 | ((gotplt_2nd_ent & 0xff8) << 7)));
4776
4777 // Fill in R_AARCH64_ADD_ABS_LO12
4778 elfcpp::Swap<32, big_endian>::writeval(
4779 pov + 12,
4780 ((this->first_plt_entry[3] & 0xffc003ff)
4781 | ((gotplt_2nd_ent & 0xfff) << 10)));
4782 }
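// A note on the immediate packing above: for a 64-bit LDR the imm12 field
// occupies insn bits [21:10] and is scaled by 8, so the low 12 bits of the
// target are masked with 0xff8 and shifted left by 7 (10 - 3); for the ADD
// the unscaled low 12 bits go straight into bits [21:10], i.e. masked with
// 0xfff and shifted left by 10.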
4783
4784
4785 // Subsequent entries in the PLT for an executable.
4786 // FIXME: This only works for 64bit
4787
4788 template<int size, bool big_endian>
4789 void
4790 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4791 unsigned char* pov,
4792 Address got_address,
4793 Address plt_address,
4794 unsigned int got_offset,
4795 unsigned int plt_offset)
4796 {
4797 memcpy(pov, this->plt_entry, this->plt_entry_size);
4798
4799 Address gotplt_entry_address = got_address + got_offset;
4800 Address plt_entry_address = plt_address + plt_offset;
4801
4802 // Fill in R_AARCH64_ADR_PREL_PG_HI21
4803 AArch64_relocate_functions<size, big_endian>::adrp(
4804 pov,
4805 gotplt_entry_address,
4806 plt_entry_address);
4807
4808 // Fill in R_AARCH64_LDST64_ABS_LO12
4809 elfcpp::Swap<32, big_endian>::writeval(
4810 pov + 4,
4811 ((this->plt_entry[1] & 0xffc003ff)
4812 | ((gotplt_entry_address & 0xff8) << 7)));
4813
4814 // Fill in R_AARCH64_ADD_ABS_LO12
4815 elfcpp::Swap<32, big_endian>::writeval(
4816 pov + 8,
4817 ((this->plt_entry[2] & 0xffc003ff)
4818 | ((gotplt_entry_address & 0xfff) <<10)));
4819
4820 }
4821
4822
4823 template<>
4824 const uint32_t
4825 Output_data_plt_aarch64_standard<32, false>::
4826 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4827 {
4828 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4829 0x90000002, /* adrp x2, 0 */
4830 0x90000003, /* adrp x3, 0 */
4831 0xb9400042, /* ldr w2, [w2, #0] */
4832 0x11000063, /* add w3, w3, 0 */
4833 0xd61f0040, /* br x2 */
4834 0xd503201f, /* nop */
4835 0xd503201f, /* nop */
4836 };
4837
4838 template<>
4839 const uint32_t
4840 Output_data_plt_aarch64_standard<32, true>::
4841 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4842 {
4843 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4844 0x90000002, /* adrp x2, 0 */
4845 0x90000003, /* adrp x3, 0 */
4846 0xb9400042, /* ldr w2, [w2, #0] */
4847 0x11000063, /* add w3, w3, 0 */
4848 0xd61f0040, /* br x2 */
4849 0xd503201f, /* nop */
4850 0xd503201f, /* nop */
4851 };
4852
4853 template<>
4854 const uint32_t
4855 Output_data_plt_aarch64_standard<64, false>::
4856 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4857 {
4858 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4859 0x90000002, /* adrp x2, 0 */
4860 0x90000003, /* adrp x3, 0 */
4861 0xf9400042, /* ldr x2, [x2, #0] */
4862 0x91000063, /* add x3, x3, 0 */
4863 0xd61f0040, /* br x2 */
4864 0xd503201f, /* nop */
4865 0xd503201f, /* nop */
4866 };
4867
4868 template<>
4869 const uint32_t
4870 Output_data_plt_aarch64_standard<64, true>::
4871 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4872 {
4873 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4874 0x90000002, /* adrp x2, 0 */
4875 0x90000003, /* adrp x3, 0 */
4876 0xf9400042, /* ldr x2, [x2, #0] */
4877 0x91000063, /* add x3, x3, 0 */
4878 0xd61f0040, /* br x2 */
4879 0xd503201f, /* nop */
4880 0xd503201f, /* nop */
4881 };
4882
4883 template<int size, bool big_endian>
4884 void
4885 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4886 unsigned char* pov,
4887 Address gotplt_address,
4888 Address plt_address,
4889 Address got_base,
4890 unsigned int tlsdesc_got_offset,
4891 unsigned int plt_offset)
4892 {
4893 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4894
4895 // move DT_TLSDESC_GOT address into x2
4896 // move .got.plt address into x3
4897 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4898 Address plt_entry_address = plt_address + plt_offset;
4899
4900 // R_AARCH64_ADR_PREL_PG_HI21
4901 AArch64_relocate_functions<size, big_endian>::adrp(
4902 pov + 4,
4903 tlsdesc_got_entry,
4904 plt_entry_address + 4);
4905
4906 // R_AARCH64_ADR_PREL_PG_HI21
4907 AArch64_relocate_functions<size, big_endian>::adrp(
4908 pov + 8,
4909 gotplt_address,
4910 plt_entry_address + 8);
4911
4912 // R_AARCH64_LDST64_ABS_LO12
4913 elfcpp::Swap<32, big_endian>::writeval(
4914 pov + 12,
4915 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4916 | ((tlsdesc_got_entry & 0xff8) << 7)));
4917
4918 // R_AARCH64_ADD_ABS_LO12
4919 elfcpp::Swap<32, big_endian>::writeval(
4920 pov + 16,
4921 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
4922 | ((gotplt_address & 0xfff) << 10)));
4923 }
4924
4925 // Write out the PLT.  This uses the hand-coded instructions above,
4926 // and adjusts them as needed, following the AArch64 ELF ABI.
4927
4928 template<int size, bool big_endian>
4929 void
4930 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
4931 {
4932 const off_t offset = this->offset();
4933 const section_size_type oview_size =
4934 convert_to_section_size_type(this->data_size());
4935 unsigned char* const oview = of->get_output_view(offset, oview_size);
4936
4937 const off_t got_file_offset = this->got_plt_->offset();
4938 gold_assert(got_file_offset + this->got_plt_->data_size()
4939 == this->got_irelative_->offset());
4940
4941 const section_size_type got_size =
4942 convert_to_section_size_type(this->got_plt_->data_size()
4943 + this->got_irelative_->data_size());
4944 unsigned char* const got_view = of->get_output_view(got_file_offset,
4945 got_size);
4946
4947 unsigned char* pov = oview;
4948
4949 // The base address of the .plt section.
4950 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
4951 // The base address of the PLT portion of the .got section.
4952 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
4953 = this->got_plt_->address();
4954
4955 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
4956 pov += this->first_plt_entry_offset();
4957
4958 // The first three entries in .got.plt are reserved.
4959 unsigned char* got_pov = got_view;
4960 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
4961 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4962
4963 unsigned int plt_offset = this->first_plt_entry_offset();
4964 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4965 const unsigned int count = this->count_ + this->irelative_count_;
4966 for (unsigned int plt_index = 0;
4967 plt_index < count;
4968 ++plt_index,
4969 pov += this->get_plt_entry_size(),
4970 got_pov += size / 8,
4971 plt_offset += this->get_plt_entry_size(),
4972 got_offset += size / 8)
4973 {
4974 // Set and adjust the PLT entry itself.
4975 this->fill_plt_entry(pov, gotplt_address, plt_address,
4976 got_offset, plt_offset);
4977
4978 // Set the entry in the GOT, which points to plt0.
4979 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
4980 }
4981
4982 if (this->has_tlsdesc_entry())
4983 {
4984 // Set and adjust the reserved TLSDESC PLT entry.
4985 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
4986 // The base address of the .got section.
4987 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
4988 this->got_->address();
4989 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4990 tlsdesc_got_offset, plt_offset);
4991 pov += this->get_plt_tlsdesc_entry_size();
4992 }
4993
4994 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
4995 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
4996
4997 of->write_output_view(offset, oview_size, oview);
4998 of->write_output_view(got_file_offset, got_size, got_view);
4999 }
5000
5001 // Describes how to update the immediate field of an instruction.
5002 struct AArch64_howto
5003 {
5004 // The immediate field mask.
5005 elfcpp::Elf_Xword dst_mask;
5006
5007 // The bit offset at which the relocation immediate is applied.
5008 int doffset;
5009
5010 // The second part offset, if the immediate field has two parts.
5011 // -1 if the immediate field has only one part.
5012 int doffset2;
5013 };
5014
5015 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
5016 {
5017 {0, -1, -1}, // DATA
5018 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
5019 {0xffffe0, 5, -1}, // LD [23:5]-imm19
5020 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
5021 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
5022 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
5023 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
5024 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
5025 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
5026 {0x3ffffff, 0, -1}, // B [25:0]-imm26
5027 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
5028 };
5029
5030 // AArch64 relocate function class
5031
5032 template<int size, bool big_endian>
5033 class AArch64_relocate_functions
5034 {
5035 public:
5036 typedef enum
5037 {
5038 STATUS_OKAY, // No error during relocation.
5039 STATUS_OVERFLOW, // Relocation overflow.
5040 STATUS_BAD_RELOC, // Relocation cannot be applied.
5041 } Status;
5042
5043 typedef AArch64_relocate_functions<size, big_endian> This;
5044 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
5045 typedef Relocate_info<size, big_endian> The_relocate_info;
5046 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
5047 typedef Reloc_stub<size, big_endian> The_reloc_stub;
5048 typedef Stub_table<size, big_endian> The_stub_table;
5049 typedef elfcpp::Rela<size, big_endian> The_rela;
5050 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
5051
5052 // Return the page address of the address.
5053 // Page(address) = address & ~0xFFF
5054
5055 static inline AArch64_valtype
5056 Page(Address address)
5057 {
5058 return (address & (~static_cast<Address>(0xFFF)));
5059 }
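// For example, Page(0x412345) is 0x412000: the low 12 bits are cleared so
// that ADRP-style relocations work on 4KB page granules.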
5060
5061 private:
5062 // Update the instruction (pointed to by view) with the selected bits (immed).
5063 // val = (val & ~dst_mask) | (immed << doffset)
5064
5065 template<int valsize>
5066 static inline void
5067 update_view(unsigned char* view,
5068 AArch64_valtype immed,
5069 elfcpp::Elf_Xword doffset,
5070 elfcpp::Elf_Xword dst_mask)
5071 {
5072 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5073 Valtype* wv = reinterpret_cast<Valtype*>(view);
5074 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5075
5076 // Clear immediate fields.
5077 val &= ~dst_mask;
5078 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5079 static_cast<Valtype>(val | (immed << doffset)));
5080 }
5081
5082 // Update two parts of an instruction (pointed to by view) with the
5083 // selected bits (immed1 and immed2).
5084 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
5085
5086 template<int valsize>
5087 static inline void
5088 update_view_two_parts(
5089 unsigned char* view,
5090 AArch64_valtype immed1,
5091 AArch64_valtype immed2,
5092 elfcpp::Elf_Xword doffset1,
5093 elfcpp::Elf_Xword doffset2,
5094 elfcpp::Elf_Xword dst_mask)
5095 {
5096 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5097 Valtype* wv = reinterpret_cast<Valtype*>(view);
5098 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5099 val &= ~dst_mask;
5100 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5101 static_cast<Valtype>(val | (immed1 << doffset1) |
5102 (immed2 << doffset2)));
5103 }
5104
5105 // Update adr or adrp instruction with immed.
5106 // In adr and adrp: [30:29] immlo [23:5] immhi
5107
5108 static inline void
5109 update_adr(unsigned char* view, AArch64_valtype immed)
5110 {
5111 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
5112 This::template update_view_two_parts<32>(
5113 view,
5114 immed & 0x3,
5115 (immed & 0x1ffffc) >> 2,
5116 29,
5117 5,
5118 dst_mask);
5119 }
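// For illustration: with immed == 0x1235, immlo is 0x1 (bits [1:0]) and goes
// into insn bits [30:29], while immhi is 0x48d (bits [20:2]) and goes into
// insn bits [23:5].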
5120
5121 // Update movz/movn instruction with bits immed.
5122 // Set instruction to movz if is_movz is true, otherwise set instruction
5123 // to movn.
5124
5125 static inline void
5126 update_movnz(unsigned char* view,
5127 AArch64_valtype immed,
5128 bool is_movz)
5129 {
5130 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5131 Valtype* wv = reinterpret_cast<Valtype*>(view);
5132 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
5133
5134 const elfcpp::Elf_Xword doffset =
5135 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
5136 const elfcpp::Elf_Xword dst_mask =
5137 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
5138
5139 // Clear immediate fields and opc code.
5140 val &= ~(dst_mask | (0x3 << 29));
5141
5142 // Set instruction to movz or movn.
5143 // movz: [30:29] is 10 movn: [30:29] is 00
5144 if (is_movz)
5145 val |= (0x2 << 29);
5146
5147 elfcpp::Swap<32, big_endian>::writeval(wv,
5148 static_cast<Valtype>(val | (immed << doffset)));
5149 }
5150
5151 public:
5152
5153 // Update selected bits in text.
5154
5155 template<int valsize>
5156 static inline typename This::Status
5157 reloc_common(unsigned char* view, Address x,
5158 const AArch64_reloc_property* reloc_property)
5159 {
5160 // Select bits from X.
5161 Address immed = reloc_property->select_x_value(x);
5162
5163 // Update view.
5164 const AArch64_reloc_property::Reloc_inst inst =
5165 reloc_property->reloc_inst();
5166 // This must not be used for data relocations or for instructions whose
5167 // immediate field has two parts.
5168 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
5169 aarch64_howto[inst].doffset != -1);
5170 This::template update_view<valsize>(view, immed,
5171 aarch64_howto[inst].doffset,
5172 aarch64_howto[inst].dst_mask);
5173
5174 // Check for overflow or misalignment if the relocation requires it.
5175 return (reloc_property->checkup_x_value(x)
5176 ? This::STATUS_OKAY
5177 : This::STATUS_OVERFLOW);
5178 }
5179
5180 // Construct a B insn. Note that although we group it here with the other
5181 // relocation operations, there is actually no 'relocation' involved.
5182 static inline void
5183 construct_b(unsigned char* view, unsigned int branch_offset)
5184 {
5185 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5186 26, 0, 0xffffffff);
5187 }
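// For example (illustrative): construct_b(view, 8) writes the opcode 0x05
// into bits [31:26] and imm26 = 8 >> 2 = 2 into bits [25:0], producing the
// instruction word 0x14000002, i.e. "b .+8". BRANCH_OFFSET is assumed to be
// a multiple of 4 within the +/-128MB reach of a B instruction.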
5188
5189 // Do a simple rela relocation at unaligned addresses.
5190
5191 template<int valsize>
5192 static inline typename This::Status
5193 rela_ua(unsigned char* view,
5194 const Sized_relobj_file<size, big_endian>* object,
5195 const Symbol_value<size>* psymval,
5196 AArch64_valtype addend,
5197 const AArch64_reloc_property* reloc_property)
5198 {
5199 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5200 Valtype;
5201 typename elfcpp::Elf_types<size>::Elf_Addr x =
5202 psymval->value(object, addend);
5203 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5204 static_cast<Valtype>(x));
5205 return (reloc_property->checkup_x_value(x)
5206 ? This::STATUS_OKAY
5207 : This::STATUS_OVERFLOW);
5208 }
5209
5210 // Do a simple pc-relative relocation at unaligned addresses.
5211
5212 template<int valsize>
5213 static inline typename This::Status
5214 pcrela_ua(unsigned char* view,
5215 const Sized_relobj_file<size, big_endian>* object,
5216 const Symbol_value<size>* psymval,
5217 AArch64_valtype addend,
5218 Address address,
5219 const AArch64_reloc_property* reloc_property)
5220 {
5221 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5222 Valtype;
5223 Address x = psymval->value(object, addend) - address;
5224 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5225 static_cast<Valtype>(x));
5226 return (reloc_property->checkup_x_value(x)
5227 ? This::STATUS_OKAY
5228 : This::STATUS_OVERFLOW);
5229 }
5230
5231 // Do a simple rela relocation at aligned addresses.
5232
5233 template<int valsize>
5234 static inline typename This::Status
5235 rela(
5236 unsigned char* view,
5237 const Sized_relobj_file<size, big_endian>* object,
5238 const Symbol_value<size>* psymval,
5239 AArch64_valtype addend,
5240 const AArch64_reloc_property* reloc_property)
5241 {
5242 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5243 Valtype* wv = reinterpret_cast<Valtype*>(view);
5244 Address x = psymval->value(object, addend);
5245 elfcpp::Swap<valsize, big_endian>::writeval(wv, static_cast<Valtype>(x));
5246 return (reloc_property->checkup_x_value(x)
5247 ? This::STATUS_OKAY
5248 : This::STATUS_OVERFLOW);
5249 }
5250
5251 // Do relocate. Update selected bits in text.
5252 // new_val = (val & ~dst_mask) | (immed << doffset)
5253
5254 template<int valsize>
5255 static inline typename This::Status
5256 rela_general(unsigned char* view,
5257 const Sized_relobj_file<size, big_endian>* object,
5258 const Symbol_value<size>* psymval,
5259 AArch64_valtype addend,
5260 const AArch64_reloc_property* reloc_property)
5261 {
5262 // Calculate relocation.
5263 Address x = psymval->value(object, addend);
5264 return This::template reloc_common<valsize>(view, x, reloc_property);
5265 }
5266
5267 // Do relocate. Update selected bits in text.
5268 // new val = (val & ~dst_mask) | (immed << doffset)
5269
5270 template<int valsize>
5271 static inline typename This::Status
5272 rela_general(
5273 unsigned char* view,
5274 AArch64_valtype s,
5275 AArch64_valtype addend,
5276 const AArch64_reloc_property* reloc_property)
5277 {
5278 // Calculate relocation.
5279 Address x = s + addend;
5280 return This::template reloc_common<valsize>(view, x, reloc_property);
5281 }
5282
5283 // Do address relative relocate. Update selected bits in text.
5284 // new val = (val & ~dst_mask) | (immed << doffset)
5285
5286 template<int valsize>
5287 static inline typename This::Status
5288 pcrela_general(
5289 unsigned char* view,
5290 const Sized_relobj_file<size, big_endian>* object,
5291 const Symbol_value<size>* psymval,
5292 AArch64_valtype addend,
5293 Address address,
5294 const AArch64_reloc_property* reloc_property)
5295 {
5296 // Calculate relocation.
5297 Address x = psymval->value(object, addend) - address;
5298 return This::template reloc_common<valsize>(view, x, reloc_property);
5299 }
5300
5301
5302 // Calculate (S + A) - address, update adr instruction.
5303
5304 static inline typename This::Status
5305 adr(unsigned char* view,
5306 const Sized_relobj_file<size, big_endian>* object,
5307 const Symbol_value<size>* psymval,
5308 Address addend,
5309 Address address,
5310 const AArch64_reloc_property* /* reloc_property */)
5311 {
5312 AArch64_valtype x = psymval->value(object, addend) - address;
5313 // Pick bits [20:0] of X.
5314 AArch64_valtype immed = x & 0x1fffff;
5315 update_adr(view, immed);
5316 // Check -2^20 <= X < 2^20
5317 return (size == 64 && Bits<21>::has_overflow((x))
5318 ? This::STATUS_OVERFLOW
5319 : This::STATUS_OKAY);
5320 }
5321
5322 // Calculate PG(S+A) - PG(address), update adrp instruction.
5323 // R_AARCH64_ADR_PREL_PG_HI21
5324
5325 static inline typename This::Status
5326 adrp(
5327 unsigned char* view,
5328 Address sa,
5329 Address address)
5330 {
5331 AArch64_valtype x = This::Page(sa) - This::Page(address);
5332 // Pick [32:12] of X.
5333 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5334 update_adr(view, immed);
5335 // Check -2^32 <= X < 2^32
5336 return (size == 64 && Bits<33>::has_overflow((x))
5337 ? This::STATUS_OVERFLOW
5338 : This::STATUS_OKAY);
5339 }
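// Worked example (illustrative): with sa == 0x412345 and address == 0x400010,
// Page(sa) == 0x412000 and Page(address) == 0x400000, so x == 0x12000 and the
// 21-bit page-offset immediate written into the adrp is 0x12.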
5340
5341 // Calculate PG(S+A) - PG(address), update adrp instruction.
5342 // R_AARCH64_ADR_PREL_PG_HI21
5343
5344 static inline typename This::Status
5345 adrp(unsigned char* view,
5346 const Sized_relobj_file<size, big_endian>* object,
5347 const Symbol_value<size>* psymval,
5348 Address addend,
5349 Address address,
5350 const AArch64_reloc_property* reloc_property)
5351 {
5352 Address sa = psymval->value(object, addend);
5353 AArch64_valtype x = This::Page(sa) - This::Page(address);
5354 // Pick [32:12] of X.
5355 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5356 update_adr(view, immed);
5357 return (reloc_property->checkup_x_value(x)
5358 ? This::STATUS_OKAY
5359 : This::STATUS_OVERFLOW);
5360 }
5361
5362 // Update mov[n/z] instruction. Check overflow if needed.
5363 // If X >= 0, set the instruction to movz and its immediate value to the
5364 // selected bits of X.
5365 // If X < 0, set the instruction to movn and its immediate value to
5366 // NOT (the selected bits of X).
5367
5368 static inline typename This::Status
5369 movnz(unsigned char* view,
5370 AArch64_valtype x,
5371 const AArch64_reloc_property* reloc_property)
5372 {
5373 // Select bits from X.
5374 Address immed;
5375 bool is_movz;
5376 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5377 if (static_cast<SignedW>(x) >= 0)
5378 {
5379 immed = reloc_property->select_x_value(x);
5380 is_movz = true;
5381 }
5382 else
5383 {
5384 immed = reloc_property->select_x_value(~x);
5385 is_movz = false;
5386 }
5387
5388 // Update movnz instruction.
5389 update_movnz(view, immed, is_movz);
5390
5391 // Check for overflow or misalignment if the relocation requires it.
5392 return (reloc_property->checkup_x_value(x)
5393 ? This::STATUS_OKAY
5394 : This::STATUS_OVERFLOW);
5395 }
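// Illustrative examples for movnz, assuming a selector that picks bits
// [15:0] (e.g. a *_MOVW_SABS_G0 style relocation): x == 0x1234 yields a movz
// with immediate 0x1234, while x == -2 yields a movn with immediate
// select(~x) == 0x1, which movn materializes as ~0x1 == -2 at run time.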
5396
5397 static inline bool
5398 maybe_apply_stub(unsigned int,
5399 const The_relocate_info*,
5400 const The_rela&,
5401 unsigned char*,
5402 Address,
5403 const Sized_symbol<size>*,
5404 const Symbol_value<size>*,
5405 const Sized_relobj_file<size, big_endian>*,
5406 section_size_type);
5407
5408 }; // End of AArch64_relocate_functions
5409
5410
5411 // For a certain relocation type (usually jump/branch), test to see if the
5412 // destination is too far away to reach directly and therefore needs a stub.
5413 // If so, re-route the destination of the original instruction to the stub.
5414 // Note that at this point the stub has already been generated.
5415
5416 template<int size, bool big_endian>
5417 bool
5418 AArch64_relocate_functions<size, big_endian>::
5419 maybe_apply_stub(unsigned int r_type,
5420 const The_relocate_info* relinfo,
5421 const The_rela& rela,
5422 unsigned char* view,
5423 Address address,
5424 const Sized_symbol<size>* gsym,
5425 const Symbol_value<size>* psymval,
5426 const Sized_relobj_file<size, big_endian>* object,
5427 section_size_type current_group_size)
5428 {
5429 if (parameters->options().relocatable())
5430 return false;
5431
5432 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5433 Address branch_target = psymval->value(object, 0) + addend;
5434 int stub_type =
5435 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5436 if (stub_type == ST_NONE)
5437 return false;
5438
5439 const The_aarch64_relobj* aarch64_relobj =
5440 static_cast<const The_aarch64_relobj*>(object);
5441 // We don't create stubs for undefined symbols so don't look for one.
5442 if (gsym && gsym->is_undefined())
5443 {
5444 gold_debug(DEBUG_TARGET,
5445 "stub: looking for a stub for undefined symbol %s in file %s",
5446 gsym->name(), aarch64_relobj->name().c_str());
5447 return false;
5448 }
5449
5450 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5451 gold_assert(stub_table != NULL);
5452
5453 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5454 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5455 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5456 gold_assert(stub != NULL);
5457
5458 Address new_branch_target = stub_table->address() + stub->offset();
5459 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5460 new_branch_target - address;
5461 const AArch64_reloc_property* arp =
5462 aarch64_reloc_property_table->get_reloc_property(r_type);
5463 gold_assert(arp != NULL);
5464 typename This::Status status = This::template
5465 rela_general<32>(view, branch_offset, 0, arp);
5466 if (status != This::STATUS_OKAY)
5467 gold_error(_("Stub is too far away, try a smaller value "
5468 "for '--stub-group-size'. The current value is 0x%lx."),
5469 static_cast<unsigned long>(current_group_size));
5470 return true;
5471 }
5472
5473
5474 // Group input sections for stub generation.
5475 //
5476 // We group input sections in an output section so that the total size,
5477 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5478 // unless the only input section in the group is already bigger than
5479 // GROUP_SIZE. A stub table is then created to follow the last input
5480 // section in the group, i.e. for each group a stub table is created and
5481 // placed after that group. If STUBS_ALWAYS_AFTER_BRANCH is false, we
5482 // further extend the group after the stub table.
5483
5484 template<int size, bool big_endian>
5485 void
5486 Target_aarch64<size, big_endian>::group_sections(
5487 Layout* layout,
5488 section_size_type group_size,
5489 bool stubs_always_after_branch,
5490 const Task* task)
5491 {
5492 // Group input sections and insert stub table
5493 Layout::Section_list section_list;
5494 layout->get_executable_sections(&section_list);
5495 for (Layout::Section_list::const_iterator p = section_list.begin();
5496 p != section_list.end();
5497 ++p)
5498 {
5499 AArch64_output_section<size, big_endian>* output_section =
5500 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5501 output_section->group_sections(group_size, stubs_always_after_branch,
5502 this, task);
5503 }
5504 }
5505
5506
5507 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5508 // section of RELOBJ.
5509
5510 template<int size, bool big_endian>
5511 AArch64_input_section<size, big_endian>*
5512 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5513 Relobj* relobj, unsigned int shndx) const
5514 {
5515 Section_id sid(relobj, shndx);
5516 typename AArch64_input_section_map::const_iterator p =
5517 this->aarch64_input_section_map_.find(sid);
5518 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5519 }
5520
5521
5522 // Make a new AArch64_input_section object.
5523
5524 template<int size, bool big_endian>
5525 AArch64_input_section<size, big_endian>*
5526 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5527 Relobj* relobj, unsigned int shndx)
5528 {
5529 Section_id sid(relobj, shndx);
5530
5531 AArch64_input_section<size, big_endian>* input_section =
5532 new AArch64_input_section<size, big_endian>(relobj, shndx);
5533 input_section->init();
5534
5535 // Register new AArch64_input_section in map for look-up.
5536 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5537 this->aarch64_input_section_map_.insert(
5538 std::make_pair(sid, input_section));
5539
5540 // Make sure that we have not already created another AArch64_input_section
5541 // for this input section.
5542 gold_assert(ins.second);
5543
5544 return input_section;
5545 }
5546
5547
5548 // Relaxation hook. This is where we do stub generation.
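// Roughly: on the first pass we pick a stub group size and group the input
// sections; on later passes we recompute the stub tables' addresses and file
// offsets. We then scan relocations to populate the stub tables, and return
// true (i.e. request another relaxation pass) whenever any stub table's size
// changed; once nothing changes, the stubs are finalized.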
5549
5550 template<int size, bool big_endian>
5551 bool
5552 Target_aarch64<size, big_endian>::do_relax(
5553 int pass,
5554 const Input_objects* input_objects,
5555 Symbol_table* symtab,
5556 Layout* layout ,
5557 const Task* task)
5558 {
5559 gold_assert(!parameters->options().relocatable());
5560 if (pass == 1)
5561 {
5562 // We don't handle negative stub_group_size right now.
5563 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5564 if (this->stub_group_size_ == 1)
5565 {
5566 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5567 // will fail to link. The user will have to relink with an explicit
5568 // group size option.
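// For example (illustrative, assuming MAX_BRANCH_OFFSET is the +/-128MB
// reach of a B/BL instruction, i.e. 0x8000000 bytes): the default group
// size becomes 0x8000000 - 0x4000.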
5569 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5570 4096 * 4;
5571 }
5572 group_sections(layout, this->stub_group_size_, true, task);
5573 }
5574 else
5575 {
5576 // If this is not the first pass, addresses and file offsets have
5577 // been reset at this point, set them here.
5578 for (Stub_table_iterator sp = this->stub_tables_.begin();
5579 sp != this->stub_tables_.end(); ++sp)
5580 {
5581 The_stub_table* stt = *sp;
5582 The_aarch64_input_section* owner = stt->owner();
5583 off_t off = align_address(owner->original_size(),
5584 stt->addralign());
5585 stt->set_address_and_file_offset(owner->address() + off,
5586 owner->offset() + off);
5587 }
5588 }
5589
5590 // Scan relocs for relocation stubs
5591 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5592 op != input_objects->relobj_end();
5593 ++op)
5594 {
5595 The_aarch64_relobj* aarch64_relobj =
5596 static_cast<The_aarch64_relobj*>(*op);
5597 // Lock the object so we can read from it. This is only called
5598 // single-threaded from Layout::finalize, so it is OK to lock.
5599 Task_lock_obj<Object> tl(task, aarch64_relobj);
5600 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5601 }
5602
5603 bool any_stub_table_changed = false;
5604 for (Stub_table_iterator siter = this->stub_tables_.begin();
5605 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5606 {
5607 The_stub_table* stub_table = *siter;
5608 if (stub_table->update_data_size_changed_p())
5609 {
5610 The_aarch64_input_section* owner = stub_table->owner();
5611 uint64_t address = owner->address();
5612 off_t offset = owner->offset();
5613 owner->reset_address_and_file_offset();
5614 owner->set_address_and_file_offset(address, offset);
5615
5616 any_stub_table_changed = true;
5617 }
5618 }
5619
5620 // Continue relaxation only if some stub table changed size.
5621 bool continue_relaxation = any_stub_table_changed;
5622 if (!continue_relaxation)
5623 for (Stub_table_iterator sp = this->stub_tables_.begin();
5624 (sp != this->stub_tables_.end());
5625 ++sp)
5626 (*sp)->finalize_stubs();
5627
5628 return continue_relaxation;
5629 }
5630
5631
5632 // Make a new Stub_table.
5633
5634 template<int size, bool big_endian>
5635 Stub_table<size, big_endian>*
5636 Target_aarch64<size, big_endian>::new_stub_table(
5637 AArch64_input_section<size, big_endian>* owner)
5638 {
5639 Stub_table<size, big_endian>* stub_table =
5640 new Stub_table<size, big_endian>(owner);
5641 stub_table->set_address(align_address(
5642 owner->address() + owner->data_size(), 8));
5643 stub_table->set_file_offset(owner->offset() + owner->data_size());
5644 stub_table->finalize_data_size();
5645
5646 this->stub_tables_.push_back(stub_table);
5647
5648 return stub_table;
5649 }
5650
5651
5652 template<int size, bool big_endian>
5653 uint64_t
5654 Target_aarch64<size, big_endian>::do_reloc_addend(
5655 void* arg, unsigned int r_type, uint64_t) const
5656 {
5657 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5658 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5659 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5660 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5661 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5662 gold_assert(psymval->is_tls_symbol());
5663 // The value of a TLS symbol is the offset in the TLS segment.
5664 return psymval->value(ti.object, 0);
5665 }
5666
5667 // Return the number of entries in the PLT.
5668
5669 template<int size, bool big_endian>
5670 unsigned int
5671 Target_aarch64<size, big_endian>::plt_entry_count() const
5672 {
5673 if (this->plt_ == NULL)
5674 return 0;
5675 return this->plt_->entry_count();
5676 }
5677
5678 // Return the offset of the first non-reserved PLT entry.
5679
5680 template<int size, bool big_endian>
5681 unsigned int
5682 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5683 {
5684 return this->plt_->first_plt_entry_offset();
5685 }
5686
5687 // Return the size of each PLT entry.
5688
5689 template<int size, bool big_endian>
5690 unsigned int
5691 Target_aarch64<size, big_endian>::plt_entry_size() const
5692 {
5693 return this->plt_->get_plt_entry_size();
5694 }
5695
5696 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5697
5698 template<int size, bool big_endian>
5699 void
5700 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5701 Symbol_table* symtab, Layout* layout)
5702 {
5703 if (this->tls_base_symbol_defined_)
5704 return;
5705
5706 Output_segment* tls_segment = layout->tls_segment();
5707 if (tls_segment != NULL)
5708 {
5709 // _TLS_MODULE_BASE_ always points to the beginning of the TLS segment.
5710 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5711 Symbol_table::PREDEFINED,
5712 tls_segment, 0, 0,
5713 elfcpp::STT_TLS,
5714 elfcpp::STB_LOCAL,
5715 elfcpp::STV_HIDDEN, 0,
5716 Symbol::SEGMENT_START,
5717 true);
5718 }
5719 this->tls_base_symbol_defined_ = true;
5720 }
5721
5722 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5723
5724 template<int size, bool big_endian>
5725 void
5726 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5727 Symbol_table* symtab, Layout* layout)
5728 {
5729 if (this->plt_ == NULL)
5730 this->make_plt_section(symtab, layout);
5731
5732 if (!this->plt_->has_tlsdesc_entry())
5733 {
5734 // Allocate the TLSDESC_GOT entry.
5735 Output_data_got_aarch64<size, big_endian>* got =
5736 this->got_section(symtab, layout);
5737 unsigned int got_offset = got->add_constant(0);
5738
5739 // Allocate the TLSDESC_PLT entry.
5740 this->plt_->reserve_tlsdesc_entry(got_offset);
5741 }
5742 }
5743
5744 // Create a GOT entry for the TLS module index.
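// The module-index entry occupies two consecutive GOT slots, mirroring the
// {module index, offset} pair that a general-dynamic TLS sequence expects:
// an R_AARCH64_TLS_DTPMOD64 dynamic relocation is emitted against the first
// slot and the second slot is simply left as zero.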
5745
5746 template<int size, bool big_endian>
5747 unsigned int
5748 Target_aarch64<size, big_endian>::got_mod_index_entry(
5749 Symbol_table* symtab, Layout* layout,
5750 Sized_relobj_file<size, big_endian>* object)
5751 {
5752 if (this->got_mod_index_offset_ == -1U)
5753 {
5754 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5755 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5756 Output_data_got_aarch64<size, big_endian>* got =
5757 this->got_section(symtab, layout);
5758 unsigned int got_offset = got->add_constant(0);
5759 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5760 got_offset, 0);
5761 got->add_constant(0);
5762 this->got_mod_index_offset_ = got_offset;
5763 }
5764 return this->got_mod_index_offset_;
5765 }
5766
5767 // Optimize the TLS relocation type based on what we know about the
5768 // symbol. IS_FINAL is true if the final address of this symbol is
5769 // known at link time.
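// In summary (see the individual cases below): when producing a shared
// library no optimization is done; General-Dynamic and TLSDESC sequences are
// relaxed to Local-Exec when IS_FINAL and to Initial-Exec otherwise;
// Local-Dynamic is always relaxed to Local-Exec in an executable;
// Initial-Exec is relaxed to Local-Exec when IS_FINAL; Local-Exec is left
// unchanged.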
5770
5771 template<int size, bool big_endian>
5772 tls::Tls_optimization
5773 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5774 int r_type)
5775 {
5776 // If we are generating a shared library, then we can't do anything
5777 // in the linker.
5778 if (parameters->options().shared())
5779 return tls::TLSOPT_NONE;
5780
5781 switch (r_type)
5782 {
5783 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5784 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5785 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5786 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5787 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5788 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5789 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5790 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5791 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5792 case elfcpp::R_AARCH64_TLSDESC_LDR:
5793 case elfcpp::R_AARCH64_TLSDESC_ADD:
5794 case elfcpp::R_AARCH64_TLSDESC_CALL:
5795 // These are General-Dynamic which permits fully general TLS
5796 // access. Since we know that we are generating an executable,
5797 // we can convert this to Initial-Exec. If we also know that
5798 // this is a local symbol, we can further switch to Local-Exec.
5799 if (is_final)
5800 return tls::TLSOPT_TO_LE;
5801 return tls::TLSOPT_TO_IE;
5802
5803 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5804 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5805 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5806 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5807 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5808 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5809 // These are Local-Dynamic, which refer to local symbols in the
5810 // dynamic TLS block. Since we know that we are generating an
5811 // executable, we can switch to Local-Exec.
5812 return tls::TLSOPT_TO_LE;
5813
5814 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5815 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5816 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5817 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5818 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5819 // These are Initial-Exec relocs which get the thread offset
5820 // from the GOT. If we know that we are linking against the
5821 // local symbol, we can switch to Local-Exec, which links the
5822 // thread offset into the instruction.
5823 if (is_final)
5824 return tls::TLSOPT_TO_LE;
5825 return tls::TLSOPT_NONE;
5826
5827 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5828 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5829 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5830 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5831 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5832 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5833 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5834 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5835 // When we already have Local-Exec, there is nothing further we
5836 // can do.
5837 return tls::TLSOPT_NONE;
5838
5839 default:
5840 gold_unreachable();
5841 }
5842 }
5843
5844 // Returns true if this relocation type could be that of a function pointer.
5845
5846 template<int size, bool big_endian>
5847 inline bool
5848 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5849 unsigned int r_type)
5850 {
5851 switch (r_type)
5852 {
5853 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5854 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5855 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5856 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5857 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5858 {
5859 return true;
5860 }
5861 }
5862 return false;
5863 }
5864
5865 // For safe ICF, scan a relocation for a local symbol to check if it
5866 // corresponds to a function pointer being taken. In that case mark
5867 // the function whose pointer was taken as not foldable.
5868
5869 template<int size, bool big_endian>
5870 inline bool
5871 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5872 Symbol_table* ,
5873 Layout* ,
5874 Target_aarch64<size, big_endian>* ,
5875 Sized_relobj_file<size, big_endian>* ,
5876 unsigned int ,
5877 Output_section* ,
5878 const elfcpp::Rela<size, big_endian>& ,
5879 unsigned int r_type,
5880 const elfcpp::Sym<size, big_endian>&)
5881 {
5882 // When building a shared library, do not fold any local symbols.
5883 return (parameters->options().shared()
5884 || possible_function_pointer_reloc(r_type));
5885 }
5886
5887 // For safe ICF, scan a relocation for a global symbol to check if it
5888 // corresponds to a function pointer being taken. In that case mark
5889 // the function whose pointer was taken as not foldable.
5890
5891 template<int size, bool big_endian>
5892 inline bool
5893 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5894 Symbol_table* ,
5895 Layout* ,
5896 Target_aarch64<size, big_endian>* ,
5897 Sized_relobj_file<size, big_endian>* ,
5898 unsigned int ,
5899 Output_section* ,
5900 const elfcpp::Rela<size, big_endian>& ,
5901 unsigned int r_type,
5902 Symbol* gsym)
5903 {
5904 // When building a shared library, do not fold symbols whose visibility
5905 // is hidden, internal or protected.
5906 return ((parameters->options().shared()
5907 && (gsym->visibility() == elfcpp::STV_INTERNAL
5908 || gsym->visibility() == elfcpp::STV_PROTECTED
5909 || gsym->visibility() == elfcpp::STV_HIDDEN))
5910 || possible_function_pointer_reloc(r_type));
5911 }
5912
5913 // Report an unsupported relocation against a local symbol.
5914
5915 template<int size, bool big_endian>
5916 void
5917 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
5918 Sized_relobj_file<size, big_endian>* object,
5919 unsigned int r_type)
5920 {
5921 gold_error(_("%s: unsupported reloc %u against local symbol"),
5922 object->name().c_str(), r_type);
5923 }
5924
5925 // We are about to emit a dynamic relocation of type R_TYPE. If the
5926 // dynamic linker does not support it, issue an error.
5927
5928 template<int size, bool big_endian>
5929 void
5930 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
5931 unsigned int r_type)
5932 {
5933 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
5934
5935 switch (r_type)
5936 {
5937 // These are the relocation types supported by glibc for AARCH64.
5938 case elfcpp::R_AARCH64_NONE:
5939 case elfcpp::R_AARCH64_COPY:
5940 case elfcpp::R_AARCH64_GLOB_DAT:
5941 case elfcpp::R_AARCH64_JUMP_SLOT:
5942 case elfcpp::R_AARCH64_RELATIVE:
5943 case elfcpp::R_AARCH64_TLS_DTPREL64:
5944 case elfcpp::R_AARCH64_TLS_DTPMOD64:
5945 case elfcpp::R_AARCH64_TLS_TPREL64:
5946 case elfcpp::R_AARCH64_TLSDESC:
5947 case elfcpp::R_AARCH64_IRELATIVE:
5948 case elfcpp::R_AARCH64_ABS32:
5949 case elfcpp::R_AARCH64_ABS64:
5950 return;
5951
5952 default:
5953 break;
5954 }
5955
5956 // This prevents us from issuing more than one error per reloc
5957 // section. But we can still wind up issuing more than one
5958 // error per object file.
5959 if (this->issued_non_pic_error_)
5960 return;
5961 gold_assert(parameters->options().output_is_position_independent());
5962 object->error(_("requires unsupported dynamic reloc; "
5963 "recompile with -fPIC"));
5964 this->issued_non_pic_error_ = true;
5965 return;
5966 }
5967
5968 // Return whether we need to make a PLT entry for a relocation of the
5969 // given type against a STT_GNU_IFUNC symbol.
5970
5971 template<int size, bool big_endian>
5972 bool
5973 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
5974 Sized_relobj_file<size, big_endian>* object,
5975 unsigned int r_type)
5976 {
5977 const AArch64_reloc_property* arp =
5978 aarch64_reloc_property_table->get_reloc_property(r_type);
5979 gold_assert(arp != NULL);
5980
5981 int flags = arp->reference_flags();
5982 if (flags & Symbol::TLS_REF)
5983 {
5984 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
5985 object->name().c_str(), arp->name().c_str());
5986 return false;
5987 }
5988 return flags != 0;
5989 }
5990
5991 // Scan a relocation for a local symbol.
5992
5993 template<int size, bool big_endian>
5994 inline void
5995 Target_aarch64<size, big_endian>::Scan::local(
5996 Symbol_table* symtab,
5997 Layout* layout,
5998 Target_aarch64<size, big_endian>* target,
5999 Sized_relobj_file<size, big_endian>* object,
6000 unsigned int data_shndx,
6001 Output_section* output_section,
6002 const elfcpp::Rela<size, big_endian>& rela,
6003 unsigned int r_type,
6004 const elfcpp::Sym<size, big_endian>& lsym,
6005 bool is_discarded)
6006 {
6007 if (is_discarded)
6008 return;
6009
6010 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6011 Reloc_section;
6012 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6013
6014 // A local STT_GNU_IFUNC symbol may require a PLT entry.
6015 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
6016 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
6017 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
6018
6019 switch (r_type)
6020 {
6021 case elfcpp::R_AARCH64_NONE:
6022 break;
6023
6024 case elfcpp::R_AARCH64_ABS32:
6025 case elfcpp::R_AARCH64_ABS16:
6026 if (parameters->options().output_is_position_independent())
6027 {
6028 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6029 object->name().c_str(), r_type);
6030 }
6031 break;
6032
6033 case elfcpp::R_AARCH64_ABS64:
6034 // If building a shared library or PIE, we need to mark this as a dynamic
6035 // relocation, so that the dynamic loader can relocate it.
6036 if (parameters->options().output_is_position_independent())
6037 {
6038 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6039 rela_dyn->add_local_relative(object, r_sym,
6040 elfcpp::R_AARCH64_RELATIVE,
6041 output_section,
6042 data_shndx,
6043 rela.get_r_offset(),
6044 rela.get_r_addend(),
6045 is_ifunc);
6046 }
6047 break;
6048
6049 case elfcpp::R_AARCH64_PREL64:
6050 case elfcpp::R_AARCH64_PREL32:
6051 case elfcpp::R_AARCH64_PREL16:
6052 break;
6053
6054 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6055 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6056 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6057 // The above relocations are used to access GOT entries.
6058 {
6059 Output_data_got_aarch64<size, big_endian>* got =
6060 target->got_section(symtab, layout);
6061 bool is_new = false;
6062 // This symbol requires a GOT entry.
6063 if (is_ifunc)
6064 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6065 else
6066 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
6067 if (is_new && parameters->options().output_is_position_independent())
6068 target->rela_dyn_section(layout)->
6069 add_local_relative(object,
6070 r_sym,
6071 elfcpp::R_AARCH64_RELATIVE,
6072 got,
6073 object->local_got_offset(r_sym,
6074 GOT_TYPE_STANDARD),
6075 0,
6076 false);
6077 }
6078 break;
6079
6080 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6081 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6082 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6083 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6084 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6085 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6086 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6087 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6088 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6089 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6090 if (parameters->options().output_is_position_independent())
6091 {
6092 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6093 object->name().c_str(), r_type);
6094 }
6095 break;
6096
6097 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6098 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6099 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6100 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6101 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6102 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6103 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6104 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6105 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6106 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6107 break;
6108
6109 // Control flow, pc-relative. We don't need to do anything for a relative
6110 // addressing relocation against a local symbol if it does not reference
6111 // the GOT.
6112 case elfcpp::R_AARCH64_TSTBR14:
6113 case elfcpp::R_AARCH64_CONDBR19:
6114 case elfcpp::R_AARCH64_JUMP26:
6115 case elfcpp::R_AARCH64_CALL26:
6116 break;
6117
6118 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6119 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6120 {
6121 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6122 optimize_tls_reloc(!parameters->options().shared(), r_type);
6123 if (tlsopt == tls::TLSOPT_TO_LE)
6124 break;
6125
6126 layout->set_has_static_tls();
6127 // Create a GOT entry for the tp-relative offset.
6128 if (!parameters->doing_static_link())
6129 {
6130 Output_data_got_aarch64<size, big_endian>* got =
6131 target->got_section(symtab, layout);
6132 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
6133 target->rela_dyn_section(layout),
6134 elfcpp::R_AARCH64_TLS_TPREL64);
6135 }
6136 else if (!object->local_has_got_offset(r_sym,
6137 GOT_TYPE_TLS_OFFSET))
6138 {
6139 Output_data_got_aarch64<size, big_endian>* got =
6140 target->got_section(symtab, layout);
6141 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
6142 unsigned int got_offset =
6143 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
6144 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6145 gold_assert(addend == 0);
6146 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
6147 object, r_sym);
6148 }
6149 }
6150 break;
6151
6152 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6153 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6154 {
6155 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6156 optimize_tls_reloc(!parameters->options().shared(), r_type);
6157 if (tlsopt == tls::TLSOPT_TO_LE)
6158 {
6159 layout->set_has_static_tls();
6160 break;
6161 }
6162 gold_assert(tlsopt == tls::TLSOPT_NONE);
6163
6164 Output_data_got_aarch64<size, big_endian>* got =
6165 target->got_section(symtab, layout);
6166 got->add_local_pair_with_rel(object, r_sym, data_shndx,
6167 GOT_TYPE_TLS_PAIR,
6168 target->rela_dyn_section(layout),
6169 elfcpp::R_AARCH64_TLS_DTPMOD64);
6170 }
6171 break;
6172
6173 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6174 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6175 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6176 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6177 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6178 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6179 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6180 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6181 {
6182 layout->set_has_static_tls();
6183 bool output_is_shared = parameters->options().shared();
6184 if (output_is_shared)
6185 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
6186 object->name().c_str(), r_type);
6187 }
6188 break;
6189
6190 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6191 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6192 {
6193 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6194 optimize_tls_reloc(!parameters->options().shared(), r_type);
6195 if (tlsopt == tls::TLSOPT_NONE)
6196 {
6197 // Create a GOT entry for the module index.
6198 target->got_mod_index_entry(symtab, layout, object);
6199 }
6200 else if (tlsopt != tls::TLSOPT_TO_LE)
6201 unsupported_reloc_local(object, r_type);
6202 }
6203 break;
6204
6205 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6206 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6207 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6208 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6209 break;
6210
6211 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6212 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6213 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6214 {
6215 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6216 optimize_tls_reloc(!parameters->options().shared(), r_type);
6217 target->define_tls_base_symbol(symtab, layout);
6218 if (tlsopt == tls::TLSOPT_NONE)
6219 {
6220 // Create reserved PLT and GOT entries for the resolver.
6221 target->reserve_tlsdesc_entries(symtab, layout);
6222
6223 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
6224 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
6225 // entry needs to be in an area in .got.plt, not .got. Call
6226 // got_section to make sure the section has been created.
6227 target->got_section(symtab, layout);
6228 Output_data_got<size, big_endian>* got =
6229 target->got_tlsdesc_section();
6230 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6231 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
6232 {
6233 unsigned int got_offset = got->add_constant(0);
6234 got->add_constant(0);
6235 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
6236 got_offset);
6237 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6238 // We store the arguments we need in a vector, and use
6239 // the index into the vector as the parameter to pass
6240 // to the target specific routines.
6241 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
6242 void* arg = reinterpret_cast<void*>(intarg);
6243 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
6244 got, got_offset, 0);
6245 }
6246 }
6247 else if (tlsopt != tls::TLSOPT_TO_LE)
6248 unsupported_reloc_local(object, r_type);
6249 }
6250 break;
6251
6252 case elfcpp::R_AARCH64_TLSDESC_CALL:
6253 break;
6254
6255 default:
6256 unsupported_reloc_local(object, r_type);
6257 }
6258 }
6259
6260
6261 // Report an unsupported relocation against a global symbol.
6262
6263 template<int size, bool big_endian>
6264 void
6265 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6266 Sized_relobj_file<size, big_endian>* object,
6267 unsigned int r_type,
6268 Symbol* gsym)
6269 {
6270 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6271 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6272 }
6273
6274 template<int size, bool big_endian>
6275 inline void
6276 Target_aarch64<size, big_endian>::Scan::global(
6277 Symbol_table* symtab,
6278 Layout* layout,
6279 Target_aarch64<size, big_endian>* target,
6280 Sized_relobj_file<size, big_endian> * object,
6281 unsigned int data_shndx,
6282 Output_section* output_section,
6283 const elfcpp::Rela<size, big_endian>& rela,
6284 unsigned int r_type,
6285 Symbol* gsym)
6286 {
6287 // A STT_GNU_IFUNC symbol may require a PLT entry.
6288 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6289 && this->reloc_needs_plt_for_ifunc(object, r_type))
6290 target->make_plt_entry(symtab, layout, gsym);
6291
6292 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6293 Reloc_section;
6294 const AArch64_reloc_property* arp =
6295 aarch64_reloc_property_table->get_reloc_property(r_type);
6296 gold_assert(arp != NULL);
6297
6298 switch (r_type)
6299 {
6300 case elfcpp::R_AARCH64_NONE:
6301 break;
6302
6303 case elfcpp::R_AARCH64_ABS16:
6304 case elfcpp::R_AARCH64_ABS32:
6305 case elfcpp::R_AARCH64_ABS64:
6306 {
6307 // Make a PLT entry if necessary.
6308 if (gsym->needs_plt_entry())
6309 {
6310 target->make_plt_entry(symtab, layout, gsym);
6311 // Since this is not a PC-relative relocation, we may be
6312 // taking the address of a function. In that case we need to
6313 // set the entry in the dynamic symbol table to the address of
6314 // the PLT entry.
6315 if (gsym->is_from_dynobj() && !parameters->options().shared())
6316 gsym->set_needs_dynsym_value();
6317 }
6318 // Make a dynamic relocation if necessary.
6319 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6320 {
6321 if (!parameters->options().output_is_position_independent()
6322 && gsym->may_need_copy_reloc())
6323 {
6324 target->copy_reloc(symtab, layout, object,
6325 data_shndx, output_section, gsym, rela);
6326 }
6327 else if (r_type == elfcpp::R_AARCH64_ABS64
6328 && gsym->type() == elfcpp::STT_GNU_IFUNC
6329 && gsym->can_use_relative_reloc(false)
6330 && !gsym->is_from_dynobj()
6331 && !gsym->is_undefined()
6332 && !gsym->is_preemptible())
6333 {
6334 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6335 // symbol. This makes a function address in a PIE executable
6336 // match the address in a shared library that it links against.
6337 Reloc_section* rela_dyn =
6338 target->rela_irelative_section(layout);
6339 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6340 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6341 output_section, object,
6342 data_shndx,
6343 rela.get_r_offset(),
6344 rela.get_r_addend());
6345 }
6346 else if (r_type == elfcpp::R_AARCH64_ABS64
6347 && gsym->can_use_relative_reloc(false))
6348 {
6349 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6350 rela_dyn->add_global_relative(gsym,
6351 elfcpp::R_AARCH64_RELATIVE,
6352 output_section,
6353 object,
6354 data_shndx,
6355 rela.get_r_offset(),
6356 rela.get_r_addend(),
6357 false);
6358 }
6359 else
6360 {
6361 check_non_pic(object, r_type);
6362 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6363 rela_dyn = target->rela_dyn_section(layout);
6364 rela_dyn->add_global(
6365 gsym, r_type, output_section, object,
6366 data_shndx, rela.get_r_offset(), rela.get_r_addend());
6367 }
6368 }
6369 }
6370 break;
6371
6372 case elfcpp::R_AARCH64_PREL16:
6373 case elfcpp::R_AARCH64_PREL32:
6374 case elfcpp::R_AARCH64_PREL64:
6375 // This is used to fill the GOT absolute address.
6376 if (gsym->needs_plt_entry())
6377 {
6378 target->make_plt_entry(symtab, layout, gsym);
6379 }
6380 break;
6381
6382 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6383 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6384 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6385 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6386 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6387 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6388 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6389 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6390 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6391 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6392 if (parameters->options().output_is_position_independent())
6393 {
6394 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6395 object->name().c_str(), r_type);
6396 }
6397 break;
6398
6399 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6400 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6401 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6402 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6403 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6404 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6405 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6406 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6407 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6408 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6409 {
6410 if (gsym->needs_plt_entry())
6411 target->make_plt_entry(symtab, layout, gsym);
6412 // Make a dynamic relocation if necessary.
6413 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6414 {
6415 if (parameters->options().output_is_executable()
6416 && gsym->may_need_copy_reloc())
6417 {
6418 target->copy_reloc(symtab, layout, object,
6419 data_shndx, output_section, gsym, rela);
6420 }
6421 }
6422 break;
6423 }
6424
6425 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6426 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6427 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6428 {
6429 // The above relocations are used to access GOT entries.
6430 // Note that a GOT entry holds the *address* of a symbol.
6431 // The symbol requires a GOT entry.
6432 Output_data_got_aarch64<size, big_endian>* got =
6433 target->got_section(symtab, layout);
6434 if (gsym->final_value_is_known())
6435 {
6436 // For a STT_GNU_IFUNC symbol we want the PLT address.
6437 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6438 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6439 else
6440 got->add_global(gsym, GOT_TYPE_STANDARD);
6441 }
6442 else
6443 {
6444 // If this symbol is not fully resolved, we need to add a dynamic
6445 // relocation for it.
6446 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6447
6448 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6449 //
6450 // 1) The symbol may be defined in some other module.
6451 // 2) We are building a shared library and this is a protected
6452 // symbol; using GLOB_DAT means that the dynamic linker can use
6453 // the address of the PLT in the main executable when appropriate
6454 // so that function address comparisons work.
6455 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6456 // again so that function address comparisons work.
6457 if (gsym->is_from_dynobj()
6458 || gsym->is_undefined()
6459 || gsym->is_preemptible()
6460 || (gsym->visibility() == elfcpp::STV_PROTECTED
6461 && parameters->options().shared())
6462 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6463 && parameters->options().output_is_position_independent()))
6464 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6465 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6466 else
6467 {
6468 // For a STT_GNU_IFUNC symbol we want to write the PLT
6469 // offset into the GOT, so that function pointer
6470 // comparisons work correctly.
6471 bool is_new;
6472 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6473 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6474 else
6475 {
6476 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6477 // Tell the dynamic linker to use the PLT address
6478 // when resolving relocations.
6479 if (gsym->is_from_dynobj()
6480 && !parameters->options().shared())
6481 gsym->set_needs_dynsym_value();
6482 }
6483 if (is_new)
6484 {
6485 rela_dyn->add_global_relative(
6486 gsym, elfcpp::R_AARCH64_RELATIVE,
6487 got,
6488 gsym->got_offset(GOT_TYPE_STANDARD),
6489 0,
6490 false);
6491 }
6492 }
6493 }
6494 break;
6495 }
6496
6497 case elfcpp::R_AARCH64_TSTBR14:
6498 case elfcpp::R_AARCH64_CONDBR19:
6499 case elfcpp::R_AARCH64_JUMP26:
6500 case elfcpp::R_AARCH64_CALL26:
6501 {
6502 if (gsym->final_value_is_known())
6503 break;
6504
6505 if (gsym->is_defined() &&
6506 !gsym->is_from_dynobj() &&
6507 !gsym->is_preemptible())
6508 break;
6509
6510 // Make plt entry for function call.
6511 target->make_plt_entry(symtab, layout, gsym);
6512 break;
6513 }
6514
6515 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6516 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6517 {
6518 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6519 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6520 if (tlsopt == tls::TLSOPT_TO_LE)
6521 {
6522 layout->set_has_static_tls();
6523 break;
6524 }
6525 gold_assert(tlsopt == tls::TLSOPT_NONE);
6526
6527 // General dynamic.
6528 Output_data_got_aarch64<size, big_endian>* got =
6529 target->got_section(symtab, layout);
6530 // Create 2 consecutive entries for module index and offset.
6531 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6532 target->rela_dyn_section(layout),
6533 elfcpp::R_AARCH64_TLS_DTPMOD64,
6534 elfcpp::R_AARCH64_TLS_DTPREL64);
6535 }
6536 break;
6537
6538 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6539 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6540 {
6541 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6542 optimize_tls_reloc(!parameters->options().shared(), r_type);
6543 if (tlsopt == tls::TLSOPT_NONE)
6544 {
6545 // Create a GOT entry for the module index.
6546 target->got_mod_index_entry(symtab, layout, object);
6547 }
6548 else if (tlsopt != tls::TLSOPT_TO_LE)
6549 unsupported_reloc_local(object, r_type);
6550 }
6551 break;
6552
6553 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6554 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6555 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6556 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6557 break;
6558
6559 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6560 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6561 {
6562 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6563 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6564 if (tlsopt == tls::TLSOPT_TO_LE)
6565 break;
6566
6567 layout->set_has_static_tls();
6568 // Create a GOT entry for the tp-relative offset.
6569 Output_data_got_aarch64<size, big_endian>* got
6570 = target->got_section(symtab, layout);
6571 if (!parameters->doing_static_link())
6572 {
6573 got->add_global_with_rel(
6574 gsym, GOT_TYPE_TLS_OFFSET,
6575 target->rela_dyn_section(layout),
6576 elfcpp::R_AARCH64_TLS_TPREL64);
6577 }
6578 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6579 {
6580 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6581 unsigned int got_offset =
6582 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6583 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6584 gold_assert(addend == 0);
6585 got->add_static_reloc(got_offset,
6586 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6587 }
6588 }
6589 break;
6590
6591 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6592 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6593 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6594 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6595 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6596 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6597 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6598 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable
6599 layout->set_has_static_tls();
6600 if (parameters->options().shared())
6601 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6602 object->name().c_str(), r_type);
6603 break;
6604
6605 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6606 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6607 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6608 {
6609 target->define_tls_base_symbol(symtab, layout);
6610 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6611 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6612 if (tlsopt == tls::TLSOPT_NONE)
6613 {
6614 // Create reserved PLT and GOT entries for the resolver.
6615 target->reserve_tlsdesc_entries(symtab, layout);
6616
6617 // Create a double GOT entry with an R_AARCH64_TLSDESC
6618 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6619 // entry needs to be in an area in .got.plt, not .got. Call
6620 // got_section to make sure the section has been created.
6621 target->got_section(symtab, layout);
6622 Output_data_got<size, big_endian>* got =
6623 target->got_tlsdesc_section();
6624 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6625 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6626 elfcpp::R_AARCH64_TLSDESC, 0);
6627 }
6628 else if (tlsopt == tls::TLSOPT_TO_IE)
6629 {
6630 // Create a GOT entry for the tp-relative offset.
6631 Output_data_got<size, big_endian>* got
6632 = target->got_section(symtab, layout);
6633 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6634 target->rela_dyn_section(layout),
6635 elfcpp::R_AARCH64_TLS_TPREL64);
6636 }
6637 else if (tlsopt != tls::TLSOPT_TO_LE)
6638 unsupported_reloc_global(object, r_type, gsym);
6639 }
6640 break;
6641
6642 case elfcpp::R_AARCH64_TLSDESC_CALL:
6643 break;
6644
6645 default:
6646 gold_error(_("%s: unsupported reloc type in global scan"),
6647 aarch64_reloc_property_table->
6648 reloc_name_in_error_message(r_type).c_str());
6649 }
6650 return;
6651 } // End of Scan::global
6652
6653
6654 // Create the PLT section.
6655 template<int size, bool big_endian>
6656 void
6657 Target_aarch64<size, big_endian>::make_plt_section(
6658 Symbol_table* symtab, Layout* layout)
6659 {
6660 if (this->plt_ == NULL)
6661 {
6662 // Create the GOT section first.
6663 this->got_section(symtab, layout);
6664
6665 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6666 this->got_irelative_);
6667
6668 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6669 (elfcpp::SHF_ALLOC
6670 | elfcpp::SHF_EXECINSTR),
6671 this->plt_, ORDER_PLT, false);
6672
6673 // Make the sh_info field of .rela.plt point to .plt.
6674 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6675 rela_plt_os->set_info_section(this->plt_->output_section());
6676 }
6677 }
6678
6679 // Return the section for TLSDESC relocations.
6680
6681 template<int size, bool big_endian>
6682 typename Target_aarch64<size, big_endian>::Reloc_section*
6683 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6684 {
6685 return this->plt_section()->rela_tlsdesc(layout);
6686 }
6687
6688 // Create a PLT entry for a global symbol.
6689
6690 template<int size, bool big_endian>
6691 void
6692 Target_aarch64<size, big_endian>::make_plt_entry(
6693 Symbol_table* symtab,
6694 Layout* layout,
6695 Symbol* gsym)
6696 {
6697 if (gsym->has_plt_offset())
6698 return;
6699
6700 if (this->plt_ == NULL)
6701 this->make_plt_section(symtab, layout);
6702
6703 this->plt_->add_entry(symtab, layout, gsym);
6704 }
6705
6706 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6707
6708 template<int size, bool big_endian>
6709 void
6710 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6711 Symbol_table* symtab, Layout* layout,
6712 Sized_relobj_file<size, big_endian>* relobj,
6713 unsigned int local_sym_index)
6714 {
6715 if (relobj->local_has_plt_offset(local_sym_index))
6716 return;
6717 if (this->plt_ == NULL)
6718 this->make_plt_section(symtab, layout);
6719 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6720 relobj,
6721 local_sym_index);
6722 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6723 }
6724
6725 template<int size, bool big_endian>
6726 void
6727 Target_aarch64<size, big_endian>::gc_process_relocs(
6728 Symbol_table* symtab,
6729 Layout* layout,
6730 Sized_relobj_file<size, big_endian>* object,
6731 unsigned int data_shndx,
6732 unsigned int sh_type,
6733 const unsigned char* prelocs,
6734 size_t reloc_count,
6735 Output_section* output_section,
6736 bool needs_special_offset_handling,
6737 size_t local_symbol_count,
6738 const unsigned char* plocal_symbols)
6739 {
6740 typedef Target_aarch64<size, big_endian> Aarch64;
6741 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6742 Classify_reloc;
6743
6744 if (sh_type == elfcpp::SHT_REL)
6745 {
6746 return;
6747 }
6748
6749 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6750 symtab,
6751 layout,
6752 this,
6753 object,
6754 data_shndx,
6755 prelocs,
6756 reloc_count,
6757 output_section,
6758 needs_special_offset_handling,
6759 local_symbol_count,
6760 plocal_symbols);
6761 }
6762
6763 // Scan relocations for a section.
6764
6765 template<int size, bool big_endian>
6766 void
6767 Target_aarch64<size, big_endian>::scan_relocs(
6768 Symbol_table* symtab,
6769 Layout* layout,
6770 Sized_relobj_file<size, big_endian>* object,
6771 unsigned int data_shndx,
6772 unsigned int sh_type,
6773 const unsigned char* prelocs,
6774 size_t reloc_count,
6775 Output_section* output_section,
6776 bool needs_special_offset_handling,
6777 size_t local_symbol_count,
6778 const unsigned char* plocal_symbols)
6779 {
6780 typedef Target_aarch64<size, big_endian> Aarch64;
6781 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6782 Classify_reloc;
6783
6784 if (sh_type == elfcpp::SHT_REL)
6785 {
6786 gold_error(_("%s: unsupported REL reloc section"),
6787 object->name().c_str());
6788 return;
6789 }
6790
6791 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6792 symtab,
6793 layout,
6794 this,
6795 object,
6796 data_shndx,
6797 prelocs,
6798 reloc_count,
6799 output_section,
6800 needs_special_offset_handling,
6801 local_symbol_count,
6802 plocal_symbols);
6803 }
6804
6805 // Return the value to use for a dynamic symbol which requires special
6806 // treatment. This is how we support equality comparisons of function
6807 // pointers across shared library boundaries, as described in the
6808 // processor specific ABI supplement.
6809
6810 template<int size, bool big_endian>
6811 uint64_t
6812 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6813 {
6814 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6815 return this->plt_address_for_global(gsym);
6816 }
6817
6818
6819 // Finalize the sections.
6820
6821 template<int size, bool big_endian>
6822 void
6823 Target_aarch64<size, big_endian>::do_finalize_sections(
6824 Layout* layout,
6825 const Input_objects*,
6826 Symbol_table* symtab)
6827 {
6828 const Reloc_section* rel_plt = (this->plt_ == NULL
6829 ? NULL
6830 : this->plt_->rela_plt());
6831 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6832 this->rela_dyn_, true, false);
6833
6834 // Emit any relocs we saved in an attempt to avoid generating COPY
6835 // relocs.
6836 if (this->copy_relocs_.any_saved_relocs())
6837 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6838
6839 // Fill in some more dynamic tags.
6840 Output_data_dynamic* const odyn = layout->dynamic_data();
6841 if (odyn != NULL)
6842 {
6843 if (this->plt_ != NULL
6844 && this->plt_->output_section() != NULL
6845 && this->plt_ ->has_tlsdesc_entry())
6846 {
6847 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6848 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6849 this->got_->finalize_data_size();
6850 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6851 this->plt_, plt_offset);
6852 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6853 this->got_, got_offset);
6854 }
6855 }
6856
6857 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6858 // the .got.plt section.
6859 Symbol* sym = this->global_offset_table_;
6860 if (sym != NULL)
6861 {
6862 uint64_t data_size = this->got_plt_->current_data_size();
6863 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6864
6865 // If the .got section is more than 0x8000 bytes, we add
6866 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
6867 // bit relocations have a greater chance of working.
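// (For example, with a 0x9000-byte .got.plt, biasing the symbol by 0x8000
// keeps GOT-relative offsets roughly within [-0x8000, 0x1000) instead of
// [0, 0x9000), i.e. inside a signed 16-bit range.)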
6868 if (data_size >= 0x8000)
6869 symtab->get_sized_symbol<size>(sym)->set_value(
6870 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6871 }
6872
6873 if (parameters->doing_static_link()
6874 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6875 {
6876 // If linking statically, make sure that the __rela_iplt symbols
6877 // were defined if necessary, even if we didn't create a PLT.
6878 static const Define_symbol_in_segment syms[] =
6879 {
6880 {
6881 "__rela_iplt_start", // name
6882 elfcpp::PT_LOAD, // segment_type
6883 elfcpp::PF_W, // segment_flags_set
6884 elfcpp::PF(0), // segment_flags_clear
6885 0, // value
6886 0, // size
6887 elfcpp::STT_NOTYPE, // type
6888 elfcpp::STB_GLOBAL, // binding
6889 elfcpp::STV_HIDDEN, // visibility
6890 0, // nonvis
6891 Symbol::SEGMENT_START, // offset_from_base
6892 true // only_if_ref
6893 },
6894 {
6895 "__rela_iplt_end", // name
6896 elfcpp::PT_LOAD, // segment_type
6897 elfcpp::PF_W, // segment_flags_set
6898 elfcpp::PF(0), // segment_flags_clear
6899 0, // value
6900 0, // size
6901 elfcpp::STT_NOTYPE, // type
6902 elfcpp::STB_GLOBAL, // binding
6903 elfcpp::STV_HIDDEN, // visibility
6904 0, // nonvis
6905 Symbol::SEGMENT_START, // offset_from_base
6906 true // only_if_ref
6907 }
6908 };
6909
6910 symtab->define_symbols(layout, 2, syms,
6911 layout->script_options()->saw_sections_clause());
6912 }
6913
6914 return;
6915 }
6916
6917 // Perform a relocation.
6918
6919 template<int size, bool big_endian>
6920 inline bool
6921 Target_aarch64<size, big_endian>::Relocate::relocate(
6922 const Relocate_info<size, big_endian>* relinfo,
6923 unsigned int,
6924 Target_aarch64<size, big_endian>* target,
6925 Output_section* ,
6926 size_t relnum,
6927 const unsigned char* preloc,
6928 const Sized_symbol<size>* gsym,
6929 const Symbol_value<size>* psymval,
6930 unsigned char* view,
6931 typename elfcpp::Elf_types<size>::Elf_Addr address,
6932 section_size_type /* view_size */)
6933 {
6934 if (view == NULL)
6935 return true;
6936
6937 typedef AArch64_relocate_functions<size, big_endian> Reloc;
6938
6939 const elfcpp::Rela<size, big_endian> rela(preloc);
6940 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
6941 const AArch64_reloc_property* reloc_property =
6942 aarch64_reloc_property_table->get_reloc_property(r_type);
6943
6944 if (reloc_property == NULL)
6945 {
6946 std::string reloc_name =
6947 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
6948 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6949 _("cannot relocate %s in object file"),
6950 reloc_name.c_str());
6951 return true;
6952 }
6953
6954 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
6955
6956 // Pick the value to use for symbols defined in the PLT.
6957 Symbol_value<size> symval;
6958 if (gsym != NULL
6959 && gsym->use_plt_offset(reloc_property->reference_flags()))
6960 {
6961 symval.set_output_value(target->plt_address_for_global(gsym));
6962 psymval = &symval;
6963 }
6964 else if (gsym == NULL && psymval->is_ifunc_symbol())
6965 {
6966 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6967 if (object->local_has_plt_offset(r_sym))
6968 {
6969 symval.set_output_value(target->plt_address_for_local(object, r_sym));
6970 psymval = &symval;
6971 }
6972 }
6973
6974 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6975
6976 // Get the GOT offset if needed.
6977 // For aarch64, the GOT pointer points to the start of the GOT section.
6978 bool have_got_offset = false;
6979 int got_offset = 0;
6980 int got_base = (target->got_ != NULL
6981 ? (target->got_->current_data_size() >= 0x8000
6982 ? 0x8000 : 0)
6983 : 0);
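// Note: got_offset below is made relative to this (possibly biased) base,
// so that target->got_->address() + got_base + got_offset in the GOT cases
// further down is the absolute address of the GOT entry.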
6984 switch (r_type)
6985 {
6986 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
6987 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
6988 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
6989 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
6990 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
6991 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
6992 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
6993 case elfcpp::R_AARCH64_GOTREL64:
6994 case elfcpp::R_AARCH64_GOTREL32:
6995 case elfcpp::R_AARCH64_GOT_LD_PREL19:
6996 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
6997 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6998 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6999 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7000 if (gsym != NULL)
7001 {
7002 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
7003 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
7004 }
7005 else
7006 {
7007 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7008 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
7009 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
7010 - got_base);
7011 }
7012 have_got_offset = true;
7013 break;
7014
7015 default:
7016 break;
7017 }
7018
7019 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
7020 typename elfcpp::Elf_types<size>::Elf_Addr value;
7021 switch (r_type)
7022 {
7023 case elfcpp::R_AARCH64_NONE:
7024 break;
7025
7026 case elfcpp::R_AARCH64_ABS64:
7027 if (!parameters->options().apply_dynamic_relocs()
7028 && parameters->options().output_is_position_independent()
7029 && gsym != NULL
7030 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
7031 && !gsym->can_use_relative_reloc(false))
7032 // We have generated an absolute dynamic relocation, so do not
7033 // apply the relocation statically. (Works around bugs in older
7034 // Android dynamic linkers.)
7035 break;
7036 reloc_status = Reloc::template rela_ua<64>(
7037 view, object, psymval, addend, reloc_property);
7038 break;
7039
7040 case elfcpp::R_AARCH64_ABS32:
7041 if (!parameters->options().apply_dynamic_relocs()
7042 && parameters->options().output_is_position_independent()
7043 && gsym != NULL
7044 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7045 // We have generated an absolute dynamic relocation, so do not
7046 // apply the relocation statically. (Works around bugs in older
7047 // Android dynamic linkers.)
7048 break;
7049 reloc_status = Reloc::template rela_ua<32>(
7050 view, object, psymval, addend, reloc_property);
7051 break;
7052
7053 case elfcpp::R_AARCH64_ABS16:
7054 if (!parameters->options().apply_dynamic_relocs()
7055 && parameters->options().output_is_position_independent()
7056 && gsym != NULL
7057 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7058 // We have generated an absolute dynamic relocation, so do not
7059 // apply the relocation statically. (Works around bugs in older
7060 // Android dynamic linkers.)
7061 break;
7062 reloc_status = Reloc::template rela_ua<16>(
7063 view, object, psymval, addend, reloc_property);
7064 break;
7065
7066 case elfcpp::R_AARCH64_PREL64:
7067 reloc_status = Reloc::template pcrela_ua<64>(
7068 view, object, psymval, addend, address, reloc_property);
7069 break;
7070
7071 case elfcpp::R_AARCH64_PREL32:
7072 reloc_status = Reloc::template pcrela_ua<32>(
7073 view, object, psymval, addend, address, reloc_property);
7074 break;
7075
7076 case elfcpp::R_AARCH64_PREL16:
7077 reloc_status = Reloc::template pcrela_ua<16>(
7078 view, object, psymval, addend, address, reloc_property);
7079 break;
7080
7081 case elfcpp::R_AARCH64_MOVW_UABS_G0:
7082 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
7083 case elfcpp::R_AARCH64_MOVW_UABS_G1:
7084 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
7085 case elfcpp::R_AARCH64_MOVW_UABS_G2:
7086 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
7087 case elfcpp::R_AARCH64_MOVW_UABS_G3:
7088 reloc_status = Reloc::template rela_general<32>(
7089 view, object, psymval, addend, reloc_property);
7090 break;
7091 case elfcpp::R_AARCH64_MOVW_SABS_G0:
7092 case elfcpp::R_AARCH64_MOVW_SABS_G1:
7093 case elfcpp::R_AARCH64_MOVW_SABS_G2:
7094 reloc_status = Reloc::movnz(view, psymval->value(object, addend),
7095 reloc_property);
7096 break;
7097
7098 case elfcpp::R_AARCH64_LD_PREL_LO19:
7099 reloc_status = Reloc::template pcrela_general<32>(
7100 view, object, psymval, addend, address, reloc_property);
7101 break;
7102
7103 case elfcpp::R_AARCH64_ADR_PREL_LO21:
7104 reloc_status = Reloc::adr(view, object, psymval, addend,
7105 address, reloc_property);
7106 break;
7107
7108 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
7109 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
7110 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
7111 reloc_property);
7112 break;
7113
7114 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
7115 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
7116 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
7117 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
7118 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
7119 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
7120 reloc_status = Reloc::template rela_general<32>(
7121 view, object, psymval, addend, reloc_property);
7122 break;
7123
7124 case elfcpp::R_AARCH64_CALL26:
7125 if (this->skip_call_tls_get_addr_)
7126 {
7127 // Double check that the TLSGD insn has been optimized away.
7128 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7129 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
7130 reinterpret_cast<Insntype*>(view));
7131 gold_assert((insn & 0xff000000) == 0x91000000);
7132
7133 reloc_status = Reloc::STATUS_OKAY;
7134 this->skip_call_tls_get_addr_ = false;
7135 // Return false to stop further processing this reloc.
7136 return false;
7137 }
7138 // Fall through.
7139 case elfcpp::R_AARCH64_JUMP26:
7140 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
7141 gsym, psymval, object,
7142 target->stub_group_size_))
7143 break;
7144 // Fall through.
7145 case elfcpp::R_AARCH64_TSTBR14:
7146 case elfcpp::R_AARCH64_CONDBR19:
7147 reloc_status = Reloc::template pcrela_general<32>(
7148 view, object, psymval, addend, address, reloc_property);
7149 break;
7150
7151 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7152 gold_assert(have_got_offset);
7153 value = target->got_->address() + got_base + got_offset;
7154 reloc_status = Reloc::adrp(view, value + addend, address);
7155 break;
7156
7157 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7158 gold_assert(have_got_offset);
7159 value = target->got_->address() + got_base + got_offset;
7160 reloc_status = Reloc::template rela_general<32>(
7161 view, value, addend, reloc_property);
7162 break;
7163
7164 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7165 {
7166 gold_assert(have_got_offset);
7167 value = target->got_->address() + got_base + got_offset + addend -
7168 Reloc::Page(target->got_->address() + got_base);
7169 if ((value & 7) != 0)
7170 reloc_status = Reloc::STATUS_OVERFLOW;
7171 else
7172 reloc_status = Reloc::template reloc_common<32>(
7173 view, value, reloc_property);
7174 break;
7175 }
7176
7177 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7178 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7179 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7180 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7181 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7182 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7183 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7184 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7185 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7186 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7187 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7188 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7189 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7190 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7191 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7192 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7193 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7194 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7195 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7196 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7197 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7198 case elfcpp::R_AARCH64_TLSDESC_CALL:
7199 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
7200 gsym, psymval, view, address);
7201 break;
7202
7203 // These are dynamic relocations, which are unexpected when linking.
7204 case elfcpp::R_AARCH64_COPY:
7205 case elfcpp::R_AARCH64_GLOB_DAT:
7206 case elfcpp::R_AARCH64_JUMP_SLOT:
7207 case elfcpp::R_AARCH64_RELATIVE:
7208 case elfcpp::R_AARCH64_IRELATIVE:
7209 case elfcpp::R_AARCH64_TLS_DTPREL64:
7210 case elfcpp::R_AARCH64_TLS_DTPMOD64:
7211 case elfcpp::R_AARCH64_TLS_TPREL64:
7212 case elfcpp::R_AARCH64_TLSDESC:
7213 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7214 _("unexpected reloc %u in object file"),
7215 r_type);
7216 break;
7217
7218 default:
7219 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7220 _("unsupported reloc %s"),
7221 reloc_property->name().c_str());
7222 break;
7223 }
7224
7225 // Report any errors.
7226 switch (reloc_status)
7227 {
7228 case Reloc::STATUS_OKAY:
7229 break;
7230 case Reloc::STATUS_OVERFLOW:
7231 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7232 _("relocation overflow in %s"),
7233 reloc_property->name().c_str());
7234 break;
7235 case Reloc::STATUS_BAD_RELOC:
7236 gold_error_at_location(
7237 relinfo,
7238 relnum,
7239 rela.get_r_offset(),
7240 _("unexpected opcode while processing relocation %s"),
7241 reloc_property->name().c_str());
7242 break;
7243 default:
7244 gold_unreachable();
7245 }
7246
7247 return true;
7248 }
7249
7250
7251 template<int size, bool big_endian>
7252 inline
7253 typename AArch64_relocate_functions<size, big_endian>::Status
7254 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
7255 const Relocate_info<size, big_endian>* relinfo,
7256 Target_aarch64<size, big_endian>* target,
7257 size_t relnum,
7258 const elfcpp::Rela<size, big_endian>& rela,
7259 unsigned int r_type, const Sized_symbol<size>* gsym,
7260 const Symbol_value<size>* psymval,
7261 unsigned char* view,
7262 typename elfcpp::Elf_types<size>::Elf_Addr address)
7263 {
7264 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7265 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7266
7267 Output_segment* tls_segment = relinfo->layout->tls_segment();
7268 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7269 const AArch64_reloc_property* reloc_property =
7270 aarch64_reloc_property_table->get_reloc_property(r_type);
7271 gold_assert(reloc_property != NULL);
7272
7273 const bool is_final = (gsym == NULL
7274 ? !parameters->options().shared()
7275 : gsym->final_value_is_known());
7276 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
7277 optimize_tls_reloc(is_final, r_type);
7278
7279 Sized_relobj_file<size, big_endian>* object = relinfo->object;
7280 int tls_got_offset_type;
7281 switch (r_type)
7282 {
7283 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7284 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
7285 {
7286 if (tlsopt == tls::TLSOPT_TO_LE)
7287 {
7288 if (tls_segment == NULL)
7289 {
7290 gold_assert(parameters->errors()->error_count() > 0
7291 || issue_undefined_symbol_error(gsym));
7292 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7293 }
7294 return tls_gd_to_le(relinfo, target, rela, r_type, view,
7295 psymval);
7296 }
7297 else if (tlsopt == tls::TLSOPT_NONE)
7298 {
7299 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
7300 // First, get the address of the GOT entry.
7301 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7302 if (gsym != NULL)
7303 {
7304 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7305 got_entry_address = target->got_->address() +
7306 gsym->got_offset(tls_got_offset_type);
7307 }
7308 else
7309 {
7310 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7311 gold_assert(
7312 object->local_has_got_offset(r_sym, tls_got_offset_type));
7313 got_entry_address = target->got_->address() +
7314 object->local_got_offset(r_sym, tls_got_offset_type);
7315 }
7316
7317 // Relocate the address into the adrp/ld or adrp/add pair.
7318 switch (r_type)
7319 {
7320 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7321 return aarch64_reloc_funcs::adrp(
7322 view, got_entry_address + addend, address);
7323
7324 break;
7325
7326 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7327 return aarch64_reloc_funcs::template rela_general<32>(
7328 view, got_entry_address, addend, reloc_property);
7329 break;
7330
7331 default:
7332 gold_unreachable();
7333 }
7334 }
7335 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7336 _("unsupported gd_to_ie relaxation on %u"),
7337 r_type);
7338 }
7339 break;
7340
7341 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7342 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7343 {
7344 if (tlsopt == tls::TLSOPT_TO_LE)
7345 {
7346 if (tls_segment == NULL)
7347 {
7348 gold_assert(parameters->errors()->error_count() > 0
7349 || issue_undefined_symbol_error(gsym));
7350 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7351 }
7352 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7353 psymval);
7354 }
7355
7356 gold_assert(tlsopt == tls::TLSOPT_NONE);
7357 // Relocate the field with the offset of the GOT entry for
7358 // the module index.
7359 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7360 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7361 target->got_->address());
7362
7363 switch (r_type)
7364 {
7365 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7366 return aarch64_reloc_funcs::adrp(
7367 view, got_entry_address + addend, address);
7368 break;
7369
7370 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7371 return aarch64_reloc_funcs::template rela_general<32>(
7372 view, got_entry_address, addend, reloc_property);
7373 break;
7374
7375 default:
7376 gold_unreachable();
7377 }
7378 }
7379 break;
7380
7381 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7382 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7383 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7384 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7385 {
7386 AArch64_address value = psymval->value(object, 0);
7387 if (tlsopt == tls::TLSOPT_TO_LE)
7388 {
7389 if (tls_segment == NULL)
7390 {
7391 gold_assert(parameters->errors()->error_count() > 0
7392 || issue_undefined_symbol_error(gsym));
7393 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7394 }
7395 }
7396 switch (r_type)
7397 {
7398 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7399 return aarch64_reloc_funcs::movnz(view, value + addend,
7400 reloc_property);
7401 break;
7402
7403 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7404 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7405 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7406 return aarch64_reloc_funcs::template rela_general<32>(
7407 view, value, addend, reloc_property);
7408 break;
7409
7410 default:
7411 gold_unreachable();
7412 }
7413 // We should never reach here.
7414 }
7415 break;
7416
7417 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7418 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7419 {
7420 if (tlsopt == tls::TLSOPT_TO_LE)
7421 {
7422 if (tls_segment == NULL)
7423 {
7424 gold_assert(parameters->errors()->error_count() > 0
7425 || issue_undefined_symbol_error(gsym));
7426 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7427 }
7428 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7429 psymval);
7430 }
7431 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7432
7433 // First, get the address of the GOT entry.
7434 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7435 if (gsym != NULL)
7436 {
7437 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7438 got_entry_address = target->got_->address() +
7439 gsym->got_offset(tls_got_offset_type);
7440 }
7441 else
7442 {
7443 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7444 gold_assert(
7445 object->local_has_got_offset(r_sym, tls_got_offset_type));
7446 got_entry_address = target->got_->address() +
7447 object->local_got_offset(r_sym, tls_got_offset_type);
7448 }
7449 // Relocate the address into the adrp/ld or adrp/add pair.
7450 switch (r_type)
7451 {
7452 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7453 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7454 address);
7455 break;
7456 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7457 return aarch64_reloc_funcs::template rela_general<32>(
7458 view, got_entry_address, addend, reloc_property);
7459 default:
7460 gold_unreachable();
7461 }
7462 }
7463 // We shall never reach here.
7464 break;
7465
7466 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7467 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7468 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7469 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7470 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7471 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7472 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7473 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7474 {
7475 gold_assert(tls_segment != NULL);
7476 AArch64_address value = psymval->value(object, 0);
7477
7478 if (!parameters->options().shared())
7479 {
7480 AArch64_address aligned_tcb_size =
7481 align_address(target->tcb_size(),
7482 tls_segment->maximum_alignment());
7483 value += aligned_tcb_size;
7484 switch (r_type)
7485 {
7486 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7487 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7488 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7489 return aarch64_reloc_funcs::movnz(view, value + addend,
7490 reloc_property);
7491 default:
7492 return aarch64_reloc_funcs::template
7493 rela_general<32>(view,
7494 value,
7495 addend,
7496 reloc_property);
7497 }
7498 }
7499 else
7500 gold_error(_("%s: unsupported reloc %u "
7501 "in non-static TLSLE mode."),
7502 object->name().c_str(), r_type);
7503 }
7504 break;
7505
7506 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7507 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7508 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7509 case elfcpp::R_AARCH64_TLSDESC_CALL:
7510 {
7511 if (tlsopt == tls::TLSOPT_TO_LE)
7512 {
7513 if (tls_segment == NULL)
7514 {
7515 gold_assert(parameters->errors()->error_count() > 0
7516 || issue_undefined_symbol_error(gsym));
7517 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7518 }
7519 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7520 view, psymval);
7521 }
7522 else
7523 {
7524 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7525 ? GOT_TYPE_TLS_OFFSET
7526 : GOT_TYPE_TLS_DESC);
7527 unsigned int got_tlsdesc_offset = 0;
7528 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7529 && tlsopt == tls::TLSOPT_NONE)
7530 {
7531 // We created GOT entries in the .got.tlsdesc portion of the
7532 // .got.plt section, but the offset stored in the symbol is relative to
7533 // .got.tlsdesc; add the sizes of .got and .got.plt to reach the entry.
7534 got_tlsdesc_offset = (target->got_->data_size()
7535 + target->got_plt_section()->data_size());
7536 }
7537 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7538 if (gsym != NULL)
7539 {
7540 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7541 got_entry_address = target->got_->address()
7542 + got_tlsdesc_offset
7543 + gsym->got_offset(tls_got_offset_type);
7544 }
7545 else
7546 {
7547 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7548 gold_assert(
7549 object->local_has_got_offset(r_sym, tls_got_offset_type));
7550 got_entry_address = target->got_->address() +
7551 got_tlsdesc_offset +
7552 object->local_got_offset(r_sym, tls_got_offset_type);
7553 }
7554 if (tlsopt == tls::TLSOPT_TO_IE)
7555 {
7556 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7557 view, psymval, got_entry_address,
7558 address);
7559 }
7560
7561 // Now do tlsdesc relocation.
7562 switch (r_type)
7563 {
7564 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7565 return aarch64_reloc_funcs::adrp(view,
7566 got_entry_address + addend,
7567 address);
7568 break;
7569 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7570 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7571 return aarch64_reloc_funcs::template rela_general<32>(
7572 view, got_entry_address, addend, reloc_property);
7573 break;
7574 case elfcpp::R_AARCH64_TLSDESC_CALL:
7575 return aarch64_reloc_funcs::STATUS_OKAY;
7576 break;
7577 default:
7578 gold_unreachable();
7579 }
7580 }
7581 }
7582 break;
7583
7584 default:
7585 gold_error(_("%s: unsupported TLS reloc %u."),
7586 object->name().c_str(), r_type);
7587 }
7588 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7589 } // End of relocate_tls.
7590
7591
7592 template<int size, bool big_endian>
7593 inline
7594 typename AArch64_relocate_functions<size, big_endian>::Status
7595 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7596 const Relocate_info<size, big_endian>* relinfo,
7597 Target_aarch64<size, big_endian>* target,
7598 const elfcpp::Rela<size, big_endian>& rela,
7599 unsigned int r_type,
7600 unsigned char* view,
7601 const Symbol_value<size>* psymval)
7602 {
7603 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7604 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7605 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7606
7607 Insntype* ip = reinterpret_cast<Insntype*>(view);
7608 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7609 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7610 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7611
7612 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7613 {
7614 // This is the 2nd reloc; the optimization should already have been
7615 // done.
7616 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7617 return aarch64_reloc_funcs::STATUS_OKAY;
7618 }
7619
7620 // The original sequence is -
7621 // 90000000 adrp x0, 0 <main>
7622 // 91000000 add x0, x0, #0x0
7623 // 94000000 bl 0 <__tls_get_addr>
7624 // optimized to sequence -
7625 // d53bd040 mrs x0, tpidr_el0
7626 // 91400000 add x0, x0, #0x0, lsl #12
7627 // 91000000 add x0, x0, #0x0
7628
7629 // Unlike tls_ie_to_le, we change all 3 insns in one function call when we
7630 // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21", because we
7631 // also have to change "bl __tls_get_addr", which does not have a
7632 // corresponding tls relocation type. So before proceeding, we need to make
7633 // sure the compiler has not changed the sequence.
7634 if (!(insn1 == 0x90000000 // adrp x0, 0
7635 && insn2 == 0x91000000 // add x0, x0, #0x0
7636 && insn3 == 0x94000000)) // bl 0
7637 {
7638 // Ideally we should give up gd_to_le relaxation and do gd access.
7639 // However the gd_to_le relaxation decision has been made early
7640 // in the scan stage, where we did not allocate any GOT entry for
7641 // this symbol. Therefore we have to exit and report error now.
7642 gold_error(_("unexpected reloc insn sequence while relaxing "
7643 "tls gd to le for reloc %u."), r_type);
7644 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7645 }
7646
7647 // Write new insns.
7648 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7649 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7650 insn3 = 0x91000000; // add x0, x0, #0x0
7651 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7652 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7653 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7654
7655 // Calculate tprel value.
7656 Output_segment* tls_segment = relinfo->layout->tls_segment();
7657 gold_assert(tls_segment != NULL);
7658 AArch64_address value = psymval->value(relinfo->object, 0);
7659 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7660 AArch64_address aligned_tcb_size =
7661 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7662 AArch64_address x = value + aligned_tcb_size;
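// x is now the variable's offset from the thread pointer. The two TLSLE
// adds below materialize it in two steps (a sketch of the split, assuming
// the usual HI12/LO12 semantics): e.g. for x == 0x1234, TPREL_HI12 inserts
// 0x1 (bits [23:12], shifted left by 12) and TPREL_LO12 inserts 0x234.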
7663
7664 // After new insns are written, apply TLSLE relocs.
7665 const AArch64_reloc_property* rp1 =
7666 aarch64_reloc_property_table->get_reloc_property(
7667 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7668 const AArch64_reloc_property* rp2 =
7669 aarch64_reloc_property_table->get_reloc_property(
7670 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7671 gold_assert(rp1 != NULL && rp2 != NULL);
7672
7673 typename aarch64_reloc_funcs::Status s1 =
7674 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7675 x,
7676 addend,
7677 rp1);
7678 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7679 return s1;
7680
7681 typename aarch64_reloc_funcs::Status s2 =
7682 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7683 x,
7684 addend,
7685 rp2);
7686
7687 this->skip_call_tls_get_addr_ = true;
7688 return s2;
7689 } // End of tls_gd_to_le
7690
7691
7692 template<int size, bool big_endian>
7693 inline
7694 typename AArch64_relocate_functions<size, big_endian>::Status
7695 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7696 const Relocate_info<size, big_endian>* relinfo,
7697 Target_aarch64<size, big_endian>* target,
7698 const elfcpp::Rela<size, big_endian>& rela,
7699 unsigned int r_type,
7700 unsigned char* view,
7701 const Symbol_value<size>* psymval)
7702 {
7703 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7704 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7705 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7706
7707 Insntype* ip = reinterpret_cast<Insntype*>(view);
7708 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7709 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7710 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7711
7712 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7713 {
7714 // This is the 2nd reloc; the optimization should already have been
7715 // done.
7716 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7717 return aarch64_reloc_funcs::STATUS_OKAY;
7718 }
7719
7720 // The original sequence is -
7721 // 90000000 adrp x0, 0 <main>
7722 // 91000000 add x0, x0, #0x0
7723 // 94000000 bl 0 <__tls_get_addr>
7724 // optimized to sequence -
7725 // d53bd040 mrs x0, tpidr_el0
7726 // 91400000 add x0, x0, #0x0, lsl #12
7727 // 91000000 add x0, x0, #0x0
7728
7729 // Unlike tls_ie_to_le, we change all 3 insns in one function call when we
7730 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21", because we
7731 // also have to change "bl __tls_get_addr", which does not have a
7732 // corresponding tls relocation type. So before proceeding, we need to make
7733 // sure the compiler has not changed the sequence.
7734 if (!(insn1 == 0x90000000 // adrp x0, 0
7735 && insn2 == 0x91000000 // add x0, x0, #0x0
7736 && insn3 == 0x94000000)) // bl 0
7737 {
7738 // Ideally we should give up ld_to_le relaxation and do ld access.
7739 // However the ld_to_le relaxation decision has been made early
7740 // in the scan stage, where we did not allocate a GOT entry for
7741 // this symbol. Therefore we have to exit and report an error now.
7742 gold_error(_("unexpected reloc insn sequence while relaxing "
7743 "tls ld to le for reloc %u."), r_type);
7744 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7745 }
7746
7747 // Write new insns.
7748 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7749 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7750 insn3 = 0x91000000; // add x0, x0, #0x0
7751 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7752 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7753 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7754
7755 // Calculate tprel value.
7756 Output_segment* tls_segment = relinfo->layout->tls_segment();
7757 gold_assert(tls_segment != NULL);
7758 AArch64_address value = psymval->value(relinfo->object, 0);
7759 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7760 AArch64_address aligned_tcb_size =
7761 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7762 AArch64_address x = value + aligned_tcb_size;
7763
7764 // After new insns are written, apply TLSLE relocs.
7765 const AArch64_reloc_property* rp1 =
7766 aarch64_reloc_property_table->get_reloc_property(
7767 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7768 const AArch64_reloc_property* rp2 =
7769 aarch64_reloc_property_table->get_reloc_property(
7770 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7771 gold_assert(rp1 != NULL && rp2 != NULL);
7772
7773 typename aarch64_reloc_funcs::Status s1 =
7774 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7775 x,
7776 addend,
7777 rp1);
7778 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7779 return s1;
7780
7781 typename aarch64_reloc_funcs::Status s2 =
7782 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7783 x,
7784 addend,
7785 rp2);
7786
7787 this->skip_call_tls_get_addr_ = true;
7788 return s2;
7789
7790 } // End of tls_ld_to_le
7791
7792 template<int size, bool big_endian>
7793 inline
7794 typename AArch64_relocate_functions<size, big_endian>::Status
7795 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7796 const Relocate_info<size, big_endian>* relinfo,
7797 Target_aarch64<size, big_endian>* target,
7798 const elfcpp::Rela<size, big_endian>& rela,
7799 unsigned int r_type,
7800 unsigned char* view,
7801 const Symbol_value<size>* psymval)
7802 {
7803 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7804 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7805 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7806
7807 AArch64_address value = psymval->value(relinfo->object, 0);
7808 Output_segment* tls_segment = relinfo->layout->tls_segment();
7809 AArch64_address aligned_tcb_address =
7810 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7811 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7812 AArch64_address x = value + addend + aligned_tcb_address;
7813 // "x" is the offset to tp, we can only do this if x is within
7814 // range [0, 2^32-1]
7815 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7816 {
7817 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7818 r_type);
7819 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7820 }
7821
7822 Insntype* ip = reinterpret_cast<Insntype*>(view);
7823 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7824 unsigned int regno;
7825 Insntype newinsn;
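// The encodings below assume the standard A64 move-wide-immediate layout:
// 0xd2a00000 is MOVZ Xd, #imm16, LSL #16 and 0xf2800000 is MOVK Xd, #imm16,
// with imm16 in bits [20:5] and Rd in bits [4:0]. x is thus split into
// movz (x >> 16) for the high half and movk (x & 0xffff) for the low half.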
7826 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7827 {
7828 // Generate movz.
7829 regno = (insn & 0x1f);
7830 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7831 }
7832 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7833 {
7834 // Generate movk.
7835 regno = (insn & 0x1f);
7836 gold_assert(regno == ((insn >> 5) & 0x1f));
7837 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7838 }
7839 else
7840 gold_unreachable();
7841
7842 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7843 return aarch64_reloc_funcs::STATUS_OKAY;
7844 } // End of tls_ie_to_le
7845
7846
7847 template<int size, bool big_endian>
7848 inline
7849 typename AArch64_relocate_functions<size, big_endian>::Status
7850 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7851 const Relocate_info<size, big_endian>* relinfo,
7852 Target_aarch64<size, big_endian>* target,
7853 const elfcpp::Rela<size, big_endian>& rela,
7854 unsigned int r_type,
7855 unsigned char* view,
7856 const Symbol_value<size>* psymval)
7857 {
7858 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7859 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7860 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7861
7862 // TLSDESC-GD sequence is like:
7863 // adrp x0, :tlsdesc:v1
7864 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7865 // add x0, x0, :tlsdesc_lo12:v1
7866 // .tlsdesccall v1
7867 // blr x1
7868 // After desc_gd_to_le optimization, the sequence will be like:
7869 // movz x0, #0x0, lsl #16
7870 // movk x0, #0x10
7871 // nop
7872 // nop
7873
7874 // Calculate tprel value.
7875 Output_segment* tls_segment = relinfo->layout->tls_segment();
7876 gold_assert(tls_segment != NULL);
7877 Insntype* ip = reinterpret_cast<Insntype*>(view);
7878 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7879 AArch64_address value = psymval->value(relinfo->object, addend);
7880 AArch64_address aligned_tcb_size =
7881 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7882 AArch64_address x = value + aligned_tcb_size;
7883 // x is the offset to tp, we can only do this if x is within range
7884 // [0, 2^32-1]. If x is out of range, fail and exit.
7885 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7886 {
7887 gold_error(_("TLS variable referred to by reloc %u is too far from TP; "
7888 "cannot do gd_to_le relaxation."), r_type);
7889 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7890 }
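// With x known to fit in 32 bits, the sequence above is rewritten in place:
// the adrp becomes movz x0, #(x >> 16), lsl #16, the ldr becomes
// movk x0, #(x & 0xffff), and the add and the blr become nops (the Rd field
// is left as zero, so the destination is x0), matching the optimized
// sequence shown in the comment above.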
7891 Insntype newinsn;
7892 switch (r_type)
7893 {
7894 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7895 case elfcpp::R_AARCH64_TLSDESC_CALL:
7896 // Change to nop
7897 newinsn = 0xd503201f;
7898 break;
7899
7900 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7901 // Change to movz.
7902 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7903 break;
7904
7905 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7906 // Change to movk.
7907 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7908 break;
7909
7910 default:
7911 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7912 r_type);
7913 gold_unreachable();
7914 }
7915 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7916 return aarch64_reloc_funcs::STATUS_OKAY;
7917 } // End of tls_desc_gd_to_le
7918
7919
7920 template<int size, bool big_endian>
7921 inline
7922 typename AArch64_relocate_functions<size, big_endian>::Status
7923 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7924 const Relocate_info<size, big_endian>* /* relinfo */,
7925 Target_aarch64<size, big_endian>* /* target */,
7926 const elfcpp::Rela<size, big_endian>& rela,
7927 unsigned int r_type,
7928 unsigned char* view,
7929 const Symbol_value<size>* /* psymval */,
7930 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7931 typename elfcpp::Elf_types<size>::Elf_Addr address)
7932 {
7933 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7934 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7935
7936 // TLSDESC-GD sequence is like:
7937 // adrp x0, :tlsdesc:v1
7938 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7939 // add x0, x0, :tlsdesc_lo12:v1
7940 // .tlsdesccall v1
7941 // blr x1
7942 // After desc_gd_to_ie optimization, the sequence will be like:
7943 // adrp x0, :tlsie:v1
7944 // ldr x0, [x0, :tlsie_lo12:v1]
7945 // nop
7946 // nop
7947
7948 Insntype* ip = reinterpret_cast<Insntype*>(view);
7949 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7950 Insntype newinsn;
7951 switch (r_type)
7952 {
7953 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7954 case elfcpp::R_AARCH64_TLSDESC_CALL:
7955 // Change to nop
7956 newinsn = 0xd503201f;
7957 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7958 break;
7959
7960 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7961 {
7962 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7963 address);
7964 }
7965 break;
7966
7967 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7968 {
7969 // Set ldr target register to be x0.
7970 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7971 insn &= 0xffffffe0;
7972 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
7973 // Do relocation.
7974 const AArch64_reloc_property* reloc_property =
7975 aarch64_reloc_property_table->get_reloc_property(
7976 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7977 return aarch64_reloc_funcs::template rela_general<32>(
7978 view, got_entry_address, addend, reloc_property);
7979 }
7980 break;
7981
7982 default:
7983 gold_error(_("Don't support tlsdesc gd_to_ie optimization on reloc %u"),
7984 r_type);
7985 gold_unreachable();
7986 }
7987 return aarch64_reloc_funcs::STATUS_OKAY;
7988 } // End of tls_desc_gd_to_ie
7989
7990 // Relocate section data.
7991
7992 template<int size, bool big_endian>
7993 void
7994 Target_aarch64<size, big_endian>::relocate_section(
7995 const Relocate_info<size, big_endian>* relinfo,
7996 unsigned int sh_type,
7997 const unsigned char* prelocs,
7998 size_t reloc_count,
7999 Output_section* output_section,
8000 bool needs_special_offset_handling,
8001 unsigned char* view,
8002 typename elfcpp::Elf_types<size>::Elf_Addr address,
8003 section_size_type view_size,
8004 const Reloc_symbol_changes* reloc_symbol_changes)
8005 {
8006 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
8007 typedef Target_aarch64<size, big_endian> Aarch64;
8008 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
8009 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8010 Classify_reloc;
8011
8012 gold_assert(sh_type == elfcpp::SHT_RELA);
8013
8014 // See if we are relocating a relaxed input section. If so, the view
8015 // covers the whole output section and we need to adjust accordingly.
8016 if (needs_special_offset_handling)
8017 {
8018 const Output_relaxed_input_section* poris =
8019 output_section->find_relaxed_input_section(relinfo->object,
8020 relinfo->data_shndx);
8021 if (poris != NULL)
8022 {
8023 Address section_address = poris->address();
8024 section_size_type section_size = poris->data_size();
8025
8026 gold_assert((section_address >= address)
8027 && ((section_address + section_size)
8028 <= (address + view_size)));
8029
8030 off_t offset = section_address - address;
8031 view += offset;
8032 address += offset;
8033 view_size = section_size;
8034 }
8035 }
8036
8037 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
8038 gold::Default_comdat_behavior, Classify_reloc>(
8039 relinfo,
8040 this,
8041 prelocs,
8042 reloc_count,
8043 output_section,
8044 needs_special_offset_handling,
8045 view,
8046 address,
8047 view_size,
8048 reloc_symbol_changes);
8049 }
8050
8051 // Scan the relocs during a relocatable link.
8052
8053 template<int size, bool big_endian>
8054 void
8055 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
8056 Symbol_table* symtab,
8057 Layout* layout,
8058 Sized_relobj_file<size, big_endian>* object,
8059 unsigned int data_shndx,
8060 unsigned int sh_type,
8061 const unsigned char* prelocs,
8062 size_t reloc_count,
8063 Output_section* output_section,
8064 bool needs_special_offset_handling,
8065 size_t local_symbol_count,
8066 const unsigned char* plocal_symbols,
8067 Relocatable_relocs* rr)
8068 {
8069 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8070 Classify_reloc;
8071 typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
8072 Scan_relocatable_relocs;
8073
8074 gold_assert(sh_type == elfcpp::SHT_RELA);
8075
8076 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
8077 symtab,
8078 layout,
8079 object,
8080 data_shndx,
8081 prelocs,
8082 reloc_count,
8083 output_section,
8084 needs_special_offset_handling,
8085 local_symbol_count,
8086 plocal_symbols,
8087 rr);
8088 }
8089
8090 // Scan the relocs for --emit-relocs.
8091
8092 template<int size, bool big_endian>
8093 void
8094 Target_aarch64<size, big_endian>::emit_relocs_scan(
8095 Symbol_table* symtab,
8096 Layout* layout,
8097 Sized_relobj_file<size, big_endian>* object,
8098 unsigned int data_shndx,
8099 unsigned int sh_type,
8100 const unsigned char* prelocs,
8101 size_t reloc_count,
8102 Output_section* output_section,
8103 bool needs_special_offset_handling,
8104 size_t local_symbol_count,
8105 const unsigned char* plocal_syms,
8106 Relocatable_relocs* rr)
8107 {
8108 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8109 Classify_reloc;
8110 typedef gold::Default_emit_relocs_strategy<Classify_reloc>
8111 Emit_relocs_strategy;
8112
8113 gold_assert(sh_type == elfcpp::SHT_RELA);
8114
8115 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
8116 symtab,
8117 layout,
8118 object,
8119 data_shndx,
8120 prelocs,
8121 reloc_count,
8122 output_section,
8123 needs_special_offset_handling,
8124 local_symbol_count,
8125 plocal_syms,
8126 rr);
8127 }
8128
8129 // Relocate a section during a relocatable link.
8130
8131 template<int size, bool big_endian>
8132 void
8133 Target_aarch64<size, big_endian>::relocate_relocs(
8134 const Relocate_info<size, big_endian>* relinfo,
8135 unsigned int sh_type,
8136 const unsigned char* prelocs,
8137 size_t reloc_count,
8138 Output_section* output_section,
8139 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
8140 unsigned char* view,
8141 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
8142 section_size_type view_size,
8143 unsigned char* reloc_view,
8144 section_size_type reloc_view_size)
8145 {
8146 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8147 Classify_reloc;
8148
8149 gold_assert(sh_type == elfcpp::SHT_RELA);
8150
8151 gold::relocate_relocs<size, big_endian, Classify_reloc>(
8152 relinfo,
8153 prelocs,
8154 reloc_count,
8155 output_section,
8156 offset_in_output_section,
8157 view,
8158 view_address,
8159 view_size,
8160 reloc_view,
8161 reloc_view_size);
8162 }
8163
8164
8165 // Return whether this is a 3-insn erratum sequence.
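// A hypothetical sequence matching the checks below would be:
//   adrp x0, sym               // insn1: loads a page address into x0
//   str  x1, [x2, #8]          // insn2: single-register (or pair-store) mem op
//   ldr  x3, [x0, #:lo12:sym]  // insn3: uimm load/store using x0 as base
// i.e. insn3's base register equals insn1's destination register.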
8166
8167 template<int size, bool big_endian>
8168 bool
8169 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
8170 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8171 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
8172 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
8173 {
8174 unsigned rt1, rt2;
8175 bool load, pair;
8176
8177 // The 2nd insn is a single-register load or store, or a register-pair
8178 // store.
8179 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
8180 && (!pair || (pair && !load)))
8181 {
8182 // The 3rd insn is a load or store instruction from the "Load/store
8183 // register (unsigned immediate)" encoding class, using Rn as the
8184 // base address register.
8185 if (Insn_utilities::aarch64_ldst_uimm(insn3)
8186 && (Insn_utilities::aarch64_rn(insn3)
8187 == Insn_utilities::aarch64_rd(insn1)))
8188 return true;
8189 }
8190 return false;
8191 }
8192
8193
8194 // Return whether this is a 835769 sequence.
8195 // (Similarly implemented as in elfnn-aarch64.c.)
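// A hypothetical matching pair would be an integer load followed directly by
// a multiply-accumulate, e.g. "ldr x0, [x1]" then "madd x2, x3, x4, x5":
// the load's destination (x0) is none of the MLA's sources (x3, x4, x5), so
// there is no true (RAW) dependency and the sequence is flagged.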
8196
8197 template<int size, bool big_endian>
8198 bool
8199 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
8200 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8201 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
8202 {
8203 uint32_t rt;
8204 uint32_t rt2 = 0;
8205 uint32_t rn;
8206 uint32_t rm;
8207 uint32_t ra;
8208 bool pair;
8209 bool load;
8210
8211 if (Insn_utilities::aarch64_mlxl(insn2)
8212 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
8213 {
8214 /* Any SIMD memory op is independent of the subsequent MLA
8215 by definition of the erratum. */
8216 if (Insn_utilities::aarch64_bit(insn1, 26))
8217 return true;
8218
8219 /* If not SIMD, check for integer memory ops and MLA relationship. */
8220 rn = Insn_utilities::aarch64_rn(insn2);
8221 ra = Insn_utilities::aarch64_ra(insn2);
8222 rm = Insn_utilities::aarch64_rm(insn2);
8223
8224 /* If this is a load and there's a true(RAW) dependency, we are safe
8225 and this is not an erratum sequence. */
8226 if (load &&
8227 (rt == rn || rt == rm || rt == ra
8228 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
8229 return false;
8230
8231 /* We conservatively put out stubs for all other cases (including
8232 writebacks). */
8233 return true;
8234 }
8235
8236 return false;
8237 }
8238
8239
8240 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8241
8242 template<int size, bool big_endian>
8243 void
8244 Target_aarch64<size, big_endian>::create_erratum_stub(
8245 AArch64_relobj<size, big_endian>* relobj,
8246 unsigned int shndx,
8247 section_size_type erratum_insn_offset,
8248 Address erratum_address,
8249 typename Insn_utilities::Insntype erratum_insn,
8250 int erratum_type,
8251 unsigned int e843419_adrp_offset)
8252 {
8253 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
8254 The_stub_table* stub_table = relobj->stub_table(shndx);
8255 gold_assert(stub_table != NULL);
8256 if (stub_table->find_erratum_stub(relobj,
8257 shndx,
8258 erratum_insn_offset) == NULL)
8259 {
8260 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8261 The_erratum_stub* stub;
8262 if (erratum_type == ST_E_835769)
8263 stub = new The_erratum_stub(relobj, erratum_type, shndx,
8264 erratum_insn_offset);
8265 else if (erratum_type == ST_E_843419)
8266 stub = new E843419_stub<size, big_endian>(
8267 relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
8268 else
8269 gold_unreachable();
8270 stub->set_erratum_insn(erratum_insn);
8271 stub->set_erratum_address(erratum_address);
8272 // For erratum ST_E_843419 and ST_E_835769, the destination address is
8273 // always the insn immediately after the erratum insn.
8274 stub->set_destination_address(erratum_address + BPI);
8275 stub_table->add_erratum_stub(stub);
8276 }
8277 }
8278
8279
8280 // Scan for erratum 835769 in section SHNDX over the range [output_address +
8281 // span_start, output_address + span_end). Note that we do not share code
8282 // with scan_erratum_843419_span, because for 843419 we optimize by scanning
8283 // only the last few insns of each page, whereas for 835769 we need to scan
8284 // every insn.
8285
8286 template<int size, bool big_endian>
8287 void
8288 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
8289 AArch64_relobj<size, big_endian>* relobj,
8290 unsigned int shndx,
8291 const section_size_type span_start,
8292 const section_size_type span_end,
8293 unsigned char* input_view,
8294 Address output_address)
8295 {
8296 typedef typename Insn_utilities::Insntype Insntype;
8297
8298 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8299
8300 // Adjust output_address and view to the start of span.
8301 output_address += span_start;
8302 input_view += span_start;
8303
8304 section_size_type span_length = span_end - span_start;
8305 section_size_type offset = 0;
8306 for (offset = 0; offset + BPI < span_length; offset += BPI)
8307 {
8308 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8309 Insntype insn1 = ip[0];
8310 Insntype insn2 = ip[1];
8311 if (is_erratum_835769_sequence(insn1, insn2))
8312 {
8313 Insntype erratum_insn = insn2;
8314 // "span_start + offset" is the offset for insn1. So for insn2, it is
8315 // "span_start + offset + BPI".
8316 section_size_type erratum_insn_offset = span_start + offset + BPI;
8317 Address erratum_address = output_address + offset + BPI;
8318 gold_info(_("Erratum 835769 found and fixed at \"%s\", "
8319 "section %d, offset 0x%08x."),
8320 relobj->name().c_str(), shndx,
8321 (unsigned int)(span_start + offset));
8322
8323 this->create_erratum_stub(relobj, shndx,
8324 erratum_insn_offset, erratum_address,
8325 erratum_insn, ST_E_835769);
8326 offset += BPI; // Skip the multiply-accumulate insn.
8327 }
8328 }
8329 } // End of "Target_aarch64::scan_erratum_835769_span".
8330
8331
8332 // Scan for erratum 843419 in section SHNDX over the range
8333 // [output_address + span_start, output_address + span_end).
8334
8335 template<int size, bool big_endian>
8336 void
8337 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8338 AArch64_relobj<size, big_endian>* relobj,
8339 unsigned int shndx,
8340 const section_size_type span_start,
8341 const section_size_type span_end,
8342 unsigned char* input_view,
8343 Address output_address)
8344 {
8345 typedef typename Insn_utilities::Insntype Insntype;
8346
8347 // Adjust output_address and view to the start of span.
8348 output_address += span_start;
8349 input_view += span_start;
8350
8351 if ((output_address & 0x03) != 0)
8352 return;
8353
8354 section_size_type offset = 0;
8355 section_size_type span_length = span_end - span_start;
8356 // The first instruction must end at page offset 0xFF8 or 0xFFC.
8357 unsigned int page_offset = output_address & 0xFFF;
8358 // Make sure starting position, that is "output_address+offset",
8359 // starts at page position 0xff8 or 0xffc.
8360 if (page_offset < 0xff8)
8361 offset = 0xff8 - page_offset;
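// For example, if the span begins at page offset 0x010, the first candidate
// is at span offset 0xfe8, so output_address + offset lands exactly at page
// offset 0xff8 (the second-to-last instruction slot of the 4K page).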
8362 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8363 {
8364 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8365 Insntype insn1 = ip[0];
8366 if (Insn_utilities::is_adrp(insn1))
8367 {
8368 Insntype insn2 = ip[1];
8369 Insntype insn3 = ip[2];
8370 Insntype erratum_insn;
8371 unsigned insn_offset;
8372 bool do_report = false;
8373 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8374 {
8375 do_report = true;
8376 erratum_insn = insn3;
8377 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8378 }
8379 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8380 {
8381 // Optionally there can be an insn between insn2 and insn3.
8382 Insntype insn_opt = ip[2];
8383 // And insn_opt must not be a branch.
8384 if (!Insn_utilities::aarch64_b(insn_opt)
8385 && !Insn_utilities::aarch64_bl(insn_opt)
8386 && !Insn_utilities::aarch64_blr(insn_opt)
8387 && !Insn_utilities::aarch64_br(insn_opt))
8388 {
8389 // Also, insn_opt must not write to the dest reg of insn1. However,
8390 // we do a conservative scan, which means we may fix/report
8391 // more than necessary, but it doesn't hurt.
8392
8393 Insntype insn4 = ip[3];
8394 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8395 {
8396 do_report = true;
8397 erratum_insn = insn4;
8398 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8399 }
8400 }
8401 }
8402 if (do_report)
8403 {
8404 unsigned int erratum_insn_offset =
8405 span_start + offset + insn_offset;
8406 Address erratum_address =
8407 output_address + offset + insn_offset;
8408 create_erratum_stub(relobj, shndx,
8409 erratum_insn_offset, erratum_address,
8410 erratum_insn, ST_E_843419,
8411 span_start + offset);
8412 }
8413 }
8414
8415 // Advance to next candidate instruction. We only consider instruction
8416 // sequences starting at a page offset of 0xff8 or 0xffc.
8417 page_offset = (output_address + offset) & 0xfff;
8418 if (page_offset == 0xff8)
8419 offset += 4;
8420 else // (page_offset == 0xffc); move to the next page's 0xff8.
8421 offset += 0xffc;
8422 }
8423 } // End of "Target_aarch64::scan_erratum_843419_span".
8424
8425
8426 // The selector for aarch64 object files.
8427
8428 template<int size, bool big_endian>
8429 class Target_selector_aarch64 : public Target_selector
8430 {
8431 public:
8432 Target_selector_aarch64();
8433
8434 virtual Target*
8435 do_instantiate_target()
8436 { return new Target_aarch64<size, big_endian>(); }
8437 };
8438
8439 template<>
8440 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8441 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8442 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8443 { }
8444
8445 template<>
8446 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8447 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8448 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8449 { }
8450
8451 template<>
8452 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8453 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8454 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8455 { }
8456
8457 template<>
8458 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8459 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8460 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8461 { }
8462
8463 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8464 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8465 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8466 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8467
8468 } // End anonymous namespace.