1 // arm.cc -- arm target support for gold.
2
3 // Copyright 2009 Free Software Foundation, Inc.
4 // Written by Doug Kwan <dougkwan@google.com> based on the i386 code
5 // by Ian Lance Taylor <iant@google.com>.
6 // This file also contains borrowed and adapted code from
7 // bfd/elf32-arm.c.
8
9 // This file is part of gold.
10
11 // This program is free software; you can redistribute it and/or modify
12 // it under the terms of the GNU General Public License as published by
13 // the Free Software Foundation; either version 3 of the License, or
14 // (at your option) any later version.
15
16 // This program is distributed in the hope that it will be useful,
17 // but WITHOUT ANY WARRANTY; without even the implied warranty of
18 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 // GNU General Public License for more details.
20
21 // You should have received a copy of the GNU General Public License
22 // along with this program; if not, write to the Free Software
23 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
24 // MA 02110-1301, USA.
25
26 #include "gold.h"
27
28 #include <cstring>
29 #include <limits>
30 #include <cstdio>
31 #include <string>
32 #include <algorithm>
33 #include <map>
34 #include <utility>
35
36 #include "elfcpp.h"
37 #include "parameters.h"
38 #include "reloc.h"
39 #include "arm.h"
40 #include "object.h"
41 #include "symtab.h"
42 #include "layout.h"
43 #include "output.h"
44 #include "copy-relocs.h"
45 #include "target.h"
46 #include "target-reloc.h"
47 #include "target-select.h"
48 #include "tls.h"
49 #include "defstd.h"
50 #include "gc.h"
51 #include "attributes.h"
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<bool big_endian>
59 class Output_data_plt_arm;
60
61 template<bool big_endian>
62 class Stub_table;
63
64 template<bool big_endian>
65 class Arm_input_section;
66
67 template<bool big_endian>
68 class Arm_output_section;
69
70 template<bool big_endian>
71 class Arm_relobj;
72
73 template<bool big_endian>
74 class Target_arm;
75
76 // For convenience.
77 typedef elfcpp::Elf_types<32>::Elf_Addr Arm_address;
78
79 // Maximum branch offsets for ARM, THUMB and THUMB2.
80 const int32_t ARM_MAX_FWD_BRANCH_OFFSET = ((((1 << 23) - 1) << 2) + 8);
81 const int32_t ARM_MAX_BWD_BRANCH_OFFSET = ((-((1 << 23) << 2)) + 8);
82 const int32_t THM_MAX_FWD_BRANCH_OFFSET = ((1 << 22) -2 + 4);
83 const int32_t THM_MAX_BWD_BRANCH_OFFSET = (-(1 << 22) + 4);
84 const int32_t THM2_MAX_FWD_BRANCH_OFFSET = (((1 << 24) - 2) + 4);
85 const int32_t THM2_MAX_BWD_BRANCH_OFFSET = (-(1 << 24) + 4);
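
// Illustrative note (not from the original source): these limits follow from
// the branch encodings.  An ARM B/BL instruction holds a signed 24-bit
// immediate that is shifted left by two bits and added to the PC, which reads
// as the branch address plus 8, so for example:
//   maximum forward reach  = (((1 << 23) - 1) << 2) + 8 == ARM_MAX_FWD_BRANCH_OFFSET
//   maximum backward reach = ((-(1 << 23)) << 2) + 8    == ARM_MAX_BWD_BRANCH_OFFSET
// The THUMB and THUMB2 limits are derived similarly from their respective
// immediate widths, with the PC reading as the branch address plus 4.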
86
87 // The arm target class.
88 //
89 // This is a very simple port of gold for ARM-EABI. It is intended for
90 // supporting Android only for the time being. Only these relocation types
91 // are supported.
92 //
93 // R_ARM_NONE
94 // R_ARM_ABS32
95 // R_ARM_ABS32_NOI
96 // R_ARM_ABS16
97 // R_ARM_ABS12
98 // R_ARM_ABS8
99 // R_ARM_THM_ABS5
100 // R_ARM_BASE_ABS
101 // R_ARM_REL32
102 // R_ARM_THM_CALL
103 // R_ARM_COPY
104 // R_ARM_GLOB_DAT
105 // R_ARM_BASE_PREL
106 // R_ARM_JUMP_SLOT
107 // R_ARM_RELATIVE
108 // R_ARM_GOTOFF32
109 // R_ARM_GOT_BREL
110 // R_ARM_GOT_PREL
111 // R_ARM_PLT32
112 // R_ARM_CALL
113 // R_ARM_JUMP24
114 // R_ARM_TARGET1
115 // R_ARM_PREL31
117 // R_ARM_MOVW_ABS_NC
118 // R_ARM_MOVT_ABS
119 // R_ARM_THM_MOVW_ABS_NC
120 // R_ARM_THM_MOVT_ABS
121 // R_ARM_MOVW_PREL_NC
122 // R_ARM_MOVT_PREL
123 // R_ARM_THM_MOVW_PREL_NC
124 // R_ARM_THM_MOVT_PREL
125 //
126 // TODOs:
127 // - Support more relocation types as needed.
128 // - Make PLTs more flexible for different architecture features like
129 // Thumb-2 and BE8.
130 // There are probably a lot more.
131
132 // Instruction template class. This class is similar to the insn_sequence
133 // struct in bfd/elf32-arm.c.
134
135 class Insn_template
136 {
137 public:
138 // Types of instruction templates.
139 enum Type
140 {
141 THUMB16_TYPE = 1,
142 // THUMB16_SPECIAL_TYPE is used by sub-classes of Stub for instruction
143 // templates with class-specific semantics. Currently this is used
144 // only by the Cortex_a8_stub class for handling condition codes in
145 // conditional branches.
146 THUMB16_SPECIAL_TYPE,
147 THUMB32_TYPE,
148 ARM_TYPE,
149 DATA_TYPE
150 };
151
152 // Factory methods to create instruction templates in different formats.
153
154 static const Insn_template
155 thumb16_insn(uint32_t data)
156 { return Insn_template(data, THUMB16_TYPE, elfcpp::R_ARM_NONE, 0); }
157
158 // A Thumb conditional branch, in which the proper condition is inserted
159 // when we build the stub.
160 static const Insn_template
161 thumb16_bcond_insn(uint32_t data)
162 { return Insn_template(data, THUMB16_SPECIAL_TYPE, elfcpp::R_ARM_NONE, 1); }
163
164 static const Insn_template
165 thumb32_insn(uint32_t data)
166 { return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_NONE, 0); }
167
168 static const Insn_template
169 thumb32_b_insn(uint32_t data, int reloc_addend)
170 {
171 return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_THM_JUMP24,
172 reloc_addend);
173 }
174
175 static const Insn_template
176 arm_insn(uint32_t data)
177 { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_NONE, 0); }
178
179 static const Insn_template
180 arm_rel_insn(unsigned data, int reloc_addend)
181 { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_JUMP24, reloc_addend); }
182
183 static const Insn_template
184 data_word(unsigned data, unsigned int r_type, int reloc_addend)
185 { return Insn_template(data, DATA_TYPE, r_type, reloc_addend); }
186
187 // Accessors. This class is used for read-only objects so no modifiers
188 // are provided.
189
190 uint32_t
191 data() const
192 { return this->data_; }
193
194 // Return the instruction sequence type of this.
195 Type
196 type() const
197 { return this->type_; }
198
199 // Return the ARM relocation type of this.
200 unsigned int
201 r_type() const
202 { return this->r_type_; }
203
204 int32_t
205 reloc_addend() const
206 { return this->reloc_addend_; }
207
208 // Return size of instruction template in bytes.
209 size_t
210 size() const;
211
212 // Return byte-alignment of instruction template.
213 unsigned
214 alignment() const;
215
216 private:
217 // We make the constructor private to ensure that only the factory
218 // methods are used.
219 inline
220 Insn_template(unsigned data, Type type, unsigned int r_type, int reloc_addend)
221 : data_(data), type_(type), r_type_(r_type), reloc_addend_(reloc_addend)
222 { }
223
224 // Instruction specific data. This is used to store information like
225 // some of the instruction bits.
226 uint32_t data_;
227 // Instruction template type.
228 Type type_;
229 // Relocation type if there is a relocation or R_ARM_NONE otherwise.
230 unsigned int r_type_;
231 // Relocation addend.
232 int32_t reloc_addend_;
233 };
234
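// Illustrative sketch (not part of the original source): stub templates are
// built from arrays of the factory methods above.  A long-branch stub that
// loads its destination from a literal word placed right after the branch
// might be described like this (the encoding shown is an assumption for
// illustration only):
//
//   static const Insn_template example_insns[] =
//     {
//       Insn_template::arm_insn(0xe51ff004),                // ldr pc, [pc, #-4]
//       Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0) // destination address
//     };
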
235 // Macro for generating code for stub types. One entry per long/short
236 // branch stub.
237
238 #define DEF_STUBS \
239 DEF_STUB(long_branch_any_any) \
240 DEF_STUB(long_branch_v4t_arm_thumb) \
241 DEF_STUB(long_branch_thumb_only) \
242 DEF_STUB(long_branch_v4t_thumb_thumb) \
243 DEF_STUB(long_branch_v4t_thumb_arm) \
244 DEF_STUB(short_branch_v4t_thumb_arm) \
245 DEF_STUB(long_branch_any_arm_pic) \
246 DEF_STUB(long_branch_any_thumb_pic) \
247 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
248 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
249 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
250 DEF_STUB(long_branch_thumb_only_pic) \
251 DEF_STUB(a8_veneer_b_cond) \
252 DEF_STUB(a8_veneer_b) \
253 DEF_STUB(a8_veneer_bl) \
254 DEF_STUB(a8_veneer_blx)
255
256 // Stub types.
257
258 #define DEF_STUB(x) arm_stub_##x,
259 typedef enum
260 {
261 arm_stub_none,
262 DEF_STUBS
263
264 // First reloc stub type.
265 arm_stub_reloc_first = arm_stub_long_branch_any_any,
266 // Last reloc stub type.
267 arm_stub_reloc_last = arm_stub_long_branch_thumb_only_pic,
268
269 // First Cortex-A8 stub type.
270 arm_stub_cortex_a8_first = arm_stub_a8_veneer_b_cond,
271 // Last Cortex-A8 stub type.
272 arm_stub_cortex_a8_last = arm_stub_a8_veneer_blx,
273
274 // Last stub type.
275 arm_stub_type_last = arm_stub_a8_veneer_blx
276 } Stub_type;
277 #undef DEF_STUB
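
// For reference (illustrative), the DEF_STUBS expansion above defines one
// enumerator per stub type, e.g.:
//   arm_stub_long_branch_any_any,
//   arm_stub_long_branch_v4t_arm_thumb,
//   ...
//   arm_stub_a8_veneer_blx,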
278
279 // Stub template class. Templates are meant to be read-only objects.
280 // A stub template for a stub type contains all read-only attributes
281 // common to all stubs of the same type.
282
283 class Stub_template
284 {
285 public:
286 Stub_template(Stub_type, const Insn_template*, size_t);
287
288 ~Stub_template()
289 { }
290
291 // Return stub type.
292 Stub_type
293 type() const
294 { return this->type_; }
295
296 // Return an array of instruction templates.
297 const Insn_template*
298 insns() const
299 { return this->insns_; }
300
301 // Return size of template in number of instructions.
302 size_t
303 insn_count() const
304 { return this->insn_count_; }
305
306 // Return size of template in bytes.
307 size_t
308 size() const
309 { return this->size_; }
310
311 // Return alignment of the stub template.
312 unsigned
313 alignment() const
314 { return this->alignment_; }
315
316 // Return whether entry point is in thumb mode.
317 bool
318 entry_in_thumb_mode() const
319 { return this->entry_in_thumb_mode_; }
320
321 // Return number of relocations in this template.
322 size_t
323 reloc_count() const
324 { return this->relocs_.size(); }
325
326 // Return index of the I-th instruction with relocation.
327 size_t
328 reloc_insn_index(size_t i) const
329 {
330 gold_assert(i < this->relocs_.size());
331 return this->relocs_[i].first;
332 }
333
334 // Return the offset of the I-th instruction with relocation from the
335 // beginning of the stub.
336 section_size_type
337 reloc_offset(size_t i) const
338 {
339 gold_assert(i < this->relocs_.size());
340 return this->relocs_[i].second;
341 }
342
343 private:
344 // This contains information about an instruction template with a relocation
345 // and its offset from start of stub.
346 typedef std::pair<size_t, section_size_type> Reloc;
347
348 // A Stub_template may not be copied. We want to share templates as much
349 // as possible.
350 Stub_template(const Stub_template&);
351 Stub_template& operator=(const Stub_template&);
352
353 // Stub type.
354 Stub_type type_;
355 // Points to an array of Insn_templates.
356 const Insn_template* insns_;
357 // Number of Insn_templates in insns_[].
358 size_t insn_count_;
359 // Size of templated instructions in bytes.
360 size_t size_;
361 // Alignment of templated instructions.
362 unsigned alignment_;
363 // Flag to indicate if entry is in thumb mode.
364 bool entry_in_thumb_mode_;
365 // A table of reloc instruction indices and offsets. We can find these by
366 // looking at the instruction templates but we pre-compute and then stash
367 // them here for speed.
368 std::vector<Reloc> relocs_;
369 };
370
371 //
372 // A class for code stubs. This is a base class for different type of
373 // stubs used in the ARM target.
374 //
375
376 class Stub
377 {
378 private:
379 static const section_offset_type invalid_offset =
380 static_cast<section_offset_type>(-1);
381
382 public:
383 Stub(const Stub_template* stub_template)
384 : stub_template_(stub_template), offset_(invalid_offset)
385 { }
386
387 virtual
388 ~Stub()
389 { }
390
391 // Return the stub template.
392 const Stub_template*
393 stub_template() const
394 { return this->stub_template_; }
395
396 // Return offset of code stub from beginning of its containing stub table.
397 section_offset_type
398 offset() const
399 {
400 gold_assert(this->offset_ != invalid_offset);
401 return this->offset_;
402 }
403
404 // Set offset of code stub from beginning of its containing stub table.
405 void
406 set_offset(section_offset_type offset)
407 { this->offset_ = offset; }
408
409 // Return the relocation target address of the i-th relocation in the
410 // stub. This must be defined in a child class.
411 Arm_address
412 reloc_target(size_t i)
413 { return this->do_reloc_target(i); }
414
415 // Write a stub at output VIEW. BIG_ENDIAN selects how a stub is written.
416 void
417 write(unsigned char* view, section_size_type view_size, bool big_endian)
418 { this->do_write(view, view_size, big_endian); }
419
420 // Return the instruction for THUMB16_SPECIAL_TYPE instruction template
421 // for the i-th instruction.
422 uint16_t
423 thumb16_special(size_t i)
424 { return this->do_thumb16_special(i); }
425
426 protected:
427 // This must be defined in the child class.
428 virtual Arm_address
429 do_reloc_target(size_t) = 0;
430
431 // This may be overridden in the child class.
432 virtual void
433 do_write(unsigned char* view, section_size_type view_size, bool big_endian)
434 {
435 if (big_endian)
436 this->do_fixed_endian_write<true>(view, view_size);
437 else
438 this->do_fixed_endian_write<false>(view, view_size);
439 }
440
441 // This must be overridden if a child class uses the THUMB16_SPECIAL_TYPE
442 // instruction template.
443 virtual uint16_t
444 do_thumb16_special(size_t)
445 { gold_unreachable(); }
446
447 private:
448 // A template to implement do_write.
449 template<bool big_endian>
450 void inline
451 do_fixed_endian_write(unsigned char*, section_size_type);
452
453 // Its template.
454 const Stub_template* stub_template_;
455 // Offset of this stub from the beginning of its containing stub table.
456 section_offset_type offset_;
457 };
458
459 // Reloc stub class. These are stubs we use to fix up relocations because
460 // of limited branch ranges.
461
462 class Reloc_stub : public Stub
463 {
464 public:
465 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
466 // We assume we never jump to this address.
467 static const Arm_address invalid_address = static_cast<Arm_address>(-1);
468
469 // Return destination address.
470 Arm_address
471 destination_address() const
472 {
473 gold_assert(this->destination_address_ != this->invalid_address);
474 return this->destination_address_;
475 }
476
477 // Set destination address.
478 void
479 set_destination_address(Arm_address address)
480 {
481 gold_assert(address != this->invalid_address);
482 this->destination_address_ = address;
483 }
484
485 // Reset destination address.
486 void
487 reset_destination_address()
488 { this->destination_address_ = this->invalid_address; }
489
490 // Determine the stub type for a branch of a relocation of R_TYPE going
491 // from BRANCH_ADDRESS to BRANCH_TARGET. If TARGET_IS_THUMB is set,
492 // the branch target is a THUMB instruction. TARGET is used to look up
493 // ARM-specific linker settings.
494 static Stub_type
495 stub_type_for_reloc(unsigned int r_type, Arm_address branch_address,
496 Arm_address branch_target, bool target_is_thumb);
497
498 // Reloc_stub key. A key is logically a triplet of a stub type, a symbol
499 // and an addend. Since we treat global and local symbols differently, we
500 // use a Symbol object for a global symbol and an object-index pair for
501 // a local symbol.
502 class Key
503 {
504 public:
505 // If SYMBOL is not null, this is a global symbol and we ignore RELOBJ
506 // and R_SYM. Otherwise, this is a local symbol; RELOBJ must be non-NULL
507 // and R_SYM must not be invalid_index.
508 Key(Stub_type stub_type, const Symbol* symbol, const Relobj* relobj,
509 unsigned int r_sym, int32_t addend)
510 : stub_type_(stub_type), addend_(addend)
511 {
512 if (symbol != NULL)
513 {
514 this->r_sym_ = Reloc_stub::invalid_index;
515 this->u_.symbol = symbol;
516 }
517 else
518 {
519 gold_assert(relobj != NULL && r_sym != invalid_index);
520 this->r_sym_ = r_sym;
521 this->u_.relobj = relobj;
522 }
523 }
524
525 ~Key()
526 { }
527
528 // Accessors: Keys are meant to be read-only objects so no modifiers are
529 // provided.
530
531 // Return stub type.
532 Stub_type
533 stub_type() const
534 { return this->stub_type_; }
535
536 // Return the local symbol index or invalid_index.
537 unsigned int
538 r_sym() const
539 { return this->r_sym_; }
540
541 // Return the symbol if there is one.
542 const Symbol*
543 symbol() const
544 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
545
546 // Return the relobj if there is one.
547 const Relobj*
548 relobj() const
549 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
550
551 // Whether this key equals another key K.
552 bool
553 eq(const Key& k) const
554 {
555 return ((this->stub_type_ == k.stub_type_)
556 && (this->r_sym_ == k.r_sym_)
557 && ((this->r_sym_ != Reloc_stub::invalid_index)
558 ? (this->u_.relobj == k.u_.relobj)
559 : (this->u_.symbol == k.u_.symbol))
560 && (this->addend_ == k.addend_));
561 }
562
563 // Return a hash value.
564 size_t
565 hash_value() const
566 {
567 return (this->stub_type_
568 ^ this->r_sym_
569 ^ gold::string_hash<char>(
570 (this->r_sym_ != Reloc_stub::invalid_index)
571 ? this->u_.relobj->name().c_str()
572 : this->u_.symbol->name())
573 ^ this->addend_);
574 }
575
576 // Functors for STL associative containers.
577 struct hash
578 {
579 size_t
580 operator()(const Key& k) const
581 { return k.hash_value(); }
582 };
583
584 struct equal_to
585 {
586 bool
587 operator()(const Key& k1, const Key& k2) const
588 { return k1.eq(k2); }
589 };
590
591 // Name of key. This is mainly for debugging.
592 std::string
593 name() const;
594
595 private:
596 // Stub type.
597 Stub_type stub_type_;
598 // If this is a local symbol, this is the index in the defining object.
599 // Otherwise, it is invalid_index for a global symbol.
600 unsigned int r_sym_;
601 // If r_sym_ is invalid_index, this points to a global symbol.
602 // Otherwise, this points to a relobj. We use the unsized and
603 // target-independent Symbol and Relobj classes instead of Sized_symbol<32>
604 // and Arm_relobj to avoid making the stub class a template, as most of
605 // the stub machinery is endianness-neutral. However, this may require
606 // a bit of casting by users of this class.
607 union
608 {
609 const Symbol* symbol;
610 const Relobj* relobj;
611 } u_;
612 // Addend associated with a reloc.
613 int32_t addend_;
614 };
615
616 protected:
617 // Reloc_stubs are created via a stub factory. So these are protected.
618 Reloc_stub(const Stub_template* stub_template)
619 : Stub(stub_template), destination_address_(invalid_address)
620 { }
621
622 ~Reloc_stub()
623 { }
624
625 friend class Stub_factory;
626
627 // Return the relocation target address of the i-th relocation in the
628 // stub.
629 Arm_address
630 do_reloc_target(size_t i)
631 {
632 // All reloc stubs have only one relocation.
633 gold_assert(i == 0);
634 return this->destination_address_;
635 }
636
637 private:
638 // Address of destination.
639 Arm_address destination_address_;
640 };
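
// Illustrative sketch (assumed usage, not from the original source): a key
// for a stub that reaches a global symbol GSYM could be built and then used
// to look up an existing stub in a Stub_table.  GSYM and stub_table are
// hypothetical names used only for this example:
//
//   Reloc_stub::Key key(arm_stub_long_branch_any_any, gsym, NULL,
//                       Reloc_stub::invalid_index, 0);
//   Reloc_stub* stub = stub_table->find_reloc_stub(key);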
641
642 // Cortex-A8 stub class. We need a Cortex-A8 stub to redirect any 32-bit
643 // THUMB branch that meets the following conditions:
644 //
645 // 1. The branch straddles a page boundary, i.e. the lower 12 bits of the
646 //    branch address are 0xffe.
647 // 2. The branch target address is in the same page as the first word of
648 //    the branch.
649 // 3. The branch follows a 32-bit instruction which is not a branch.
650 //
651 // To do the fix-up, we need to store at least the address of the branch
652 // instruction and its target. We also need to store the original branch
653 // instruction bits so that the condition code of a conditional branch can
654 // be copied into a special instruction template. We also want to identify
655 // input sections needing the Cortex-A8 workaround quickly, so we store
656 // extra information about the object and section index of the code section
657 // containing a branch being fixed up. The information is used to mark
658 // the code section when we finalize the Cortex-A8 stubs.
659 //
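// For example (illustrative): a 32-bit THUMB branch whose first halfword is
// at address 0x8ffe straddles the page boundary at 0x9000; if it branches
// back to, say, 0x8100, the target lies in the same page as that first
// halfword, and conditions 1 and 2 above are met.
//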
660
661 class Cortex_a8_stub : public Stub
662 {
663 public:
664 ~Cortex_a8_stub()
665 { }
666
667 // Return the object of the code section containing the branch being fixed
668 // up.
669 Relobj*
670 relobj() const
671 { return this->relobj_; }
672
673 // Return the section index of the code section containing the branch being
674 // fixed up.
675 unsigned int
676 shndx() const
677 { return this->shndx_; }
678
679 // Return the source address of the stub. This is the address of the
680 // original branch instruction. The LSB is always set to 1 to indicate
681 // that it is a THUMB instruction.
682 Arm_address
683 source_address() const
684 { return this->source_address_; }
685
686 // Return the destination address of the stub. This is the branch taken
687 // address of the original branch instruction. LSB is 1 if it is a THUMB
688 // instruction address.
689 Arm_address
690 destination_address() const
691 { return this->destination_address_; }
692
693 // Return the instruction being fixed up.
694 uint32_t
695 original_insn() const
696 { return this->original_insn_; }
697
698 protected:
699 // Cortex_a8_stubs are created via a stub factory. So these are protected.
700 Cortex_a8_stub(const Stub_template* stub_template, Relobj* relobj,
701 unsigned int shndx, Arm_address source_address,
702 Arm_address destination_address, uint32_t original_insn)
703 : Stub(stub_template), relobj_(relobj), shndx_(shndx),
704 source_address_(source_address | 1U),
705 destination_address_(destination_address),
706 original_insn_(original_insn)
707 { }
708
709 friend class Stub_factory;
710
711 // Return the relocation target address of the i-th relocation in the
712 // stub.
713 Arm_address
714 do_reloc_target(size_t i)
715 {
716 if (this->stub_template()->type() == arm_stub_a8_veneer_b_cond)
717 {
718 // The conditional branch veneer has two relocations.
719 gold_assert(i < 2);
720 return i == 0 ? this->source_address_ + 4 : this->destination_address_;
721 }
722 else
723 {
724 // All other Cortex-A8 stubs have only one relocation.
725 gold_assert(i == 0);
726 return this->destination_address_;
727 }
728 }
729
730 // Return an instruction for the THUMB16_SPECIAL_TYPE instruction template.
731 uint16_t
732 do_thumb16_special(size_t);
733
734 private:
735 // Object of the code section containing the branch being fixed up.
736 Relobj* relobj_;
737 // Section index of the code section containing the branch being fixed up.
738 unsigned int shndx_;
739 // Source address of original branch.
740 Arm_address source_address_;
741 // Destination address of the original branch.
742 Arm_address destination_address_;
743 // Original branch instruction. This is needed for copying the condition
744 // code from a conditional branch to its stub.
745 uint32_t original_insn_;
746 };
747
748 // Stub factory class.
749
750 class Stub_factory
751 {
752 public:
753 // Return the unique instance of this class.
754 static const Stub_factory&
755 get_instance()
756 {
757 static Stub_factory singleton;
758 return singleton;
759 }
760
761 // Make a relocation stub.
762 Reloc_stub*
763 make_reloc_stub(Stub_type stub_type) const
764 {
765 gold_assert(stub_type >= arm_stub_reloc_first
766 && stub_type <= arm_stub_reloc_last);
767 return new Reloc_stub(this->stub_templates_[stub_type]);
768 }
769
770 // Make a Cortex-A8 stub.
771 Cortex_a8_stub*
772 make_cortex_a8_stub(Stub_type stub_type, Relobj* relobj, unsigned int shndx,
773 Arm_address source, Arm_address destination,
774 uint32_t original_insn) const
775 {
776 gold_assert(stub_type >= arm_stub_cortex_a8_first
777 && stub_type <= arm_stub_cortex_a8_last);
778 return new Cortex_a8_stub(this->stub_templates_[stub_type], relobj, shndx,
779 source, destination, original_insn);
780 }
781
782 private:
783 // The constructor is private since we only return a single instance
784 // created in Stub_factory::get_instance().
785
786 Stub_factory();
787
788 // A Stub_factory may not be copied since it is a singleton.
789 Stub_factory(const Stub_factory&);
790 Stub_factory& operator=(Stub_factory&);
791
792 // Stub templates. These are initialized in the constructor.
793 const Stub_template* stub_templates_[arm_stub_type_last+1];
794 };
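
// Illustrative sketch (assumed usage, not from the original source); relobj,
// shndx, source, destination and original_insn below are hypothetical
// variables used only for this example:
//
//   const Stub_factory& factory = Stub_factory::get_instance();
//   Reloc_stub* stub = factory.make_reloc_stub(arm_stub_long_branch_any_any);
//   Cortex_a8_stub* a8_stub =
//     factory.make_cortex_a8_stub(arm_stub_a8_veneer_bl, relobj, shndx,
//                                 source, destination, original_insn);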
795
796 // A class to hold stubs for the ARM target.
797
798 template<bool big_endian>
799 class Stub_table : public Output_data
800 {
801 public:
802 Stub_table(Arm_input_section<big_endian>* owner)
803 : Output_data(), owner_(owner), reloc_stubs_(), cortex_a8_stubs_(),
804 prev_data_size_(0), prev_addralign_(1)
805 { }
806
807 ~Stub_table()
808 { }
809
810 // Owner of this stub table.
811 Arm_input_section<big_endian>*
812 owner() const
813 { return this->owner_; }
814
815 // Whether this stub table is empty.
816 bool
817 empty() const
818 { return this->reloc_stubs_.empty() && this->cortex_a8_stubs_.empty(); }
819
820 // Return the current data size.
821 off_t
822 current_data_size() const
823 { return this->current_data_size_for_child(); }
824
825 // Add a STUB using KEY. The caller is responsible for avoiding adding
826 // a STUB if one with the same KEY has already been added.
827 void
828 add_reloc_stub(Reloc_stub* stub, const Reloc_stub::Key& key)
829 {
830 const Stub_template* stub_template = stub->stub_template();
831 gold_assert(stub_template->type() == key.stub_type());
832 this->reloc_stubs_[key] = stub;
833 }
834
835 // Add a Cortex-A8 STUB that fixes up a THUMB branch at ADDRESS.
836 // The caller is responsible for avoiding adding a STUB if one with the
837 // same address has already been added.
838 void
839 add_cortex_a8_stub(Arm_address address, Cortex_a8_stub* stub)
840 {
841 std::pair<Arm_address, Cortex_a8_stub*> value(address, stub);
842 this->cortex_a8_stubs_.insert(value);
843 }
844
845 // Remove all Cortex-A8 stubs.
846 void
847 remove_all_cortex_a8_stubs();
848
849 // Look up a relocation stub using KEY. Return NULL if there is none.
850 Reloc_stub*
851 find_reloc_stub(const Reloc_stub::Key& key) const
852 {
853 typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.find(key);
854 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
855 }
856
857 // Relocate stubs in this stub table.
858 void
859 relocate_stubs(const Relocate_info<32, big_endian>*,
860 Target_arm<big_endian>*, Output_section*,
861 unsigned char*, Arm_address, section_size_type);
862
863 // Update data size and alignment at the end of a relaxation pass. Return
864 // true if either data size or alignment is different from that of the
865 // previous relaxation pass.
866 bool
867 update_data_size_and_addralign();
868
869 // Finalize stubs. Set the offsets of all stubs and mark input sections
870 // needing the Cortex-A8 workaround.
871 void
872 finalize_stubs();
873
874 // Apply Cortex-A8 workaround to an address range.
875 void
876 apply_cortex_a8_workaround_to_address_range(Target_arm<big_endian>*,
877 unsigned char*, Arm_address,
878 section_size_type);
879
880 protected:
881 // Write out section contents.
882 void
883 do_write(Output_file*);
884
885 // Return the required alignment.
886 uint64_t
887 do_addralign() const
888 { return this->prev_addralign_; }
889
890 // Reset address and file offset.
891 void
892 do_reset_address_and_file_offset()
893 { this->set_current_data_size_for_child(this->prev_data_size_); }
894
895 // Set final data size.
896 void
897 set_final_data_size()
898 { this->set_data_size(this->current_data_size()); }
899
900 private:
901 // Relocate one stub.
902 void
903 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
904 Target_arm<big_endian>*, Output_section*,
905 unsigned char*, Arm_address, section_size_type);
906
907 // Unordered map of relocation stubs.
908 typedef
909 Unordered_map<Reloc_stub::Key, Reloc_stub*, Reloc_stub::Key::hash,
910 Reloc_stub::Key::equal_to>
911 Reloc_stub_map;
912
913 // List of Cortex-A8 stubs ordered by addresses of branches being
914 // fixed up in output.
915 typedef std::map<Arm_address, Cortex_a8_stub*> Cortex_a8_stub_list;
916
917 // Owner of this stub table.
918 Arm_input_section<big_endian>* owner_;
919 // The relocation stubs.
920 Reloc_stub_map reloc_stubs_;
921 // The cortex_a8_stubs.
922 Cortex_a8_stub_list cortex_a8_stubs_;
923 // data size of this in the previous pass.
924 off_t prev_data_size_;
925 // address alignment of this in the previous pass.
926 uint64_t prev_addralign_;
927 };
928
929 // A class to wrap an ordinary input section containing executable code.
930
931 template<bool big_endian>
932 class Arm_input_section : public Output_relaxed_input_section
933 {
934 public:
935 Arm_input_section(Relobj* relobj, unsigned int shndx)
936 : Output_relaxed_input_section(relobj, shndx, 1),
937 original_addralign_(1), original_size_(0), stub_table_(NULL)
938 { }
939
940 ~Arm_input_section()
941 { }
942
943 // Initialize.
944 void
945 init();
946
947 // Whether this is a stub table owner.
948 bool
949 is_stub_table_owner() const
950 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
951
952 // Return the stub table.
953 Stub_table<big_endian>*
954 stub_table() const
955 { return this->stub_table_; }
956
957 // Set the stub_table.
958 void
959 set_stub_table(Stub_table<big_endian>* stub_table)
960 { this->stub_table_ = stub_table; }
961
962 // Downcast a base pointer to an Arm_input_section pointer. This is
963 // not type-safe but we only use Arm_input_section not the base class.
964 static Arm_input_section<big_endian>*
965 as_arm_input_section(Output_relaxed_input_section* poris)
966 { return static_cast<Arm_input_section<big_endian>*>(poris); }
967
968 protected:
969 // Write data to output file.
970 void
971 do_write(Output_file*);
972
973 // Return required alignment of this.
974 uint64_t
975 do_addralign() const
976 {
977 if (this->is_stub_table_owner())
978 return std::max(this->stub_table_->addralign(),
979 this->original_addralign_);
980 else
981 return this->original_addralign_;
982 }
983
984 // Finalize data size.
985 void
986 set_final_data_size();
987
988 // Reset address and file offset.
989 void
990 do_reset_address_and_file_offset();
991
992 // Output offset.
993 bool
994 do_output_offset(const Relobj* object, unsigned int shndx,
995 section_offset_type offset,
996 section_offset_type* poutput) const
997 {
998 if ((object == this->relobj())
999 && (shndx == this->shndx())
1000 && (offset >= 0)
1001 && (convert_types<uint64_t, section_offset_type>(offset)
1002 <= this->original_size_))
1003 {
1004 *poutput = offset;
1005 return true;
1006 }
1007 else
1008 return false;
1009 }
1010
1011 private:
1012 // Copying is not allowed.
1013 Arm_input_section(const Arm_input_section&);
1014 Arm_input_section& operator=(const Arm_input_section&);
1015
1016 // Address alignment of the original input section.
1017 uint64_t original_addralign_;
1018 // Section size of the original input section.
1019 uint64_t original_size_;
1020 // Stub table.
1021 Stub_table<big_endian>* stub_table_;
1022 };
1023
1024 // Arm output section class. This is defined mainly to add a number of
1025 // stub generation methods.
1026
1027 template<bool big_endian>
1028 class Arm_output_section : public Output_section
1029 {
1030 public:
1031 Arm_output_section(const char* name, elfcpp::Elf_Word type,
1032 elfcpp::Elf_Xword flags)
1033 : Output_section(name, type, flags)
1034 { }
1035
1036 ~Arm_output_section()
1037 { }
1038
1039 // Group input sections for stub generation.
1040 void
1041 group_sections(section_size_type, bool, Target_arm<big_endian>*);
1042
1043 // Downcast a base pointer to an Arm_output_section pointer. This is
1044 // not type-safe but we only use Arm_output_section not the base class.
1045 static Arm_output_section<big_endian>*
1046 as_arm_output_section(Output_section* os)
1047 { return static_cast<Arm_output_section<big_endian>*>(os); }
1048
1049 private:
1050 // For convenience.
1051 typedef Output_section::Input_section Input_section;
1052 typedef Output_section::Input_section_list Input_section_list;
1053
1054 // Create a stub group.
1055 void create_stub_group(Input_section_list::const_iterator,
1056 Input_section_list::const_iterator,
1057 Input_section_list::const_iterator,
1058 Target_arm<big_endian>*,
1059 std::vector<Output_relaxed_input_section*>*);
1060 };
1061
1062 // Arm_relobj class.
1063
1064 template<bool big_endian>
1065 class Arm_relobj : public Sized_relobj<32, big_endian>
1066 {
1067 public:
1068 static const Arm_address invalid_address = static_cast<Arm_address>(-1);
1069
1070 Arm_relobj(const std::string& name, Input_file* input_file, off_t offset,
1071 const typename elfcpp::Ehdr<32, big_endian>& ehdr)
1072 : Sized_relobj<32, big_endian>(name, input_file, offset, ehdr),
1073 stub_tables_(), local_symbol_is_thumb_function_(),
1074 attributes_section_data_(NULL), mapping_symbols_info_(),
1075 section_has_cortex_a8_workaround_(NULL)
1076 { }
1077
1078 ~Arm_relobj()
1079 { delete this->attributes_section_data_; }
1080
1081 // Return the stub table of the SHNDX-th section if there is one.
1082 Stub_table<big_endian>*
1083 stub_table(unsigned int shndx) const
1084 {
1085 gold_assert(shndx < this->stub_tables_.size());
1086 return this->stub_tables_[shndx];
1087 }
1088
1089 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1090 void
1091 set_stub_table(unsigned int shndx, Stub_table<big_endian>* stub_table)
1092 {
1093 gold_assert(shndx < this->stub_tables_.size());
1094 this->stub_tables_[shndx] = stub_table;
1095 }
1096
1097 // Whether a local symbol is a THUMB function. R_SYM is the symbol table
1098 // index. This is only valid after do_count_local_symbols is called.
1099 bool
1100 local_symbol_is_thumb_function(unsigned int r_sym) const
1101 {
1102 gold_assert(r_sym < this->local_symbol_is_thumb_function_.size());
1103 return this->local_symbol_is_thumb_function_[r_sym];
1104 }
1105
1106 // Scan all relocation sections for stub generation.
1107 void
1108 scan_sections_for_stubs(Target_arm<big_endian>*, const Symbol_table*,
1109 const Layout*);
1110
1111 // Convert regular input section with index SHNDX to a relaxed section.
1112 void
1113 convert_input_section_to_relaxed_section(unsigned shndx)
1114 {
1115 // The stubs have relocations and we need to process them after writing
1116 // out the stubs. So relocations must now follow section writes.
1117 this->invalidate_section_offset(shndx);
1118 this->set_relocs_must_follow_section_writes();
1119 }
1120
1121 // Downcast a base pointer to an Arm_relobj pointer. This is
1122 // not type-safe but we only use Arm_relobj not the base class.
1123 static Arm_relobj<big_endian>*
1124 as_arm_relobj(Relobj* relobj)
1125 { return static_cast<Arm_relobj<big_endian>*>(relobj); }
1126
1127 // Processor-specific flags in ELF file header. This is valid only after
1128 // reading symbols.
1129 elfcpp::Elf_Word
1130 processor_specific_flags() const
1131 { return this->processor_specific_flags_; }
1132
1133 // Attributes section data. This is the contents of the .ARM.attributes
1134 // section if there is one.
1135 const Attributes_section_data*
1136 attributes_section_data() const
1137 { return this->attributes_section_data_; }
1138
1139 // Mapping symbol location.
1140 typedef std::pair<unsigned int, Arm_address> Mapping_symbol_position;
1141
1142 // Functor for STL container.
1143 struct Mapping_symbol_position_less
1144 {
1145 bool
1146 operator()(const Mapping_symbol_position& p1,
1147 const Mapping_symbol_position& p2) const
1148 {
1149 return (p1.first < p2.first
1150 || (p1.first == p2.first && p1.second < p2.second));
1151 }
1152 };
1153
1154 // We only care about the first character of a mapping symbol, so
1155 // we only store that instead of the whole symbol name.
1156 typedef std::map<Mapping_symbol_position, char,
1157 Mapping_symbol_position_less> Mapping_symbols_info;
1158
1159 // Whether a section contains any Cortex-A8 workaround.
1160 bool
1161 section_has_cortex_a8_workaround(unsigned int shndx) const
1162 {
1163 return (this->section_has_cortex_a8_workaround_ != NULL
1164 && (*this->section_has_cortex_a8_workaround_)[shndx]);
1165 }
1166
1167 // Mark a section that has Cortex-A8 workaround.
1168 void
1169 mark_section_for_cortex_a8_workaround(unsigned int shndx)
1170 {
1171 if (this->section_has_cortex_a8_workaround_ == NULL)
1172 this->section_has_cortex_a8_workaround_ =
1173 new std::vector<bool>(this->shnum(), false);
1174 (*this->section_has_cortex_a8_workaround_)[shndx] = true;
1175 }
1176
1177 protected:
1178 // Post constructor setup.
1179 void
1180 do_setup()
1181 {
1182 // Call parent's setup method.
1183 Sized_relobj<32, big_endian>::do_setup();
1184
1185 // Initialize look-up tables.
1186 Stub_table_list empty_stub_table_list(this->shnum(), NULL);
1187 this->stub_tables_.swap(empty_stub_table_list);
1188 }
1189
1190 // Count the local symbols.
1191 void
1192 do_count_local_symbols(Stringpool_template<char>*,
1193 Stringpool_template<char>*);
1194
1195 void
1196 do_relocate_sections(const Symbol_table* symtab, const Layout* layout,
1197 const unsigned char* pshdrs,
1198 typename Sized_relobj<32, big_endian>::Views* pviews);
1199
1200 // Read the symbol information.
1201 void
1202 do_read_symbols(Read_symbols_data* sd);
1203
1204 // Process relocs for garbage collection.
1205 void
1206 do_gc_process_relocs(Symbol_table*, Layout*, Read_relocs_data*);
1207
1208 private:
1209
1210 // Whether a section needs to be scanned for relocation stubs.
1211 bool
1212 section_needs_reloc_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1213 const Relobj::Output_sections&,
1214 const Symbol_table *);
1215
1216 // Whether a section needs to be scanned for the Cortex-A8 erratum.
1217 bool
1218 section_needs_cortex_a8_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1219 unsigned int, Output_section*,
1220 const Symbol_table *);
1221
1222 // Scan a section for the Cortex-A8 erratum.
1223 void
1224 scan_section_for_cortex_a8_erratum(const elfcpp::Shdr<32, big_endian>&,
1225 unsigned int, Output_section*,
1226 Target_arm<big_endian>*);
1227
1228 // List of stub tables.
1229 typedef std::vector<Stub_table<big_endian>*> Stub_table_list;
1230 Stub_table_list stub_tables_;
1231 // Bit vector to tell if a local symbol is a thumb function or not.
1232 // This is only valid after do_count_local_symbols is called.
1233 std::vector<bool> local_symbol_is_thumb_function_;
1234 // processor-specific flags in ELF file header.
1235 elfcpp::Elf_Word processor_specific_flags_;
1236 // Object attributes if there is an .ARM.attributes section or NULL.
1237 Attributes_section_data* attributes_section_data_;
1238 // Mapping symbols information.
1239 Mapping_symbols_info mapping_symbols_info_;
1240 // Bitmap to indicate sections with Cortex-A8 workaround or NULL.
1241 std::vector<bool>* section_has_cortex_a8_workaround_;
1242 };
1243
1244 // Arm_dynobj class.
1245
1246 template<bool big_endian>
1247 class Arm_dynobj : public Sized_dynobj<32, big_endian>
1248 {
1249 public:
1250 Arm_dynobj(const std::string& name, Input_file* input_file, off_t offset,
1251 const elfcpp::Ehdr<32, big_endian>& ehdr)
1252 : Sized_dynobj<32, big_endian>(name, input_file, offset, ehdr),
1253 processor_specific_flags_(0), attributes_section_data_(NULL)
1254 { }
1255
1256 ~Arm_dynobj()
1257 { delete this->attributes_section_data_; }
1258
1259 // Downcast a base pointer to an Arm_dynobj pointer. This is
1260 // not type-safe but we only use Arm_dynobj, not the base class.
1261 static Arm_dynobj<big_endian>*
1262 as_arm_dynobj(Dynobj* dynobj)
1263 { return static_cast<Arm_dynobj<big_endian>*>(dynobj); }
1264
1265 // Processor-specific flags in ELF file header. This is valid only after
1266 // reading symbols.
1267 elfcpp::Elf_Word
1268 processor_specific_flags() const
1269 { return this->processor_specific_flags_; }
1270
1271 // Attributes section data.
1272 const Attributes_section_data*
1273 attributes_section_data() const
1274 { return this->attributes_section_data_; }
1275
1276 protected:
1277 // Read the symbol information.
1278 void
1279 do_read_symbols(Read_symbols_data* sd);
1280
1281 private:
1282 // processor-specific flags in ELF file header.
1283 elfcpp::Elf_Word processor_specific_flags_;
1284 // Object attributes if there is an .ARM.attributes section or NULL.
1285 Attributes_section_data* attributes_section_data_;
1286 };
1287
1288 // Functor to read reloc addends during stub generation.
1289
1290 template<int sh_type, bool big_endian>
1291 struct Stub_addend_reader
1292 {
1293 // Return the addend for a relocation of a particular type. Depending
1294 // on whether this is a REL or RELA relocation, read the addend from a
1295 // view or from a Reloc object.
1296 elfcpp::Elf_types<32>::Elf_Swxword
1297 operator()(
1298 unsigned int /* r_type */,
1299 const unsigned char* /* view */,
1300 const typename Reloc_types<sh_type,
1301 32, big_endian>::Reloc& /* reloc */) const;
1302 };
1303
1304 // Specialized Stub_addend_reader for SHT_REL type relocation sections.
1305
1306 template<bool big_endian>
1307 struct Stub_addend_reader<elfcpp::SHT_REL, big_endian>
1308 {
1309 elfcpp::Elf_types<32>::Elf_Swxword
1310 operator()(
1311 unsigned int,
1312 const unsigned char*,
1313 const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const;
1314 };
1315
1316 // Specialized Stub_addend_reader for RELA type relocation sections.
1317 // We currently do not handle RELA type relocation sections but it is trivial
1318 // to implement the addend reader. This is provided for completeness and to
1319 // make it easier to add support for RELA relocation sections in the future.
1320
1321 template<bool big_endian>
1322 struct Stub_addend_reader<elfcpp::SHT_RELA, big_endian>
1323 {
1324 elfcpp::Elf_types<32>::Elf_Swxword
1325 operator()(
1326 unsigned int,
1327 const unsigned char*,
1328 const typename Reloc_types<elfcpp::SHT_RELA, 32,
1329 big_endian>::Reloc& reloc) const
1330 { return reloc.get_r_addend(); }
1331 };
1332
1333 // Cortex_a8_reloc class. We keep a record of relocations that may need
1334 // the Cortex-A8 erratum workaround.
1335
1336 class Cortex_a8_reloc
1337 {
1338 public:
1339 Cortex_a8_reloc(Reloc_stub* reloc_stub, unsigned r_type,
1340 Arm_address destination)
1341 : reloc_stub_(reloc_stub), r_type_(r_type), destination_(destination)
1342 { }
1343
1344 ~Cortex_a8_reloc()
1345 { }
1346
1347 // Accessors: This is a read-only class.
1348
1349 // Return the relocation stub associated with this relocation if there is
1350 // one.
1351 const Reloc_stub*
1352 reloc_stub() const
1353 { return this->reloc_stub_; }
1354
1355 // Return the relocation type.
1356 unsigned int
1357 r_type() const
1358 { return this->r_type_; }
1359
1360 // Return the destination address of the relocation. LSB stores the THUMB
1361 // bit.
1362 Arm_address
1363 destination() const
1364 { return this->destination_; }
1365
1366 private:
1367 // Associated relocation stub if there is one, or NULL.
1368 const Reloc_stub* reloc_stub_;
1369 // Relocation type.
1370 unsigned int r_type_;
1371 // Destination address of this relocation. LSB is used to distinguish
1372 // ARM/THUMB mode.
1373 Arm_address destination_;
1374 };
1375
1376 // Utilities for manipulating integers of up to 32 bits.
1377
1378 namespace utils
1379 {
1380 // Sign-extend an NO_BITS-bit unsigned integer stored in a uint32_t into
1381 // an int32_t. NO_BITS must be between 1 and 32.
1382 template<int no_bits>
1383 static inline int32_t
1384 sign_extend(uint32_t bits)
1385 {
1386 gold_assert(no_bits >= 0 && no_bits <= 32);
1387 if (no_bits == 32)
1388 return static_cast<int32_t>(bits);
1389 uint32_t mask = (~((uint32_t) 0)) >> (32 - no_bits);
1390 bits &= mask;
1391 uint32_t top_bit = 1U << (no_bits - 1);
1392 int32_t as_signed = static_cast<int32_t>(bits);
1393 return (bits & top_bit) ? as_signed + (-top_bit * 2) : as_signed;
1394 }
1395
1396 // Detects overflow of an NO_BITS-bit signed integer stored in a uint32_t.
1397 template<int no_bits>
1398 static inline bool
1399 has_overflow(uint32_t bits)
1400 {
1401 gold_assert(no_bits >= 0 && no_bits <= 32);
1402 if (no_bits == 32)
1403 return false;
1404 int32_t max = (1 << (no_bits - 1)) - 1;
1405 int32_t min = -(1 << (no_bits - 1));
1406 int32_t as_signed = static_cast<int32_t>(bits);
1407 return as_signed > max || as_signed < min;
1408 }
1409
1410 // Detects overflow of an NO_BITS-bit integer stored in a uint32_t when it
1411 // is interpreted as either a signed or an unsigned value. For example,
1412 // has_signed_unsigned_overflow<8> returns false only when
1413 // -128 <= bits <= 255.
1414 template<int no_bits>
1415 static inline bool
1416 has_signed_unsigned_overflow(uint32_t bits)
1417 {
1418 gold_assert(no_bits >= 2 && no_bits <= 32);
1419 if (no_bits == 32)
1420 return false;
1421 int32_t max = static_cast<int32_t>((1U << no_bits) - 1);
1422 int32_t min = -(1 << (no_bits - 1));
1423 int32_t as_signed = static_cast<int32_t>(bits);
1424 return as_signed > max || as_signed < min;
1425 }
1426
1427 // Select bits from A and B using bits in MASK. For each n in [0..31],
1428 // the n-th bit in the result is chosen from the n-th bits of A and B.
1429 // A zero selects A and a one selects B.
1430 static inline uint32_t
1431 bit_select(uint32_t a, uint32_t b, uint32_t mask)
1432 { return (a & ~mask) | (b & mask); }
1433 };
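
// Illustrative examples of the helpers above (not part of the original
// source):
//   utils::sign_extend<24>(0x00ffffffU)                      == -1
//   utils::has_overflow<8>(0x80U)                            == true   // 128 > 127
//   utils::has_signed_unsigned_overflow<8>(0x80U)            == false  // fits as unsigned
//   utils::bit_select(0xff00ff00U, 0x12345678U, 0x0000ffffU) == 0xff005678U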
1434
1435 template<bool big_endian>
1436 class Target_arm : public Sized_target<32, big_endian>
1437 {
1438 public:
1439 typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
1440 Reloc_section;
1441
1442 // When we are relocating a stub, we pass this as the relocation number.
1443 static const size_t fake_relnum_for_stubs = static_cast<size_t>(-1);
1444
1445 Target_arm()
1446 : Sized_target<32, big_endian>(&arm_info),
1447 got_(NULL), plt_(NULL), got_plt_(NULL), rel_dyn_(NULL),
1448 copy_relocs_(elfcpp::R_ARM_COPY), dynbss_(NULL), stub_tables_(),
1449 stub_factory_(Stub_factory::get_instance()), may_use_blx_(false),
1450 should_force_pic_veneer_(false), arm_input_section_map_(),
1451 attributes_section_data_(NULL), fix_cortex_a8_(false),
1452 cortex_a8_relocs_info_()
1453 { }
1454
1455 // Whether we can use BLX.
1456 bool
1457 may_use_blx() const
1458 { return this->may_use_blx_; }
1459
1460 // Set use-BLX flag.
1461 void
1462 set_may_use_blx(bool value)
1463 { this->may_use_blx_ = value; }
1464
1465 // Whether we force PIC branch veneers.
1466 bool
1467 should_force_pic_veneer() const
1468 { return this->should_force_pic_veneer_; }
1469
1470 // Set PIC veneer flag.
1471 void
1472 set_should_force_pic_veneer(bool value)
1473 { this->should_force_pic_veneer_ = value; }
1474
1475 // Whether we use THUMB-2 instructions.
1476 bool
1477 using_thumb2() const
1478 {
1479 Object_attribute* attr =
1480 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1481 int arch = attr->int_value();
1482 return arch == elfcpp::TAG_CPU_ARCH_V6T2 || arch >= elfcpp::TAG_CPU_ARCH_V7;
1483 }
1484
1485 // Whether we use THUMB/THUMB-2 instructions only.
1486 bool
1487 using_thumb_only() const
1488 {
1489 Object_attribute* attr =
1490 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1491 if (attr->int_value() != elfcpp::TAG_CPU_ARCH_V7
1492 && attr->int_value() != elfcpp::TAG_CPU_ARCH_V7E_M)
1493 return false;
1494 attr = this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
1495 return attr->int_value() == 'M';
1496 }
1497
1498 // Whether we have a NOP instruction. If not, use mov r0, r0 instead.
1499 bool
1500 may_use_arm_nop() const
1501 {
1502 Object_attribute* attr =
1503 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1504 int arch = attr->int_value();
1505 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
1506 || arch == elfcpp::TAG_CPU_ARCH_V6K
1507 || arch == elfcpp::TAG_CPU_ARCH_V7
1508 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
1509 }
1510
1511 // Whether we have the THUMB-2 NOP.W instruction.
1512 bool
1513 may_use_thumb2_nop() const
1514 {
1515 Object_attribute* attr =
1516 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1517 int arch = attr->int_value();
1518 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
1519 || arch == elfcpp::TAG_CPU_ARCH_V7
1520 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
1521 }
1522
1523 // Process the relocations to determine unreferenced sections for
1524 // garbage collection.
1525 void
1526 gc_process_relocs(Symbol_table* symtab,
1527 Layout* layout,
1528 Sized_relobj<32, big_endian>* object,
1529 unsigned int data_shndx,
1530 unsigned int sh_type,
1531 const unsigned char* prelocs,
1532 size_t reloc_count,
1533 Output_section* output_section,
1534 bool needs_special_offset_handling,
1535 size_t local_symbol_count,
1536 const unsigned char* plocal_symbols);
1537
1538 // Scan the relocations to look for symbol adjustments.
1539 void
1540 scan_relocs(Symbol_table* symtab,
1541 Layout* layout,
1542 Sized_relobj<32, big_endian>* object,
1543 unsigned int data_shndx,
1544 unsigned int sh_type,
1545 const unsigned char* prelocs,
1546 size_t reloc_count,
1547 Output_section* output_section,
1548 bool needs_special_offset_handling,
1549 size_t local_symbol_count,
1550 const unsigned char* plocal_symbols);
1551
1552 // Finalize the sections.
1553 void
1554 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
1555
1556 // Return the value to use for a dynamic symbol which requires special
1557 // treatment.
1558 uint64_t
1559 do_dynsym_value(const Symbol*) const;
1560
1561 // Relocate a section.
1562 void
1563 relocate_section(const Relocate_info<32, big_endian>*,
1564 unsigned int sh_type,
1565 const unsigned char* prelocs,
1566 size_t reloc_count,
1567 Output_section* output_section,
1568 bool needs_special_offset_handling,
1569 unsigned char* view,
1570 Arm_address view_address,
1571 section_size_type view_size,
1572 const Reloc_symbol_changes*);
1573
1574 // Scan the relocs during a relocatable link.
1575 void
1576 scan_relocatable_relocs(Symbol_table* symtab,
1577 Layout* layout,
1578 Sized_relobj<32, big_endian>* object,
1579 unsigned int data_shndx,
1580 unsigned int sh_type,
1581 const unsigned char* prelocs,
1582 size_t reloc_count,
1583 Output_section* output_section,
1584 bool needs_special_offset_handling,
1585 size_t local_symbol_count,
1586 const unsigned char* plocal_symbols,
1587 Relocatable_relocs*);
1588
1589 // Relocate a section during a relocatable link.
1590 void
1591 relocate_for_relocatable(const Relocate_info<32, big_endian>*,
1592 unsigned int sh_type,
1593 const unsigned char* prelocs,
1594 size_t reloc_count,
1595 Output_section* output_section,
1596 off_t offset_in_output_section,
1597 const Relocatable_relocs*,
1598 unsigned char* view,
1599 Arm_address view_address,
1600 section_size_type view_size,
1601 unsigned char* reloc_view,
1602 section_size_type reloc_view_size);
1603
1604 // Return whether SYM is defined by the ABI.
1605 bool
1606 do_is_defined_by_abi(Symbol* sym) const
1607 { return strcmp(sym->name(), "__tls_get_addr") == 0; }
1608
1609 // Return the size of the GOT section.
1610 section_size_type
1611 got_size()
1612 {
1613 gold_assert(this->got_ != NULL);
1614 return this->got_->data_size();
1615 }
1616
1617 // Map platform-specific reloc types
1618 static unsigned int
1619 get_real_reloc_type (unsigned int r_type);
1620
1621 //
1622 // Methods to support stub generation.
1623 //
1624
1625 // Return the stub factory
1626 const Stub_factory&
1627 stub_factory() const
1628 { return this->stub_factory_; }
1629
1630 // Make a new Arm_input_section object.
1631 Arm_input_section<big_endian>*
1632 new_arm_input_section(Relobj*, unsigned int);
1633
1634 // Find the Arm_input_section object corresponding to the SHNDX-th input
1635 // section of RELOBJ.
1636 Arm_input_section<big_endian>*
1637 find_arm_input_section(Relobj* relobj, unsigned int shndx) const;
1638
1639 // Make a new Stub_table
1640 Stub_table<big_endian>*
1641 new_stub_table(Arm_input_section<big_endian>*);
1642
1643 // Scan a section for stub generation.
1644 void
1645 scan_section_for_stubs(const Relocate_info<32, big_endian>*, unsigned int,
1646 const unsigned char*, size_t, Output_section*,
1647 bool, const unsigned char*, Arm_address,
1648 section_size_type);
1649
1650 // Relocate a stub.
1651 void
1652 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
1653 Output_section*, unsigned char*, Arm_address,
1654 section_size_type);
1655
1656 // Get the default ARM target.
1657 static Target_arm<big_endian>*
1658 default_target()
1659 {
1660 gold_assert(parameters->target().machine_code() == elfcpp::EM_ARM
1661 && parameters->target().is_big_endian() == big_endian);
1662 return static_cast<Target_arm<big_endian>*>(
1663 parameters->sized_target<32, big_endian>());
1664 }
1665
1666 // Whether relocation type uses LSB to distinguish THUMB addresses.
1667 static bool
1668 reloc_uses_thumb_bit(unsigned int r_type);
1669
1670 // Whether NAME belongs to a mapping symbol.
1671 static bool
1672 is_mapping_symbol_name(const char* name)
1673 {
1674 return (name
1675 && name[0] == '$'
1676 && (name[1] == 'a' || name[1] == 't' || name[1] == 'd')
1677 && (name[2] == '\0' || name[2] == '.'));
1678 }
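
// For example (illustrative), "$a", "$t", "$d" and "$t.2" are mapping
// symbol names; "$v" and "$ax" are not.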
1679
1680 // Whether we work around the Cortex-A8 erratum.
1681 bool
1682 fix_cortex_a8() const
1683 { return this->fix_cortex_a8_; }
1684
1685 // Scan a span of THUMB code section for Cortex-A8 erratum.
1686 void
1687 scan_span_for_cortex_a8_erratum(Arm_relobj<big_endian>*, unsigned int,
1688 section_size_type, section_size_type,
1689 const unsigned char*, Arm_address);
1690
1691 // Apply Cortex-A8 workaround to a branch.
1692 void
1693 apply_cortex_a8_workaround(const Cortex_a8_stub*, Arm_address,
1694 unsigned char*, Arm_address);
1695
1696 protected:
1697 // Make an ELF object.
1698 Object*
1699 do_make_elf_object(const std::string&, Input_file*, off_t,
1700 const elfcpp::Ehdr<32, big_endian>& ehdr);
1701
1702 Object*
1703 do_make_elf_object(const std::string&, Input_file*, off_t,
1704 const elfcpp::Ehdr<32, !big_endian>&)
1705 { gold_unreachable(); }
1706
1707 Object*
1708 do_make_elf_object(const std::string&, Input_file*, off_t,
1709 const elfcpp::Ehdr<64, false>&)
1710 { gold_unreachable(); }
1711
1712 Object*
1713 do_make_elf_object(const std::string&, Input_file*, off_t,
1714 const elfcpp::Ehdr<64, true>&)
1715 { gold_unreachable(); }
1716
1717 // Make an output section.
1718 Output_section*
1719 do_make_output_section(const char* name, elfcpp::Elf_Word type,
1720 elfcpp::Elf_Xword flags)
1721 { return new Arm_output_section<big_endian>(name, type, flags); }
1722
1723 void
1724 do_adjust_elf_header(unsigned char* view, int len) const;
1725
1726 // We only need to generate stubs, and hence perform relaxation, if we
1727 // are not doing a relocatable link.
1728 bool
1729 do_may_relax() const
1730 { return !parameters->options().relocatable(); }
1731
1732 bool
1733 do_relax(int, const Input_objects*, Symbol_table*, Layout*);
1734
1735 // Determine whether an object attribute tag takes an integer, a
1736 // string or both.
1737 int
1738 do_attribute_arg_type(int tag) const;
1739
1740 // Reorder tags during output.
1741 int
1742 do_attributes_order(int num) const;
1743
1744 private:
1745 // The class which scans relocations.
1746 class Scan
1747 {
1748 public:
1749 Scan()
1750 : issued_non_pic_error_(false)
1751 { }
1752
1753 inline void
1754 local(Symbol_table* symtab, Layout* layout, Target_arm* target,
1755 Sized_relobj<32, big_endian>* object,
1756 unsigned int data_shndx,
1757 Output_section* output_section,
1758 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
1759 const elfcpp::Sym<32, big_endian>& lsym);
1760
1761 inline void
1762 global(Symbol_table* symtab, Layout* layout, Target_arm* target,
1763 Sized_relobj<32, big_endian>* object,
1764 unsigned int data_shndx,
1765 Output_section* output_section,
1766 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
1767 Symbol* gsym);
1768
1769 private:
1770 static void
1771 unsupported_reloc_local(Sized_relobj<32, big_endian>*,
1772 unsigned int r_type);
1773
1774 static void
1775 unsupported_reloc_global(Sized_relobj<32, big_endian>*,
1776 unsigned int r_type, Symbol*);
1777
1778 void
1779 check_non_pic(Relobj*, unsigned int r_type);
1780
1781 // Almost identical to Symbol::needs_plt_entry except that it also
1782 // handles STT_ARM_TFUNC.
1783 static bool
1784 symbol_needs_plt_entry(const Symbol* sym)
1785 {
1786 // An undefined symbol from an executable does not need a PLT entry.
1787 if (sym->is_undefined() && !parameters->options().shared())
1788 return false;
1789
1790 return (!parameters->doing_static_link()
1791 && (sym->type() == elfcpp::STT_FUNC
1792 || sym->type() == elfcpp::STT_ARM_TFUNC)
1793 && (sym->is_from_dynobj()
1794 || sym->is_undefined()
1795 || sym->is_preemptible()));
1796 }
1797
1798 // Whether we have issued an error about a non-PIC compilation.
1799 bool issued_non_pic_error_;
1800 };
1801
1802 // The class which implements relocation.
1803 class Relocate
1804 {
1805 public:
1806 Relocate()
1807 { }
1808
1809 ~Relocate()
1810 { }
1811
1812 // Return whether the static relocation needs to be applied.
1813 inline bool
1814 should_apply_static_reloc(const Sized_symbol<32>* gsym,
1815 int ref_flags,
1816 bool is_32bit,
1817 Output_section* output_section);
1818
1819 // Do a relocation. Return false if the caller should not issue
1820 // any warnings about this relocation.
1821 inline bool
1822 relocate(const Relocate_info<32, big_endian>*, Target_arm*,
1823 Output_section*, size_t relnum,
1824 const elfcpp::Rel<32, big_endian>&,
1825 unsigned int r_type, const Sized_symbol<32>*,
1826 const Symbol_value<32>*,
1827 unsigned char*, Arm_address,
1828 section_size_type);
1829
1830 // Return whether we want to pass flag NON_PIC_REF for this
1831 // reloc. This means the relocation type accesses a symbol not via
1832 // GOT or PLT.
1833 static inline bool
1834 reloc_is_non_pic (unsigned int r_type)
1835 {
1836 switch (r_type)
1837 {
1838 // These relocation types reference GOT or PLT entries explicitly.
1839 case elfcpp::R_ARM_GOT_BREL:
1840 case elfcpp::R_ARM_GOT_ABS:
1841 case elfcpp::R_ARM_GOT_PREL:
1842 case elfcpp::R_ARM_GOT_BREL12:
1843 case elfcpp::R_ARM_PLT32_ABS:
1844 case elfcpp::R_ARM_TLS_GD32:
1845 case elfcpp::R_ARM_TLS_LDM32:
1846 case elfcpp::R_ARM_TLS_IE32:
1847 case elfcpp::R_ARM_TLS_IE12GP:
1848
1849       // These relocation types may use PLT entries.
1850 case elfcpp::R_ARM_CALL:
1851 case elfcpp::R_ARM_THM_CALL:
1852 case elfcpp::R_ARM_JUMP24:
1853 case elfcpp::R_ARM_THM_JUMP24:
1854 case elfcpp::R_ARM_THM_JUMP19:
1855 case elfcpp::R_ARM_PLT32:
1856 case elfcpp::R_ARM_THM_XPC22:
1857 return false;
1858
1859 default:
1860 return true;
1861 }
1862 }
1863 };
1864
1865 // A class which returns the size required for a relocation type,
1866 // used while scanning relocs during a relocatable link.
1867 class Relocatable_size_for_reloc
1868 {
1869 public:
1870 unsigned int
1871 get_size_for_reloc(unsigned int, Relobj*);
1872 };
1873
1874 // Get the GOT section, creating it if necessary.
1875 Output_data_got<32, big_endian>*
1876 got_section(Symbol_table*, Layout*);
1877
1878 // Get the GOT PLT section.
1879 Output_data_space*
1880 got_plt_section() const
1881 {
1882 gold_assert(this->got_plt_ != NULL);
1883 return this->got_plt_;
1884 }
1885
1886 // Create a PLT entry for a global symbol.
1887 void
1888 make_plt_entry(Symbol_table*, Layout*, Symbol*);
1889
1890 // Get the PLT section.
1891 const Output_data_plt_arm<big_endian>*
1892 plt_section() const
1893 {
1894 gold_assert(this->plt_ != NULL);
1895 return this->plt_;
1896 }
1897
1898 // Get the dynamic reloc section, creating it if necessary.
1899 Reloc_section*
1900 rel_dyn_section(Layout*);
1901
1902 // Return true if the symbol may need a COPY relocation.
1903 // References from an executable object to non-function symbols
1904 // defined in a dynamic object may need a COPY relocation.
1905 bool
1906 may_need_copy_reloc(Symbol* gsym)
1907 {
1908 return (gsym->type() != elfcpp::STT_ARM_TFUNC
1909 && gsym->may_need_copy_reloc());
1910 }
1911
1912 // Add a potential copy relocation.
1913 void
1914 copy_reloc(Symbol_table* symtab, Layout* layout,
1915 Sized_relobj<32, big_endian>* object,
1916 unsigned int shndx, Output_section* output_section,
1917 Symbol* sym, const elfcpp::Rel<32, big_endian>& reloc)
1918 {
1919 this->copy_relocs_.copy_reloc(symtab, layout,
1920 symtab->get_sized_symbol<32>(sym),
1921 object, shndx, output_section, reloc,
1922 this->rel_dyn_section(layout));
1923 }
1924
1925 // Whether two EABI versions are compatible.
1926 static bool
1927 are_eabi_versions_compatible(elfcpp::Elf_Word v1, elfcpp::Elf_Word v2);
1928
1929 // Merge processor-specific flags from input object and those in the ELF
1930 // header of the output.
1931 void
1932 merge_processor_specific_flags(const std::string&, elfcpp::Elf_Word);
1933
1934 // Get the secondary compatible architecture.
1935 static int
1936 get_secondary_compatible_arch(const Attributes_section_data*);
1937
1938 // Set the secondary compatible architecture.
1939 static void
1940 set_secondary_compatible_arch(Attributes_section_data*, int);
1941
1942 static int
1943 tag_cpu_arch_combine(const char*, int, int*, int, int);
1944
1945 // Helper to print AEABI enum tag value.
1946 static std::string
1947 aeabi_enum_name(unsigned int);
1948
1949 // Return string value for TAG_CPU_name.
1950 static std::string
1951 tag_cpu_name_value(unsigned int);
1952
1953 // Merge object attributes from input object and those in the output.
1954 void
1955 merge_object_attributes(const char*, const Attributes_section_data*);
1956
1957   // Helper to get an AEABI object attribute.
1958 Object_attribute*
1959 get_aeabi_object_attribute(int tag) const
1960 {
1961 Attributes_section_data* pasd = this->attributes_section_data_;
1962 gold_assert(pasd != NULL);
1963 Object_attribute* attr =
1964 pasd->get_attribute(Object_attribute::OBJ_ATTR_PROC, tag);
1965 gold_assert(attr != NULL);
1966 return attr;
1967 }
1968
1969 //
1970   // Methods to support stub generation.
1971 //
1972
1973 // Group input sections for stub generation.
1974 void
1975 group_sections(Layout*, section_size_type, bool);
1976
1977 // Scan a relocation for stub generation.
1978 void
1979 scan_reloc_for_stub(const Relocate_info<32, big_endian>*, unsigned int,
1980 const Sized_symbol<32>*, unsigned int,
1981 const Symbol_value<32>*,
1982 elfcpp::Elf_types<32>::Elf_Swxword, Arm_address);
1983
1984   // Scan a relocation section for stubs.
1985 template<int sh_type>
1986 void
1987 scan_reloc_section_for_stubs(
1988 const Relocate_info<32, big_endian>* relinfo,
1989 const unsigned char* prelocs,
1990 size_t reloc_count,
1991 Output_section* output_section,
1992 bool needs_special_offset_handling,
1993 const unsigned char* view,
1994 elfcpp::Elf_types<32>::Elf_Addr view_address,
1995 section_size_type);
1996
1997 // Information about this specific target which we pass to the
1998 // general Target structure.
1999 static const Target::Target_info arm_info;
2000
2001 // The types of GOT entries needed for this platform.
2002 enum Got_type
2003 {
2004 GOT_TYPE_STANDARD = 0 // GOT entry for a regular symbol
2005 };
2006
2007 typedef typename std::vector<Stub_table<big_endian>*> Stub_table_list;
2008
2009 // Map input section to Arm_input_section.
2010 typedef Unordered_map<Input_section_specifier,
2011 Arm_input_section<big_endian>*,
2012 Input_section_specifier::hash,
2013 Input_section_specifier::equal_to>
2014 Arm_input_section_map;
2015
2016 // Map output addresses to relocs for Cortex-A8 erratum.
2017 typedef Unordered_map<Arm_address, const Cortex_a8_reloc*>
2018 Cortex_a8_relocs_info;
2019
2020 // The GOT section.
2021 Output_data_got<32, big_endian>* got_;
2022 // The PLT section.
2023 Output_data_plt_arm<big_endian>* plt_;
2024 // The GOT PLT section.
2025 Output_data_space* got_plt_;
2026 // The dynamic reloc section.
2027 Reloc_section* rel_dyn_;
2028 // Relocs saved to avoid a COPY reloc.
2029 Copy_relocs<elfcpp::SHT_REL, 32, big_endian> copy_relocs_;
2030 // Space for variables copied with a COPY reloc.
2031 Output_data_space* dynbss_;
2032 // Vector of Stub_tables created.
2033 Stub_table_list stub_tables_;
2034 // Stub factory.
2035 const Stub_factory &stub_factory_;
2036 // Whether we can use BLX.
2037 bool may_use_blx_;
2038 // Whether we force PIC branch veneers.
2039 bool should_force_pic_veneer_;
2040 // Map for locating Arm_input_sections.
2041 Arm_input_section_map arm_input_section_map_;
2042 // Attributes section data in output.
2043 Attributes_section_data* attributes_section_data_;
2044 // Whether we want to fix code for Cortex-A8 erratum.
2045 bool fix_cortex_a8_;
2046 // Map addresses to relocs for Cortex-A8 erratum.
2047 Cortex_a8_relocs_info cortex_a8_relocs_info_;
2048 };
2049
2050 template<bool big_endian>
2051 const Target::Target_info Target_arm<big_endian>::arm_info =
2052 {
2053 32, // size
2054 big_endian, // is_big_endian
2055 elfcpp::EM_ARM, // machine_code
2056 false, // has_make_symbol
2057 false, // has_resolve
2058 false, // has_code_fill
2059 true, // is_default_stack_executable
2060 '\0', // wrap_char
2061 "/usr/lib/libc.so.1", // dynamic_linker
2062 0x8000, // default_text_segment_address
2063 0x1000, // abi_pagesize (overridable by -z max-page-size)
2064 0x1000, // common_pagesize (overridable by -z common-page-size)
2065 elfcpp::SHN_UNDEF, // small_common_shndx
2066 elfcpp::SHN_UNDEF, // large_common_shndx
2067 0, // small_common_section_flags
2068 0, // large_common_section_flags
2069 ".ARM.attributes", // attributes_section
2070 "aeabi" // attributes_vendor
2071 };
2072
2073 // Arm relocate functions class
2074 //
2075
2076 template<bool big_endian>
2077 class Arm_relocate_functions : public Relocate_functions<32, big_endian>
2078 {
2079 public:
2080 typedef enum
2081 {
2082 STATUS_OKAY, // No error during relocation.
2083 STATUS_OVERFLOW, // Relocation oveflow.
2084     STATUS_OVERFLOW,	// Relocation overflow.
2085 } Status;
2086
2087 private:
2088 typedef Relocate_functions<32, big_endian> Base;
2089 typedef Arm_relocate_functions<big_endian> This;
2090
2091 // Encoding of imm16 argument for movt and movw ARM instructions
2092 // from ARM ARM:
2093 //
2094 // imm16 := imm4 | imm12
2095 //
2096 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2097 // +-------+---------------+-------+-------+-----------------------+
2098 // | | |imm4 | |imm12 |
2099 // +-------+---------------+-------+-------+-----------------------+
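  //
  // For example, an immediate of 0x1234 is encoded with imm4 = 0x1
  // (bits 19:16) and imm12 = 0x234 (bits 11:0).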
2100
2101 // Extract the relocation addend from VAL based on the ARM
2102 // instruction encoding described above.
2103 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2104 extract_arm_movw_movt_addend(
2105 typename elfcpp::Swap<32, big_endian>::Valtype val)
2106 {
2107     // According to the ELF ABI for the ARM Architecture, the immediate
2108 // field is sign-extended to form the addend.
2109 return utils::sign_extend<16>(((val >> 4) & 0xf000) | (val & 0xfff));
2110 }
2111
2112 // Insert X into VAL based on the ARM instruction encoding described
2113 // above.
2114 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2115 insert_val_arm_movw_movt(
2116 typename elfcpp::Swap<32, big_endian>::Valtype val,
2117 typename elfcpp::Swap<32, big_endian>::Valtype x)
2118 {
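    // Clear the imm4 (bits 19:16) and imm12 (bits 11:0) fields before
    // inserting the new immediate.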
2119 val &= 0xfff0f000;
2120 val |= x & 0x0fff;
2121 val |= (x & 0xf000) << 4;
2122 return val;
2123 }
2124
2125 // Encoding of imm16 argument for movt and movw Thumb2 instructions
2126 // from ARM ARM:
2127 //
2128 // imm16 := imm4 | i | imm3 | imm8
2129 //
2130 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2131 // +---------+-+-----------+-------++-+-----+-------+---------------+
2132 // | |i| |imm4 || |imm3 | |imm8 |
2133 // +---------+-+-----------+-------++-+-----+-------+---------------+
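  //
  // For example, an immediate of 0x1234 splits into imm4 = 0x1, i = 0,
  // imm3 = 0x2 and imm8 = 0x34.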
2134
2135 // Extract the relocation addend from VAL based on the Thumb2
2136 // instruction encoding described above.
2137 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2138 extract_thumb_movw_movt_addend(
2139 typename elfcpp::Swap<32, big_endian>::Valtype val)
2140 {
2141     // According to the ELF ABI for the ARM Architecture, the immediate
2142 // field is sign-extended to form the addend.
2143 return utils::sign_extend<16>(((val >> 4) & 0xf000)
2144 | ((val >> 15) & 0x0800)
2145 | ((val >> 4) & 0x0700)
2146 | (val & 0x00ff));
2147 }
2148
2149 // Insert X into VAL based on the Thumb2 instruction encoding
2150 // described above.
2151 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2152 insert_val_thumb_movw_movt(
2153 typename elfcpp::Swap<32, big_endian>::Valtype val,
2154 typename elfcpp::Swap<32, big_endian>::Valtype x)
2155 {
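    // Clear the imm4 (bits 19:16), i (bit 26), imm3 (bits 14:12) and
    // imm8 (bits 7:0) fields before inserting the new immediate.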
2156 val &= 0xfbf08f00;
2157 val |= (x & 0xf000) << 4;
2158 val |= (x & 0x0800) << 15;
2159 val |= (x & 0x0700) << 4;
2160 val |= (x & 0x00ff);
2161 return val;
2162 }
2163
2164 // Handle ARM long branches.
2165 static typename This::Status
2166 arm_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2167 unsigned char *, const Sized_symbol<32>*,
2168 const Arm_relobj<big_endian>*, unsigned int,
2169 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2170
2171 // Handle THUMB long branches.
2172 static typename This::Status
2173 thumb_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2174 unsigned char *, const Sized_symbol<32>*,
2175 const Arm_relobj<big_endian>*, unsigned int,
2176 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2177
2178 public:
2179
2180 // Return the branch offset of a 32-bit THUMB branch.
2181 static inline int32_t
2182 thumb32_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
2183 {
2184 // We use the Thumb-2 encoding (backwards compatible with Thumb-1)
2185 // involving the J1 and J2 bits.
2186 uint32_t s = (upper_insn & (1U << 10)) >> 10;
2187 uint32_t upper = upper_insn & 0x3ffU;
2188 uint32_t lower = lower_insn & 0x7ffU;
2189 uint32_t j1 = (lower_insn & (1U << 13)) >> 13;
2190 uint32_t j2 = (lower_insn & (1U << 11)) >> 11;
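    // Per the Thumb-2 branch encoding, I1 = NOT(J1 EOR S) and
    // I2 = NOT(J2 EOR S).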
2191 uint32_t i1 = j1 ^ s ? 0 : 1;
2192 uint32_t i2 = j2 ^ s ? 0 : 1;
2193
2194 return utils::sign_extend<25>((s << 24) | (i1 << 23) | (i2 << 22)
2195 | (upper << 12) | (lower << 1));
2196 }
2197
2198 // Insert OFFSET to a 32-bit THUMB branch and return the upper instruction.
2199 // UPPER_INSN is the original upper instruction of the branch. Caller is
2200 // responsible for overflow checking and BLX offset adjustment.
2201 static inline uint16_t
2202 thumb32_branch_upper(uint16_t upper_insn, int32_t offset)
2203 {
2204 uint32_t s = offset < 0 ? 1 : 0;
2205 uint32_t bits = static_cast<uint32_t>(offset);
2206 return (upper_insn & ~0x7ffU) | ((bits >> 12) & 0x3ffU) | (s << 10);
2207 }
2208
2209 // Insert OFFSET to a 32-bit THUMB branch and return the lower instruction.
2210 // LOWER_INSN is the original lower instruction of the branch. Caller is
2211 // responsible for overflow checking and BLX offset adjustment.
2212 static inline uint16_t
2213 thumb32_branch_lower(uint16_t lower_insn, int32_t offset)
2214 {
2215 uint32_t s = offset < 0 ? 1 : 0;
2216 uint32_t bits = static_cast<uint32_t>(offset);
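    // Re-encode J1 = I1 EOR NOT(S) and J2 = I2 EOR NOT(S), where I1 and I2
    // are bits 23 and 22 of the offset.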
2217 return ((lower_insn & ~0x2fffU)
2218 | ((((bits >> 23) & 1) ^ !s) << 13)
2219 | ((((bits >> 22) & 1) ^ !s) << 11)
2220 | ((bits >> 1) & 0x7ffU));
2221 }
2222
2223 // Return the branch offset of a 32-bit THUMB conditional branch.
2224 static inline int32_t
2225 thumb32_cond_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
2226 {
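    // The offset is encoded as S:J2:J1:imm6:imm11:'0', a 21-bit signed value.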
2227 uint32_t s = (upper_insn & 0x0400U) >> 10;
2228 uint32_t j1 = (lower_insn & 0x2000U) >> 13;
2229 uint32_t j2 = (lower_insn & 0x0800U) >> 11;
2230 uint32_t lower = (lower_insn & 0x07ffU);
2231 uint32_t upper = (s << 8) | (j2 << 7) | (j1 << 6) | (upper_insn & 0x003fU);
2232
2233 return utils::sign_extend<21>((upper << 12) | (lower << 1));
2234 }
2235
2236 // Insert OFFSET to a 32-bit THUMB conditional branch and return the upper
2237 // instruction. UPPER_INSN is the original upper instruction of the branch.
2238 // Caller is responsible for overflow checking.
2239 static inline uint16_t
2240 thumb32_cond_branch_upper(uint16_t upper_insn, int32_t offset)
2241 {
2242 uint32_t s = offset < 0 ? 1 : 0;
2243 uint32_t bits = static_cast<uint32_t>(offset);
2244 return (upper_insn & 0xfbc0U) | (s << 10) | ((bits & 0x0003f000U) >> 12);
2245 }
2246
2247 // Insert OFFSET to a 32-bit THUMB conditional branch and return the lower
2248 // instruction. LOWER_INSN is the original lower instruction of the branch.
2249   // Caller is responsible for overflow checking.
2250 static inline uint16_t
2251 thumb32_cond_branch_lower(uint16_t lower_insn, int32_t offset)
2252 {
2253 uint32_t bits = static_cast<uint32_t>(offset);
2254 uint32_t j2 = (bits & 0x00080000U) >> 19;
2255 uint32_t j1 = (bits & 0x00040000U) >> 18;
2256 uint32_t lo = (bits & 0x00000ffeU) >> 1;
2257
2258 return (lower_insn & 0xd000U) | (j1 << 13) | (j2 << 11) | lo;
2259 }
2260
2261 // R_ARM_ABS8: S + A
2262 static inline typename This::Status
2263 abs8(unsigned char *view,
2264 const Sized_relobj<32, big_endian>* object,
2265 const Symbol_value<32>* psymval)
2266 {
2267 typedef typename elfcpp::Swap<8, big_endian>::Valtype Valtype;
2268 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2269 Valtype* wv = reinterpret_cast<Valtype*>(view);
2270 Valtype val = elfcpp::Swap<8, big_endian>::readval(wv);
2271 Reltype addend = utils::sign_extend<8>(val);
2272 Reltype x = psymval->value(object, addend);
2273 val = utils::bit_select(val, x, 0xffU);
2274 elfcpp::Swap<8, big_endian>::writeval(wv, val);
2275 return (utils::has_signed_unsigned_overflow<8>(x)
2276 ? This::STATUS_OVERFLOW
2277 : This::STATUS_OKAY);
2278 }
2279
2280 // R_ARM_THM_ABS5: S + A
2281 static inline typename This::Status
2282 thm_abs5(unsigned char *view,
2283 const Sized_relobj<32, big_endian>* object,
2284 const Symbol_value<32>* psymval)
2285 {
2286 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2287 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2288 Valtype* wv = reinterpret_cast<Valtype*>(view);
2289 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2290 Reltype addend = (val & 0x7e0U) >> 6;
2291 Reltype x = psymval->value(object, addend);
2292 val = utils::bit_select(val, x << 6, 0x7e0U);
2293 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2294 return (utils::has_overflow<5>(x)
2295 ? This::STATUS_OVERFLOW
2296 : This::STATUS_OKAY);
2297 }
2298
2299 // R_ARM_ABS12: S + A
2300 static inline typename This::Status
2301 abs12(unsigned char *view,
2302 const Sized_relobj<32, big_endian>* object,
2303 const Symbol_value<32>* psymval)
2304 {
2305 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2306 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2307 Valtype* wv = reinterpret_cast<Valtype*>(view);
2308 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2309 Reltype addend = val & 0x0fffU;
2310 Reltype x = psymval->value(object, addend);
2311 val = utils::bit_select(val, x, 0x0fffU);
2312 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2313 return (utils::has_overflow<12>(x)
2314 ? This::STATUS_OVERFLOW
2315 : This::STATUS_OKAY);
2316 }
2317
2318 // R_ARM_ABS16: S + A
2319 static inline typename This::Status
2320 abs16(unsigned char *view,
2321 const Sized_relobj<32, big_endian>* object,
2322 const Symbol_value<32>* psymval)
2323 {
2324 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2325 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2326 Valtype* wv = reinterpret_cast<Valtype*>(view);
2327 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2328 Reltype addend = utils::sign_extend<16>(val);
2329 Reltype x = psymval->value(object, addend);
2330 val = utils::bit_select(val, x, 0xffffU);
2331 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2332 return (utils::has_signed_unsigned_overflow<16>(x)
2333 ? This::STATUS_OVERFLOW
2334 : This::STATUS_OKAY);
2335 }
2336
2337 // R_ARM_ABS32: (S + A) | T
2338 static inline typename This::Status
2339 abs32(unsigned char *view,
2340 const Sized_relobj<32, big_endian>* object,
2341 const Symbol_value<32>* psymval,
2342 Arm_address thumb_bit)
2343 {
2344 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2345 Valtype* wv = reinterpret_cast<Valtype*>(view);
2346 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
2347 Valtype x = psymval->value(object, addend) | thumb_bit;
2348 elfcpp::Swap<32, big_endian>::writeval(wv, x);
2349 return This::STATUS_OKAY;
2350 }
2351
2352 // R_ARM_REL32: (S + A) | T - P
2353 static inline typename This::Status
2354 rel32(unsigned char *view,
2355 const Sized_relobj<32, big_endian>* object,
2356 const Symbol_value<32>* psymval,
2357 Arm_address address,
2358 Arm_address thumb_bit)
2359 {
2360 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2361 Valtype* wv = reinterpret_cast<Valtype*>(view);
2362 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
2363 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
2364 elfcpp::Swap<32, big_endian>::writeval(wv, x);
2365 return This::STATUS_OKAY;
2366 }
2367
2368 // R_ARM_THM_CALL: (S + A) | T - P
2369 static inline typename This::Status
2370 thm_call(const Relocate_info<32, big_endian>* relinfo, unsigned char *view,
2371 const Sized_symbol<32>* gsym, const Arm_relobj<big_endian>* object,
2372 unsigned int r_sym, const Symbol_value<32>* psymval,
2373 Arm_address address, Arm_address thumb_bit,
2374 bool is_weakly_undefined_without_plt)
2375 {
2376 return thumb_branch_common(elfcpp::R_ARM_THM_CALL, relinfo, view, gsym,
2377 object, r_sym, psymval, address, thumb_bit,
2378 is_weakly_undefined_without_plt);
2379 }
2380
2381 // R_ARM_THM_JUMP24: (S + A) | T - P
2382 static inline typename This::Status
2383 thm_jump24(const Relocate_info<32, big_endian>* relinfo, unsigned char *view,
2384 const Sized_symbol<32>* gsym, const Arm_relobj<big_endian>* object,
2385 unsigned int r_sym, const Symbol_value<32>* psymval,
2386 Arm_address address, Arm_address thumb_bit,
2387 bool is_weakly_undefined_without_plt)
2388 {
2389 return thumb_branch_common(elfcpp::R_ARM_THM_JUMP24, relinfo, view, gsym,
2390 object, r_sym, psymval, address, thumb_bit,
2391 is_weakly_undefined_without_plt);
2392 }
2393
2394   // R_ARM_THM_JUMP19: (S + A) | T - P
2395 static typename This::Status
2396 thm_jump19(unsigned char *view, const Arm_relobj<big_endian>* object,
2397 const Symbol_value<32>* psymval, Arm_address address,
2398 Arm_address thumb_bit);
2399
2400 // R_ARM_THM_XPC22: (S + A) | T - P
2401 static inline typename This::Status
2402 thm_xpc22(const Relocate_info<32, big_endian>* relinfo, unsigned char *view,
2403 const Sized_symbol<32>* gsym, const Arm_relobj<big_endian>* object,
2404 unsigned int r_sym, const Symbol_value<32>* psymval,
2405 Arm_address address, Arm_address thumb_bit,
2406 bool is_weakly_undefined_without_plt)
2407 {
2408 return thumb_branch_common(elfcpp::R_ARM_THM_XPC22, relinfo, view, gsym,
2409 object, r_sym, psymval, address, thumb_bit,
2410 is_weakly_undefined_without_plt);
2411 }
2412
2413 // R_ARM_BASE_PREL: B(S) + A - P
2414 static inline typename This::Status
2415 base_prel(unsigned char* view,
2416 Arm_address origin,
2417 Arm_address address)
2418 {
2419 Base::rel32(view, origin - address);
2420 return STATUS_OKAY;
2421 }
2422
2423 // R_ARM_BASE_ABS: B(S) + A
2424 static inline typename This::Status
2425 base_abs(unsigned char* view,
2426 Arm_address origin)
2427 {
2428 Base::rel32(view, origin);
2429 return STATUS_OKAY;
2430 }
2431
2432 // R_ARM_GOT_BREL: GOT(S) + A - GOT_ORG
2433 static inline typename This::Status
2434 got_brel(unsigned char* view,
2435 typename elfcpp::Swap<32, big_endian>::Valtype got_offset)
2436 {
2437 Base::rel32(view, got_offset);
2438 return This::STATUS_OKAY;
2439 }
2440
2441 // R_ARM_GOT_PREL: GOT(S) + A - P
2442 static inline typename This::Status
2443 got_prel(unsigned char *view,
2444 Arm_address got_entry,
2445 Arm_address address)
2446 {
2447 Base::rel32(view, got_entry - address);
2448 return This::STATUS_OKAY;
2449 }
2450
2451 // R_ARM_PLT32: (S + A) | T - P
2452 static inline typename This::Status
2453 plt32(const Relocate_info<32, big_endian>* relinfo,
2454 unsigned char *view,
2455 const Sized_symbol<32>* gsym,
2456 const Arm_relobj<big_endian>* object,
2457 unsigned int r_sym,
2458 const Symbol_value<32>* psymval,
2459 Arm_address address,
2460 Arm_address thumb_bit,
2461 bool is_weakly_undefined_without_plt)
2462 {
2463 return arm_branch_common(elfcpp::R_ARM_PLT32, relinfo, view, gsym,
2464 object, r_sym, psymval, address, thumb_bit,
2465 is_weakly_undefined_without_plt);
2466 }
2467
2468 // R_ARM_XPC25: (S + A) | T - P
2469 static inline typename This::Status
2470 xpc25(const Relocate_info<32, big_endian>* relinfo,
2471 unsigned char *view,
2472 const Sized_symbol<32>* gsym,
2473 const Arm_relobj<big_endian>* object,
2474 unsigned int r_sym,
2475 const Symbol_value<32>* psymval,
2476 Arm_address address,
2477 Arm_address thumb_bit,
2478 bool is_weakly_undefined_without_plt)
2479 {
2480 return arm_branch_common(elfcpp::R_ARM_XPC25, relinfo, view, gsym,
2481 object, r_sym, psymval, address, thumb_bit,
2482 is_weakly_undefined_without_plt);
2483 }
2484
2485 // R_ARM_CALL: (S + A) | T - P
2486 static inline typename This::Status
2487 call(const Relocate_info<32, big_endian>* relinfo,
2488 unsigned char *view,
2489 const Sized_symbol<32>* gsym,
2490 const Arm_relobj<big_endian>* object,
2491 unsigned int r_sym,
2492 const Symbol_value<32>* psymval,
2493 Arm_address address,
2494 Arm_address thumb_bit,
2495 bool is_weakly_undefined_without_plt)
2496 {
2497 return arm_branch_common(elfcpp::R_ARM_CALL, relinfo, view, gsym,
2498 object, r_sym, psymval, address, thumb_bit,
2499 is_weakly_undefined_without_plt);
2500 }
2501
2502 // R_ARM_JUMP24: (S + A) | T - P
2503 static inline typename This::Status
2504 jump24(const Relocate_info<32, big_endian>* relinfo,
2505 unsigned char *view,
2506 const Sized_symbol<32>* gsym,
2507 const Arm_relobj<big_endian>* object,
2508 unsigned int r_sym,
2509 const Symbol_value<32>* psymval,
2510 Arm_address address,
2511 Arm_address thumb_bit,
2512 bool is_weakly_undefined_without_plt)
2513 {
2514 return arm_branch_common(elfcpp::R_ARM_JUMP24, relinfo, view, gsym,
2515 object, r_sym, psymval, address, thumb_bit,
2516 is_weakly_undefined_without_plt);
2517 }
2518
2519   // R_ARM_PREL31: (S + A) | T - P
2520 static inline typename This::Status
2521 prel31(unsigned char *view,
2522 const Sized_relobj<32, big_endian>* object,
2523 const Symbol_value<32>* psymval,
2524 Arm_address address,
2525 Arm_address thumb_bit)
2526 {
2527 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2528 Valtype* wv = reinterpret_cast<Valtype*>(view);
2529 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2530 Valtype addend = utils::sign_extend<31>(val);
2531 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
2532 val = utils::bit_select(val, x, 0x7fffffffU);
2533 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2534 return (utils::has_overflow<31>(x) ?
2535 This::STATUS_OVERFLOW : This::STATUS_OKAY);
2536 }
2537
2538 // R_ARM_MOVW_ABS_NC: (S + A) | T
2539 static inline typename This::Status
2540 movw_abs_nc(unsigned char *view,
2541 const Sized_relobj<32, big_endian>* object,
2542 const Symbol_value<32>* psymval,
2543 Arm_address thumb_bit)
2544 {
2545 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2546 Valtype* wv = reinterpret_cast<Valtype*>(view);
2547 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2548 Valtype addend = This::extract_arm_movw_movt_addend(val);
2549 Valtype x = psymval->value(object, addend) | thumb_bit;
2550 val = This::insert_val_arm_movw_movt(val, x);
2551 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2552 return This::STATUS_OKAY;
2553 }
2554
2555 // R_ARM_MOVT_ABS: S + A
2556 static inline typename This::Status
2557 movt_abs(unsigned char *view,
2558 const Sized_relobj<32, big_endian>* object,
2559 const Symbol_value<32>* psymval)
2560 {
2561 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2562 Valtype* wv = reinterpret_cast<Valtype*>(view);
2563 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2564 Valtype addend = This::extract_arm_movw_movt_addend(val);
2565 Valtype x = psymval->value(object, addend) >> 16;
2566 val = This::insert_val_arm_movw_movt(val, x);
2567 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2568 return This::STATUS_OKAY;
2569 }
2570
2571   // R_ARM_THM_MOVW_ABS_NC: (S + A) | T
2572 static inline typename This::Status
2573 thm_movw_abs_nc(unsigned char *view,
2574 const Sized_relobj<32, big_endian>* object,
2575 const Symbol_value<32>* psymval,
2576 Arm_address thumb_bit)
2577 {
2578 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2579 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2580 Valtype* wv = reinterpret_cast<Valtype*>(view);
2581 Reltype val = ((elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2582 | elfcpp::Swap<16, big_endian>::readval(wv + 1));
2583 Reltype addend = extract_thumb_movw_movt_addend(val);
2584 Reltype x = psymval->value(object, addend) | thumb_bit;
2585 val = This::insert_val_thumb_movw_movt(val, x);
2586 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2587 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2588 return This::STATUS_OKAY;
2589 }
2590
2591 // R_ARM_THM_MOVT_ABS: S + A
2592 static inline typename This::Status
2593 thm_movt_abs(unsigned char *view,
2594 const Sized_relobj<32, big_endian>* object,
2595 const Symbol_value<32>* psymval)
2596 {
2597 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2598 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2599 Valtype* wv = reinterpret_cast<Valtype*>(view);
2600 Reltype val = ((elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2601 | elfcpp::Swap<16, big_endian>::readval(wv + 1));
2602 Reltype addend = This::extract_thumb_movw_movt_addend(val);
2603 Reltype x = psymval->value(object, addend) >> 16;
2604 val = This::insert_val_thumb_movw_movt(val, x);
2605 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2606 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2607 return This::STATUS_OKAY;
2608 }
2609
2610 // R_ARM_MOVW_PREL_NC: (S + A) | T - P
2611 static inline typename This::Status
2612 movw_prel_nc(unsigned char *view,
2613 const Sized_relobj<32, big_endian>* object,
2614 const Symbol_value<32>* psymval,
2615 Arm_address address,
2616 Arm_address thumb_bit)
2617 {
2618 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2619 Valtype* wv = reinterpret_cast<Valtype*>(view);
2620 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2621 Valtype addend = This::extract_arm_movw_movt_addend(val);
2622 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
2623 val = This::insert_val_arm_movw_movt(val, x);
2624 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2625 return This::STATUS_OKAY;
2626 }
2627
2628 // R_ARM_MOVT_PREL: S + A - P
2629 static inline typename This::Status
2630 movt_prel(unsigned char *view,
2631 const Sized_relobj<32, big_endian>* object,
2632 const Symbol_value<32>* psymval,
2633 Arm_address address)
2634 {
2635 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2636 Valtype* wv = reinterpret_cast<Valtype*>(view);
2637 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2638 Valtype addend = This::extract_arm_movw_movt_addend(val);
2639 Valtype x = (psymval->value(object, addend) - address) >> 16;
2640 val = This::insert_val_arm_movw_movt(val, x);
2641 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2642 return This::STATUS_OKAY;
2643 }
2644
2645 // R_ARM_THM_MOVW_PREL_NC: (S + A) | T - P
2646 static inline typename This::Status
2647 thm_movw_prel_nc(unsigned char *view,
2648 const Sized_relobj<32, big_endian>* object,
2649 const Symbol_value<32>* psymval,
2650 Arm_address address,
2651 Arm_address thumb_bit)
2652 {
2653 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2654 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2655 Valtype* wv = reinterpret_cast<Valtype*>(view);
2656 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2657 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
2658 Reltype addend = This::extract_thumb_movw_movt_addend(val);
2659 Reltype x = (psymval->value(object, addend) | thumb_bit) - address;
2660 val = This::insert_val_thumb_movw_movt(val, x);
2661 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2662 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2663 return This::STATUS_OKAY;
2664 }
2665
2666 // R_ARM_THM_MOVT_PREL: S + A - P
2667 static inline typename This::Status
2668 thm_movt_prel(unsigned char *view,
2669 const Sized_relobj<32, big_endian>* object,
2670 const Symbol_value<32>* psymval,
2671 Arm_address address)
2672 {
2673 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2674 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2675 Valtype* wv = reinterpret_cast<Valtype*>(view);
2676 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2677 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
2678 Reltype addend = This::extract_thumb_movw_movt_addend(val);
2679 Reltype x = (psymval->value(object, addend) - address) >> 16;
2680 val = This::insert_val_thumb_movw_movt(val, x);
2681 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2682 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2683 return This::STATUS_OKAY;
2684 }
2685 };
2686
2687 // Relocate ARM long branches. This handles relocation types
2688 // R_ARM_CALL, R_ARM_JUMP24, R_ARM_PLT32 and R_ARM_XPC25.
2689 // If IS_WEAKLY_UNDEFINED_WITHOUT_PLT is true, the target symbol is weakly
2690 // undefined and we do not use a PLT in this relocation.  In such a case,
2691 // the branch is converted into a NOP.
2692
2693 template<bool big_endian>
2694 typename Arm_relocate_functions<big_endian>::Status
2695 Arm_relocate_functions<big_endian>::arm_branch_common(
2696 unsigned int r_type,
2697 const Relocate_info<32, big_endian>* relinfo,
2698 unsigned char *view,
2699 const Sized_symbol<32>* gsym,
2700 const Arm_relobj<big_endian>* object,
2701 unsigned int r_sym,
2702 const Symbol_value<32>* psymval,
2703 Arm_address address,
2704 Arm_address thumb_bit,
2705 bool is_weakly_undefined_without_plt)
2706 {
2707 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2708 Valtype* wv = reinterpret_cast<Valtype*>(view);
2709 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2710
2711 bool insn_is_b = (((val >> 28) & 0xf) <= 0xe)
2712 && ((val & 0x0f000000UL) == 0x0a000000UL);
2713 bool insn_is_uncond_bl = (val & 0xff000000UL) == 0xeb000000UL;
2714 bool insn_is_cond_bl = (((val >> 28) & 0xf) < 0xe)
2715 && ((val & 0x0f000000UL) == 0x0b000000UL);
2716 bool insn_is_blx = (val & 0xfe000000UL) == 0xfa000000UL;
2717 bool insn_is_any_branch = (val & 0x0e000000UL) == 0x0a000000UL;
2718
2719 // Check that the instruction is valid.
2720 if (r_type == elfcpp::R_ARM_CALL)
2721 {
2722 if (!insn_is_uncond_bl && !insn_is_blx)
2723 return This::STATUS_BAD_RELOC;
2724 }
2725 else if (r_type == elfcpp::R_ARM_JUMP24)
2726 {
2727 if (!insn_is_b && !insn_is_cond_bl)
2728 return This::STATUS_BAD_RELOC;
2729 }
2730 else if (r_type == elfcpp::R_ARM_PLT32)
2731 {
2732 if (!insn_is_any_branch)
2733 return This::STATUS_BAD_RELOC;
2734 }
2735 else if (r_type == elfcpp::R_ARM_XPC25)
2736 {
2737 // FIXME: AAELF document IH0044C does not say much about it other
2738 // than it being obsolete.
2739 if (!insn_is_any_branch)
2740 return This::STATUS_BAD_RELOC;
2741 }
2742 else
2743 gold_unreachable();
2744
2745 // A branch to an undefined weak symbol is turned into a jump to
2746 // the next instruction unless a PLT entry will be created.
2747 // Do the same for local undefined symbols.
2748 // The jump to the next instruction is optimized as a NOP depending
2749 // on the architecture.
2750 const Target_arm<big_endian>* arm_target =
2751 Target_arm<big_endian>::default_target();
2752 if (is_weakly_undefined_without_plt)
2753 {
2754 Valtype cond = val & 0xf0000000U;
2755 if (arm_target->may_use_arm_nop())
2756 val = cond | 0x0320f000;
2757 else
2758 val = cond | 0x01a00000; // Using pre-UAL nop: mov r0, r0.
2759 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2760 return This::STATUS_OKAY;
2761 }
2762
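  // The addend is the signed 24-bit branch immediate converted to a byte
  // offset: shift left by 2 and sign-extend from bit 25.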
2763 Valtype addend = utils::sign_extend<26>(val << 2);
2764 Valtype branch_target = psymval->value(object, addend);
2765 int32_t branch_offset = branch_target - address;
2766
2767 // We need a stub if the branch offset is too large or if we need
2768 // to switch mode.
2769 bool may_use_blx = arm_target->may_use_blx();
2770 Reloc_stub* stub = NULL;
2771 if ((branch_offset > ARM_MAX_FWD_BRANCH_OFFSET)
2772 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
2773 || ((thumb_bit != 0) && !(may_use_blx && r_type == elfcpp::R_ARM_CALL)))
2774 {
2775 Stub_type stub_type =
2776 Reloc_stub::stub_type_for_reloc(r_type, address, branch_target,
2777 (thumb_bit != 0));
2778 if (stub_type != arm_stub_none)
2779 {
2780 Stub_table<big_endian>* stub_table =
2781 object->stub_table(relinfo->data_shndx);
2782 gold_assert(stub_table != NULL);
2783
2784 Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
2785 stub = stub_table->find_reloc_stub(stub_key);
2786 gold_assert(stub != NULL);
2787 thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
2788 branch_target = stub_table->address() + stub->offset() + addend;
2789 branch_offset = branch_target - address;
2790 gold_assert((branch_offset <= ARM_MAX_FWD_BRANCH_OFFSET)
2791 && (branch_offset >= ARM_MAX_BWD_BRANCH_OFFSET));
2792 }
2793 }
2794
2795 // At this point, if we still need to switch mode, the instruction
2796 // must either be a BLX or a BL that can be converted to a BLX.
2797 if (thumb_bit != 0)
2798 {
2799 // Turn BL to BLX.
2800 gold_assert(may_use_blx && r_type == elfcpp::R_ARM_CALL);
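      // Bit 1 of the branch offset supplies the H bit (bit 24) of the BLX
      // encoding, giving the branch halfword resolution.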
2801 val = (val & 0xffffff) | 0xfa000000 | ((branch_offset & 2) << 23);
2802 }
2803
2804 val = utils::bit_select(val, (branch_offset >> 2), 0xffffffUL);
2805 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2806 return (utils::has_overflow<26>(branch_offset)
2807 ? This::STATUS_OVERFLOW : This::STATUS_OKAY);
2808 }
2809
2810 // Relocate THUMB long branches. This handles relocation types
2811 // R_ARM_THM_CALL, R_ARM_THM_JUMP24 and R_ARM_THM_XPC22.
2812 // If IS_WEAKLY_UNDEFINED_WITHOUT_PLT is true, the target symbol is weakly
2813 // undefined and we do not use a PLT in this relocation.  In such a case,
2814 // the branch is converted into a NOP.
2815
2816 template<bool big_endian>
2817 typename Arm_relocate_functions<big_endian>::Status
2818 Arm_relocate_functions<big_endian>::thumb_branch_common(
2819 unsigned int r_type,
2820 const Relocate_info<32, big_endian>* relinfo,
2821 unsigned char *view,
2822 const Sized_symbol<32>* gsym,
2823 const Arm_relobj<big_endian>* object,
2824 unsigned int r_sym,
2825 const Symbol_value<32>* psymval,
2826 Arm_address address,
2827 Arm_address thumb_bit,
2828 bool is_weakly_undefined_without_plt)
2829 {
2830 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2831 Valtype* wv = reinterpret_cast<Valtype*>(view);
2832 uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
2833 uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
2834
2835   // FIXME: These tests are too loose and do not take the THUMB/THUMB-2 difference
2836 // into account.
2837 bool is_bl_insn = (lower_insn & 0x1000U) == 0x1000U;
2838 bool is_blx_insn = (lower_insn & 0x1000U) == 0x0000U;
2839
2840 // Check that the instruction is valid.
2841 if (r_type == elfcpp::R_ARM_THM_CALL)
2842 {
2843 if (!is_bl_insn && !is_blx_insn)
2844 return This::STATUS_BAD_RELOC;
2845 }
2846 else if (r_type == elfcpp::R_ARM_THM_JUMP24)
2847 {
2848 // This cannot be a BLX.
2849 if (!is_bl_insn)
2850 return This::STATUS_BAD_RELOC;
2851 }
2852 else if (r_type == elfcpp::R_ARM_THM_XPC22)
2853 {
2854 // Check for Thumb to Thumb call.
2855 if (!is_blx_insn)
2856 return This::STATUS_BAD_RELOC;
2857 if (thumb_bit != 0)
2858 {
2859 gold_warning(_("%s: Thumb BLX instruction targets "
2860 "thumb function '%s'."),
2861 object->name().c_str(),
2862 (gsym ? gsym->name() : "(local)"));
2863 // Convert BLX to BL.
2864 lower_insn |= 0x1000U;
2865 }
2866 }
2867 else
2868 gold_unreachable();
2869
2870 // A branch to an undefined weak symbol is turned into a jump to
2871 // the next instruction unless a PLT entry will be created.
2872 // The jump to the next instruction is optimized as a NOP.W for
2873 // Thumb-2 enabled architectures.
2874 const Target_arm<big_endian>* arm_target =
2875 Target_arm<big_endian>::default_target();
2876 if (is_weakly_undefined_without_plt)
2877 {
2878 if (arm_target->may_use_thumb2_nop())
2879 {
2880 elfcpp::Swap<16, big_endian>::writeval(wv, 0xf3af);
2881 elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0x8000);
2882 }
2883 else
2884 {
2885 elfcpp::Swap<16, big_endian>::writeval(wv, 0xe000);
2886 elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0xbf00);
2887 }
2888 return This::STATUS_OKAY;
2889 }
2890
2891 int32_t addend = This::thumb32_branch_offset(upper_insn, lower_insn);
2892 Arm_address branch_target = psymval->value(object, addend);
2893 int32_t branch_offset = branch_target - address;
2894
2895 // We need a stub if the branch offset is too large or if we need
2896 // to switch mode.
2897 bool may_use_blx = arm_target->may_use_blx();
2898 bool thumb2 = arm_target->using_thumb2();
2899 if ((!thumb2
2900 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
2901 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
2902 || (thumb2
2903 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
2904 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
2905 || ((thumb_bit == 0)
2906 && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
2907 || r_type == elfcpp::R_ARM_THM_JUMP24)))
2908 {
2909 Stub_type stub_type =
2910 Reloc_stub::stub_type_for_reloc(r_type, address, branch_target,
2911 (thumb_bit != 0));
2912 if (stub_type != arm_stub_none)
2913 {
2914 Stub_table<big_endian>* stub_table =
2915 object->stub_table(relinfo->data_shndx);
2916 gold_assert(stub_table != NULL);
2917
2918 Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
2919 Reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
2920 gold_assert(stub != NULL);
2921 thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
2922 branch_target = stub_table->address() + stub->offset() + addend;
2923 branch_offset = branch_target - address;
2924 }
2925 }
2926
2927 // At this point, if we still need to switch mode, the instruction
2928 // must either be a BLX or a BL that can be converted to a BLX.
2929 if (thumb_bit == 0)
2930 {
2931 gold_assert(may_use_blx
2932 && (r_type == elfcpp::R_ARM_THM_CALL
2933 || r_type == elfcpp::R_ARM_THM_XPC22));
2934 // Make sure this is a BLX.
2935 lower_insn &= ~0x1000U;
2936 }
2937 else
2938 {
2939 // Make sure this is a BL.
2940 lower_insn |= 0x1000U;
2941 }
2942
2943 if ((lower_insn & 0x5000U) == 0x4000U)
2944 // For a BLX instruction, make sure that the relocation is rounded up
2945 // to a word boundary. This follows the semantics of the instruction
2946 // which specifies that bit 1 of the target address will come from bit
2947 // 1 of the base address.
2948 branch_offset = (branch_offset + 2) & ~3;
2949
2950 // Put BRANCH_OFFSET back into the insn. Assumes two's complement.
2951 // We use the Thumb-2 encoding, which is safe even if dealing with
2952   // a Thumb-1 instruction by virtue of our overflow check above.
2953 upper_insn = This::thumb32_branch_upper(upper_insn, branch_offset);
2954 lower_insn = This::thumb32_branch_lower(lower_insn, branch_offset);
2955
2956 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
2957 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
2958
2959 return ((thumb2
2960 ? utils::has_overflow<25>(branch_offset)
2961 : utils::has_overflow<23>(branch_offset))
2962 ? This::STATUS_OVERFLOW
2963 : This::STATUS_OKAY);
2964 }
2965
2966 // Relocate THUMB-2 long conditional branches.
2967 // If IS_WEAKLY_UNDEFINED_WITHOUT_PLT is true, the target symbol is weakly
2968 // undefined and we do not use a PLT in this relocation.  In such a case,
2969 // the branch is converted into a NOP.
2970
2971 template<bool big_endian>
2972 typename Arm_relocate_functions<big_endian>::Status
2973 Arm_relocate_functions<big_endian>::thm_jump19(
2974 unsigned char *view,
2975 const Arm_relobj<big_endian>* object,
2976 const Symbol_value<32>* psymval,
2977 Arm_address address,
2978 Arm_address thumb_bit)
2979 {
2980 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2981 Valtype* wv = reinterpret_cast<Valtype*>(view);
2982 uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
2983 uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
2984 int32_t addend = This::thumb32_cond_branch_offset(upper_insn, lower_insn);
2985
2986 Arm_address branch_target = psymval->value(object, addend);
2987 int32_t branch_offset = branch_target - address;
2988
2989 // ??? Should handle interworking? GCC might someday try to
2990 // use this for tail calls.
2991   // FIXME: We do not support thumb entry to PLT yet.
2992 if (thumb_bit == 0)
2993 {
2994 gold_error(_("conditional branch to PLT in THUMB-2 not supported yet."));
2995 return This::STATUS_BAD_RELOC;
2996 }
2997
2998 // Put RELOCATION back into the insn.
2999 upper_insn = This::thumb32_cond_branch_upper(upper_insn, branch_offset);
3000 lower_insn = This::thumb32_cond_branch_lower(lower_insn, branch_offset);
3001
3002 // Put the relocated value back in the object file:
3003 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
3004 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
3005
3006 return (utils::has_overflow<21>(branch_offset)
3007 ? This::STATUS_OVERFLOW
3008 : This::STATUS_OKAY);
3009 }
3010
3011 // Get the GOT section, creating it if necessary.
3012
3013 template<bool big_endian>
3014 Output_data_got<32, big_endian>*
3015 Target_arm<big_endian>::got_section(Symbol_table* symtab, Layout* layout)
3016 {
3017 if (this->got_ == NULL)
3018 {
3019 gold_assert(symtab != NULL && layout != NULL);
3020
3021 this->got_ = new Output_data_got<32, big_endian>();
3022
3023 Output_section* os;
3024 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3025 (elfcpp::SHF_ALLOC
3026 | elfcpp::SHF_WRITE),
3027 this->got_, false, true, true,
3028 false);
3029
3030 // The old GNU linker creates a .got.plt section. We just
3031 // create another set of data in the .got section. Note that we
3032 // always create a PLT if we create a GOT, although the PLT
3033 // might be empty.
3034 this->got_plt_ = new Output_data_space(4, "** GOT PLT");
3035 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3036 (elfcpp::SHF_ALLOC
3037 | elfcpp::SHF_WRITE),
3038 this->got_plt_, false, false,
3039 false, true);
3040
3041 // The first three entries are reserved.
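      // (Conventionally the first entry holds the address of the dynamic
      // section and the next two are filled in by the dynamic linker at
      // run time.)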
3042 this->got_plt_->set_current_data_size(3 * 4);
3043
3044 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT.
3045 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3046 Symbol_table::PREDEFINED,
3047 this->got_plt_,
3048 0, 0, elfcpp::STT_OBJECT,
3049 elfcpp::STB_LOCAL,
3050 elfcpp::STV_HIDDEN, 0,
3051 false, false);
3052 }
3053 return this->got_;
3054 }
3055
3056 // Get the dynamic reloc section, creating it if necessary.
3057
3058 template<bool big_endian>
3059 typename Target_arm<big_endian>::Reloc_section*
3060 Target_arm<big_endian>::rel_dyn_section(Layout* layout)
3061 {
3062 if (this->rel_dyn_ == NULL)
3063 {
3064 gold_assert(layout != NULL);
3065 this->rel_dyn_ = new Reloc_section(parameters->options().combreloc());
3066 layout->add_output_section_data(".rel.dyn", elfcpp::SHT_REL,
3067 elfcpp::SHF_ALLOC, this->rel_dyn_, true,
3068 false, false, false);
3069 }
3070 return this->rel_dyn_;
3071 }
3072
3073 // Insn_template methods.
3074
3075 // Return byte size of an instruction template.
3076
3077 size_t
3078 Insn_template::size() const
3079 {
3080 switch (this->type())
3081 {
3082 case THUMB16_TYPE:
3083 case THUMB16_SPECIAL_TYPE:
3084 return 2;
3085 case ARM_TYPE:
3086 case THUMB32_TYPE:
3087 case DATA_TYPE:
3088 return 4;
3089 default:
3090 gold_unreachable();
3091 }
3092 }
3093
3094 // Return alignment of an instruction template.
3095
3096 unsigned
3097 Insn_template::alignment() const
3098 {
3099 switch (this->type())
3100 {
3101 case THUMB16_TYPE:
3102 case THUMB16_SPECIAL_TYPE:
3103 case THUMB32_TYPE:
3104 return 2;
3105 case ARM_TYPE:
3106 case DATA_TYPE:
3107 return 4;
3108 default:
3109 gold_unreachable();
3110 }
3111 }
3112
3113 // Stub_template methods.
3114
3115 Stub_template::Stub_template(
3116 Stub_type type, const Insn_template* insns,
3117 size_t insn_count)
3118 : type_(type), insns_(insns), insn_count_(insn_count), alignment_(1),
3119 entry_in_thumb_mode_(false), relocs_()
3120 {
3121 off_t offset = 0;
3122
3123 // Compute byte size and alignment of stub template.
3124 for (size_t i = 0; i < insn_count; i++)
3125 {
3126 unsigned insn_alignment = insns[i].alignment();
3127 size_t insn_size = insns[i].size();
3128 gold_assert((offset & (insn_alignment - 1)) == 0);
3129 this->alignment_ = std::max(this->alignment_, insn_alignment);
3130 switch (insns[i].type())
3131 {
3132 case Insn_template::THUMB16_TYPE:
3133 case Insn_template::THUMB16_SPECIAL_TYPE:
3134 if (i == 0)
3135 this->entry_in_thumb_mode_ = true;
3136 break;
3137
3138 case Insn_template::THUMB32_TYPE:
3139 if (insns[i].r_type() != elfcpp::R_ARM_NONE)
3140 this->relocs_.push_back(Reloc(i, offset));
3141 if (i == 0)
3142 this->entry_in_thumb_mode_ = true;
3143 break;
3144
3145 case Insn_template::ARM_TYPE:
3146 // Handle cases where the target is encoded within the
3147 // instruction.
3148 if (insns[i].r_type() == elfcpp::R_ARM_JUMP24)
3149 this->relocs_.push_back(Reloc(i, offset));
3150 break;
3151
3152 case Insn_template::DATA_TYPE:
3153 // Entry point cannot be data.
3154 gold_assert(i != 0);
3155 this->relocs_.push_back(Reloc(i, offset));
3156 break;
3157
3158 default:
3159 gold_unreachable();
3160 }
3161 offset += insn_size;
3162 }
3163 this->size_ = offset;
3164 }
3165
3166 // Stub methods.
3167
3168 // Template to implement do_write for a specific target endianity.
3169
3170 template<bool big_endian>
3171 void inline
3172 Stub::do_fixed_endian_write(unsigned char* view, section_size_type view_size)
3173 {
3174 const Stub_template* stub_template = this->stub_template();
3175 const Insn_template* insns = stub_template->insns();
3176
3177 // FIXME: We do not handle BE8 encoding yet.
3178 unsigned char* pov = view;
3179 for (size_t i = 0; i < stub_template->insn_count(); i++)
3180 {
3181 switch (insns[i].type())
3182 {
3183 case Insn_template::THUMB16_TYPE:
3184 elfcpp::Swap<16, big_endian>::writeval(pov, insns[i].data() & 0xffff);
3185 break;
3186 case Insn_template::THUMB16_SPECIAL_TYPE:
3187 elfcpp::Swap<16, big_endian>::writeval(
3188 pov,
3189 this->thumb16_special(i));
3190 break;
3191 case Insn_template::THUMB32_TYPE:
3192 {
3193 uint32_t hi = (insns[i].data() >> 16) & 0xffff;
3194 uint32_t lo = insns[i].data() & 0xffff;
3195 elfcpp::Swap<16, big_endian>::writeval(pov, hi);
3196 elfcpp::Swap<16, big_endian>::writeval(pov + 2, lo);
3197 }
3198 break;
3199 case Insn_template::ARM_TYPE:
3200 case Insn_template::DATA_TYPE:
3201 elfcpp::Swap<32, big_endian>::writeval(pov, insns[i].data());
3202 break;
3203 default:
3204 gold_unreachable();
3205 }
3206 pov += insns[i].size();
3207 }
3208 gold_assert(static_cast<section_size_type>(pov - view) == view_size);
3209 }
3210
3211 // Reloc_stub::Key methods.
3212
3213 // Dump a Key as a string for debugging.
3214
3215 std::string
3216 Reloc_stub::Key::name() const
3217 {
3218 if (this->r_sym_ == invalid_index)
3219 {
3220 // Global symbol key name
3221 // <stub-type>:<symbol name>:<addend>.
3222 const std::string sym_name = this->u_.symbol->name();
3223       // We need to print two hex numbers and two colons.  So just add 100 bytes
3224 // to the symbol name size.
3225 size_t len = sym_name.size() + 100;
3226 char* buffer = new char[len];
3227 int c = snprintf(buffer, len, "%d:%s:%x", this->stub_type_,
3228 sym_name.c_str(), this->addend_);
3229 gold_assert(c > 0 && c < static_cast<int>(len));
      // Copy the buffer into a string before freeing it.
      std::string ret(buffer);
      delete[] buffer;
      return ret;
3232 }
3233 else
3234 {
3235 // local symbol key name
3236 // <stub-type>:<object>:<r_sym>:<addend>.
3237 const size_t len = 200;
3238 char buffer[len];
3239 int c = snprintf(buffer, len, "%d:%p:%u:%x", this->stub_type_,
3240 this->u_.relobj, this->r_sym_, this->addend_);
3241 gold_assert(c > 0 && c < static_cast<int>(len));
3242 return std::string(buffer);
3243 }
3244 }
3245
3246 // Reloc_stub methods.
3247
3248 // Determine the type of stub needed, if any, for a relocation of R_TYPE at
3249 // LOCATION to DESTINATION.
3250 // This code is based on the arm_type_of_stub function in
3251 // bfd/elf32-arm.c.  We have changed the interface a little to keep the Stub
3252 // class simple.
3253
3254 Stub_type
3255 Reloc_stub::stub_type_for_reloc(
3256 unsigned int r_type,
3257 Arm_address location,
3258 Arm_address destination,
3259 bool target_is_thumb)
3260 {
3261 Stub_type stub_type = arm_stub_none;
3262
3263 // This is a bit ugly but we want to avoid using a templated class for
3264   // big and little endianness.
3265 bool may_use_blx;
3266 bool should_force_pic_veneer;
3267 bool thumb2;
3268 bool thumb_only;
3269 if (parameters->target().is_big_endian())
3270 {
3271 const Target_arm<true>* big_endian_target =
3272 Target_arm<true>::default_target();
3273 may_use_blx = big_endian_target->may_use_blx();
3274 should_force_pic_veneer = big_endian_target->should_force_pic_veneer();
3275 thumb2 = big_endian_target->using_thumb2();
3276 thumb_only = big_endian_target->using_thumb_only();
3277 }
3278 else
3279 {
3280 const Target_arm<false>* little_endian_target =
3281 Target_arm<false>::default_target();
3282 may_use_blx = little_endian_target->may_use_blx();
3283 should_force_pic_veneer = little_endian_target->should_force_pic_veneer();
3284 thumb2 = little_endian_target->using_thumb2();
3285 thumb_only = little_endian_target->using_thumb_only();
3286 }
3287
3288 int64_t branch_offset = (int64_t)destination - location;
3289
3290 if (r_type == elfcpp::R_ARM_THM_CALL || r_type == elfcpp::R_ARM_THM_JUMP24)
3291 {
3292 // Handle cases where:
3293 // - this call goes too far (different Thumb/Thumb2 max
3294 // distance)
3295 // - it's a Thumb->Arm call and blx is not available, or it's a
3296 // Thumb->Arm branch (not bl). A stub is needed in this case.
3297 if ((!thumb2
3298 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3299 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3300 || (thumb2
3301 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3302 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3303 || ((!target_is_thumb)
3304 && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
3305 || (r_type == elfcpp::R_ARM_THM_JUMP24))))
3306 {
3307 if (target_is_thumb)
3308 {
3309 // Thumb to thumb.
3310 if (!thumb_only)
3311 {
3312 stub_type = (parameters->options().shared()
3313 || should_force_pic_veneer)
3314 // PIC stubs.
3315 ? ((may_use_blx
3316 && (r_type == elfcpp::R_ARM_THM_CALL))
3317 // V5T and above. Stub starts with ARM code, so
3318 // we must be able to switch mode before
3319 // reaching it, which is only possible for 'bl'
3320 // (ie R_ARM_THM_CALL relocation).
3321 ? arm_stub_long_branch_any_thumb_pic
3322 // On V4T, use Thumb code only.
3323 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3324
3325 // non-PIC stubs.
3326 : ((may_use_blx
3327 && (r_type == elfcpp::R_ARM_THM_CALL))
3328 ? arm_stub_long_branch_any_any // V5T and above.
3329 : arm_stub_long_branch_v4t_thumb_thumb); // V4T.
3330 }
3331 else
3332 {
3333 stub_type = (parameters->options().shared()
3334 || should_force_pic_veneer)
3335 ? arm_stub_long_branch_thumb_only_pic // PIC stub.
3336 : arm_stub_long_branch_thumb_only; // non-PIC stub.
3337 }
3338 }
3339 else
3340 {
3341 // Thumb to arm.
3342
3343 // FIXME: We should check that the input section is from an
3344 // object that has interwork enabled.
3345
3346 stub_type = (parameters->options().shared()
3347 || should_force_pic_veneer)
3348 // PIC stubs.
3349 ? ((may_use_blx
3350 && (r_type == elfcpp::R_ARM_THM_CALL))
3351 ? arm_stub_long_branch_any_arm_pic // V5T and above.
3352 : arm_stub_long_branch_v4t_thumb_arm_pic) // V4T.
3353
3354 // non-PIC stubs.
3355 : ((may_use_blx
3356 && (r_type == elfcpp::R_ARM_THM_CALL))
3357 ? arm_stub_long_branch_any_any // V5T and above.
3358 : arm_stub_long_branch_v4t_thumb_arm); // V4T.
3359
3360 // Handle v4t short branches.
3361 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3362 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3363 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3364 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3365 }
3366 }
3367 }
3368 else if (r_type == elfcpp::R_ARM_CALL
3369 || r_type == elfcpp::R_ARM_JUMP24
3370 || r_type == elfcpp::R_ARM_PLT32)
3371 {
3372 if (target_is_thumb)
3373 {
3374 // Arm to thumb.
3375
3376 // FIXME: We should check that the input section is from an
3377 // object that has interwork enabled.
3378
3379 // We have an extra 2-bytes reach because of
3380 // the mode change (bit 24 (H) of BLX encoding).
3381 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3382 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3383 || ((r_type == elfcpp::R_ARM_CALL) && !may_use_blx)
3384 || (r_type == elfcpp::R_ARM_JUMP24)
3385 || (r_type == elfcpp::R_ARM_PLT32))
3386 {
3387 stub_type = (parameters->options().shared()
3388 || should_force_pic_veneer)
3389 // PIC stubs.
3390 ? (may_use_blx
3391 ? arm_stub_long_branch_any_thumb_pic// V5T and above.
3392 : arm_stub_long_branch_v4t_arm_thumb_pic) // V4T stub.
3393
3394 // non-PIC stubs.
3395 : (may_use_blx
3396 ? arm_stub_long_branch_any_any // V5T and above.
3397 : arm_stub_long_branch_v4t_arm_thumb); // V4T.
3398 }
3399 }
3400 else
3401 {
3402 // Arm to arm.
3403 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3404 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3405 {
3406 stub_type = (parameters->options().shared()
3407 || should_force_pic_veneer)
3408 ? arm_stub_long_branch_any_arm_pic // PIC stubs.
3409 : arm_stub_long_branch_any_any; /// non-PIC.
3410 }
3411 }
3412 }
3413
3414 return stub_type;
3415 }
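
// Example of the selection above (illustrative): a Thumb-to-ARM b.w
// (R_ARM_THM_JUMP24) always needs a veneer because b.w cannot switch mode.
// The non-PIC path then yields arm_stub_long_branch_v4t_thumb_arm, which is
// downgraded to arm_stub_short_branch_v4t_thumb_arm when the destination is
// still within Thumb branch range; with -shared the PIC variant
// arm_stub_long_branch_v4t_thumb_arm_pic is used instead.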
3416
3417 // Cortex_a8_stub methods.
3418
3419 // Return the instruction for a THUMB16_SPECIAL_TYPE instruction template.
3420 // I is the position of the instruction template in the stub template.
3421
3422 uint16_t
3423 Cortex_a8_stub::do_thumb16_special(size_t i)
3424 {
3425 // The only use of this is to copy condition code from a conditional
3426 // branch being worked around to the corresponding conditional branch in
3427 // to the stub.
3428 gold_assert(this->stub_template()->type() == arm_stub_a8_veneer_b_cond
3429 && i == 0);
3430 uint16_t data = this->stub_template()->insns()[i].data();
3431 gold_assert((data & 0xff00U) == 0xd000U);
3432 data |= ((this->original_insn_ >> 22) & 0xf) << 8;
3433 return data;
3434 }
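
// For illustration (an assumption about the encodings, not extra linker
// logic): the condition field of a 32-bit Thumb-2 B<cond>.W occupies bits
// 25:22 of the combined encoding, while a 16-bit B<cond>.N keeps it in bits
// 11:8, so the copy above amounts to:
//
//   uint32_t cond = (original_insn >> 22) & 0xf;  // cond of B<cond>.W
//   uint16_t bcond_n = 0xd000 | (cond << 8);      // matching B<cond>.N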
3435
3436 // Stub_factory methods.
3437
3438 Stub_factory::Stub_factory()
3439 {
3440 // The instruction template sequences are declared as static
3441 // objects and initialized the first time the constructor runs.
3442
3443 // Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
3444 // to reach the stub if necessary.
3445 static const Insn_template elf32_arm_stub_long_branch_any_any[] =
3446 {
3447 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
3448 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3449 // dcd R_ARM_ABS32(X)
3450 };
3451
3452 // V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
3453 // available.
3454 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb[] =
3455 {
3456 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3457 Insn_template::arm_insn(0xe12fff1c), // bx ip
3458 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3459 // dcd R_ARM_ABS32(X)
3460 };
3461
3462 // Thumb -> Thumb long branch stub. Used on M-profile architectures.
3463 static const Insn_template elf32_arm_stub_long_branch_thumb_only[] =
3464 {
3465 Insn_template::thumb16_insn(0xb401), // push {r0}
3466 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
3467 Insn_template::thumb16_insn(0x4684), // mov ip, r0
3468 Insn_template::thumb16_insn(0xbc01), // pop {r0}
3469 Insn_template::thumb16_insn(0x4760), // bx ip
3470 Insn_template::thumb16_insn(0xbf00), // nop
3471 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3472 // dcd R_ARM_ABS32(X)
3473 };
3474
3475 // V4T Thumb -> Thumb long branch stub. Using the stack is not
3476 // allowed.
3477 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
3478 {
3479 Insn_template::thumb16_insn(0x4778), // bx pc
3480 Insn_template::thumb16_insn(0x46c0), // nop
3481 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3482 Insn_template::arm_insn(0xe12fff1c), // bx ip
3483 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3484 // dcd R_ARM_ABS32(X)
3485 };
3486
3487 // V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
3488 // available.
3489 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm[] =
3490 {
3491 Insn_template::thumb16_insn(0x4778), // bx pc
3492 Insn_template::thumb16_insn(0x46c0), // nop
3493 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
3494 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3495 // dcd R_ARM_ABS32(X)
3496 };
3497
3498 // V4T Thumb -> ARM short branch stub. Shorter variant of the above
3499 // one, when the destination is close enough.
3500 static const Insn_template elf32_arm_stub_short_branch_v4t_thumb_arm[] =
3501 {
3502 Insn_template::thumb16_insn(0x4778), // bx pc
3503 Insn_template::thumb16_insn(0x46c0), // nop
3504 Insn_template::arm_rel_insn(0xea000000, -8), // b (X-8)
3505 };
3506
3507 // ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
3508 // blx to reach the stub if necessary.
3509 static const Insn_template elf32_arm_stub_long_branch_any_arm_pic[] =
3510 {
3511 Insn_template::arm_insn(0xe59fc000), // ldr r12, [pc]
3512 Insn_template::arm_insn(0xe08ff00c), // add pc, pc, ip
3513 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
3514 // dcd R_ARM_REL32(X-4)
3515 };
3516
3517 // ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
3518 // blx to reach the stub if necessary. We can not add into pc;
3519 // it is not guaranteed to mode switch (different in ARMv6 and
3520 // ARMv7).
3521 static const Insn_template elf32_arm_stub_long_branch_any_thumb_pic[] =
3522 {
3523 Insn_template::arm_insn(0xe59fc004), // ldr r12, [pc, #4]
3524 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3525 Insn_template::arm_insn(0xe12fff1c), // bx ip
3526 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
3527 // dcd R_ARM_REL32(X)
3528 };
3529
3530 // V4T ARM -> Thumb long branch stub, PIC.
3531 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
3532 {
3533 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
3534 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3535 Insn_template::arm_insn(0xe12fff1c), // bx ip
3536 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
3537 // dcd R_ARM_REL32(X)
3538 };
3539
3540 // V4T Thumb -> ARM long branch stub, PIC.
3541 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
3542 {
3543 Insn_template::thumb16_insn(0x4778), // bx pc
3544 Insn_template::thumb16_insn(0x46c0), // nop
3545 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3546 Insn_template::arm_insn(0xe08cf00f), // add pc, ip, pc
3547 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
3548 // dcd R_ARM_REL32(X-4)
3549 };
3550
3551 // Thumb -> Thumb long branch stub, PIC. Used on M-profile
3552 // architectures.
3553 static const Insn_template elf32_arm_stub_long_branch_thumb_only_pic[] =
3554 {
3555 Insn_template::thumb16_insn(0xb401), // push {r0}
3556 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
3557 Insn_template::thumb16_insn(0x46fc), // mov ip, pc
3558 Insn_template::thumb16_insn(0x4484), // add ip, r0
3559 Insn_template::thumb16_insn(0xbc01), // pop {r0}
3560 Insn_template::thumb16_insn(0x4760), // bx ip
3561 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 4),
3562 // dcd R_ARM_REL32(X)
3563 };
3564
3565 // V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
3566 // allowed.
3567 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
3568 {
3569 Insn_template::thumb16_insn(0x4778), // bx pc
3570 Insn_template::thumb16_insn(0x46c0), // nop
3571 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
3572 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3573 Insn_template::arm_insn(0xe12fff1c), // bx ip
3574 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
3575 // dcd R_ARM_REL32(X)
3576 };
3577
3578 // Cortex-A8 erratum-workaround stubs.
3579
3580 // Stub used for conditional branches (which may be beyond +/-1MB away,
3581 // so we can't use a conditional branch to reach this stub).
3582
3583 // original code:
3584 //
3585 // b<cond> X
3586 // after:
3587 //
3588 static const Insn_template elf32_arm_stub_a8_veneer_b_cond[] =
3589 {
3590 Insn_template::thumb16_bcond_insn(0xd001), // b<cond>.n true
3591 Insn_template::thumb32_b_insn(0xf000b800, -4), // b.w after
3592 Insn_template::thumb32_b_insn(0xf000b800, -4) // true:
3593 // b.w X
3594 };
3595
3596 // Stub used for b.w and bl.w instructions.
3597
3598 static const Insn_template elf32_arm_stub_a8_veneer_b[] =
3599 {
3600 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
3601 };
3602
3603 static const Insn_template elf32_arm_stub_a8_veneer_bl[] =
3604 {
3605 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
3606 };
3607
3608 // Stub used for Thumb-2 blx.w instructions. We modify the original blx.w
3609 // instruction (which switches to ARM mode) to point to this stub. The stub
3610 // then jumps to the real destination using an ARM-mode branch.
3611 static const Insn_template elf32_arm_stub_a8_veneer_blx[] =
3612 {
3613 Insn_template::arm_rel_insn(0xea000000, -8) // b dest
3614 };
3615
3616 // Fill in the stub template look-up table. Stub templates are constructed
3617 // per instance of Stub_factory for fast look-up without locking
3618 // in a thread-enabled environment.
3619
3620 this->stub_templates_[arm_stub_none] =
3621 new Stub_template(arm_stub_none, NULL, 0);
3622
3623 #define DEF_STUB(x) \
3624 do \
3625 { \
3626 size_t array_size \
3627 = sizeof(elf32_arm_stub_##x) / sizeof(elf32_arm_stub_##x[0]); \
3628 Stub_type type = arm_stub_##x; \
3629 this->stub_templates_[type] = \
3630 new Stub_template(type, elf32_arm_stub_##x, array_size); \
3631 } \
3632 while (0);
3633
3634 DEF_STUBS
3635 #undef DEF_STUB
3636 }
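
// For illustration only (assuming DEF_STUBS expands to a list of DEF_STUB
// invocations, one per stub type above), a single expansion of the macro is
// roughly:
//
//   this->stub_templates_[arm_stub_long_branch_any_any] =
//     new Stub_template(arm_stub_long_branch_any_any,
//                       elf32_arm_stub_long_branch_any_any,
//                       sizeof(elf32_arm_stub_long_branch_any_any)
//                       / sizeof(elf32_arm_stub_long_branch_any_any[0]));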
3637
3638 // Stub_table methods.
3639
3640 // Remove all Cortex-A8 stubs.
3641
3642 template<bool big_endian>
3643 void
3644 Stub_table<big_endian>::remove_all_cortex_a8_stubs()
3645 {
3646 for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
3647 p != this->cortex_a8_stubs_.end();
3648 ++p)
3649 delete p->second;
3650 this->cortex_a8_stubs_.clear();
3651 }
3652
3653 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
3654
3655 template<bool big_endian>
3656 void
3657 Stub_table<big_endian>::relocate_stub(
3658 Stub* stub,
3659 const Relocate_info<32, big_endian>* relinfo,
3660 Target_arm<big_endian>* arm_target,
3661 Output_section* output_section,
3662 unsigned char* view,
3663 Arm_address address,
3664 section_size_type view_size)
3665 {
3666 const Stub_template* stub_template = stub->stub_template();
3667 if (stub_template->reloc_count() != 0)
3668 {
3669 // Adjust view to cover the stub only.
3670 section_size_type offset = stub->offset();
3671 section_size_type stub_size = stub_template->size();
3672 gold_assert(offset + stub_size <= view_size);
3673
3674 arm_target->relocate_stub(stub, relinfo, output_section, view + offset,
3675 address + offset, stub_size);
3676 }
3677 }
3678
3679 // Relocate all stubs in this stub table.
3680
3681 template<bool big_endian>
3682 void
3683 Stub_table<big_endian>::relocate_stubs(
3684 const Relocate_info<32, big_endian>* relinfo,
3685 Target_arm<big_endian>* arm_target,
3686 Output_section* output_section,
3687 unsigned char* view,
3688 Arm_address address,
3689 section_size_type view_size)
3690 {
3691 // The view passed in must cover exactly this stub table; check its
3692 // address and size.
3693 gold_assert(address == this->address()
3694 && (view_size
3695 == static_cast<section_size_type>(this->data_size())));
3696
3697 // Relocate all relocation stubs.
3698 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
3699 p != this->reloc_stubs_.end();
3700 ++p)
3701 this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
3702 address, view_size);
3703
3704 // Relocate all Cortex-A8 stubs.
3705 for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
3706 p != this->cortex_a8_stubs_.end();
3707 ++p)
3708 this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
3709 address, view_size);
3710 }
3711
3712 // Write out the stubs to file.
3713
3714 template<bool big_endian>
3715 void
3716 Stub_table<big_endian>::do_write(Output_file* of)
3717 {
3718 off_t offset = this->offset();
3719 const section_size_type oview_size =
3720 convert_to_section_size_type(this->data_size());
3721 unsigned char* const oview = of->get_output_view(offset, oview_size);
3722
3723 // Write relocation stubs.
3724 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
3725 p != this->reloc_stubs_.end();
3726 ++p)
3727 {
3728 Reloc_stub* stub = p->second;
3729 Arm_address address = this->address() + stub->offset();
3730 gold_assert(address
3731 == align_address(address,
3732 stub->stub_template()->alignment()));
3733 stub->write(oview + stub->offset(), stub->stub_template()->size(),
3734 big_endian);
3735 }
3736
3737 // Write Cortex-A8 stubs.
3738 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
3739 p != this->cortex_a8_stubs_.end();
3740 ++p)
3741 {
3742 Cortex_a8_stub* stub = p->second;
3743 Arm_address address = this->address() + stub->offset();
3744 gold_assert(address
3745 == align_address(address,
3746 stub->stub_template()->alignment()));
3747 stub->write(oview + stub->offset(), stub->stub_template()->size(),
3748 big_endian);
3749 }
3750
3751 of->write_output_view(this->offset(), oview_size, oview);
3752 }
3753
3754 // Update the data size and address alignment of the stub table at the end
3755 // of a relaxation pass. Return true if either the data size or the
3756 // alignment changed in this relaxation pass.
3757
3758 template<bool big_endian>
3759 bool
3760 Stub_table<big_endian>::update_data_size_and_addralign()
3761 {
3762 off_t size = 0;
3763 unsigned addralign = 1;
3764
3765 // Go over all stubs in table to compute data size and address alignment.
3766
3767 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
3768 p != this->reloc_stubs_.end();
3769 ++p)
3770 {
3771 const Stub_template* stub_template = p->second->stub_template();
3772 addralign = std::max(addralign, stub_template->alignment());
3773 size = (align_address(size, stub_template->alignment())
3774 + stub_template->size());
3775 }
3776
3777 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
3778 p != this->cortex_a8_stubs_.end();
3779 ++p)
3780 {
3781 const Stub_template* stub_template = p->second->stub_template();
3782 addralign = std::max(addralign, stub_template->alignment());
3783 size = (align_address(size, stub_template->alignment())
3784 + stub_template->size());
3785 }
3786
3787 // Check if either data size or alignment changed in this pass.
3788 // Update prev_data_size_ and prev_addralign_. These will be used
3789 // as the current data size and address alignment for the next pass.
3790 bool changed = size != this->prev_data_size_;
3791 this->prev_data_size_ = size;
3792
3793 if (addralign != this->prev_addralign_)
3794 changed = true;
3795 this->prev_addralign_ = addralign;
3796
3797 return changed;
3798 }
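
// A small worked example of the size computation above (illustrative,
// assuming align_address(x, a) rounds x up to a multiple of a): for two
// stubs of sizes 8 and 12 with alignments 4 and 8 respectively,
//
//   size = align_address(0, 4) + 8;    // 8
//   size = align_address(8, 8) + 12;   // 20
//
// and the table's address alignment becomes max(1, 4, 8) == 8.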
3799
3800 // Finalize the stubs. This sets the offsets of the stubs within the stub
3801 // table. It also marks all input sections needing Cortex-A8 workaround.
3802
3803 template<bool big_endian>
3804 void
3805 Stub_table<big_endian>::finalize_stubs()
3806 {
3807 off_t off = 0;
3808 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
3809 p != this->reloc_stubs_.end();
3810 ++p)
3811 {
3812 Reloc_stub* stub = p->second;
3813 const Stub_template* stub_template = stub->stub_template();
3814 uint64_t stub_addralign = stub_template->alignment();
3815 off = align_address(off, stub_addralign);
3816 stub->set_offset(off);
3817 off += stub_template->size();
3818 }
3819
3820 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
3821 p != this->cortex_a8_stubs_.end();
3822 ++p)
3823 {
3824 Cortex_a8_stub* stub = p->second;
3825 const Stub_template* stub_template = stub->stub_template();
3826 uint64_t stub_addralign = stub_template->alignment();
3827 off = align_address(off, stub_addralign);
3828 stub->set_offset(off);
3829 off += stub_template->size();
3830
3831 // Mark the input section so that we can quickly determine later whether
3832 // a code section needs the Cortex-A8 workaround.
3833 Arm_relobj<big_endian>* arm_relobj =
3834 Arm_relobj<big_endian>::as_arm_relobj(stub->relobj());
3835 arm_relobj->mark_section_for_cortex_a8_workaround(stub->shndx());
3836 }
3837
3838 gold_assert(off <= this->prev_data_size_);
3839 }
3840
3841 // Apply Cortex-A8 workaround to an address range between VIEW_ADDRESS
3842 // and VIEW_ADDRESS + VIEW_SIZE - 1. VIEW points to the mapped address
3843 // of the address range seen by the linker.
3844
3845 template<bool big_endian>
3846 void
3847 Stub_table<big_endian>::apply_cortex_a8_workaround_to_address_range(
3848 Target_arm<big_endian>* arm_target,
3849 unsigned char* view,
3850 Arm_address view_address,
3851 section_size_type view_size)
3852 {
3853 // Cortex-A8 stubs are sorted by addresses of branches being fixed up.
3854 for (Cortex_a8_stub_list::const_iterator p =
3855 this->cortex_a8_stubs_.lower_bound(view_address);
3856 ((p != this->cortex_a8_stubs_.end())
3857 && (p->first < (view_address + view_size)));
3858 ++p)
3859 {
3860 // We do not store the THUMB bit in the LSB of either the branch address
3861 // or the stub offset. There is no need to strip the LSB.
3862 Arm_address branch_address = p->first;
3863 const Cortex_a8_stub* stub = p->second;
3864 Arm_address stub_address = this->address() + stub->offset();
3865
3866 // Offset of the branch instruction relative to this view.
3867 section_size_type offset =
3868 convert_to_section_size_type(branch_address - view_address);
3869 gold_assert((offset + 4) <= view_size);
3870
3871 arm_target->apply_cortex_a8_workaround(stub, stub_address,
3872 view + offset, branch_address);
3873 }
3874 }
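
// Illustrative example of the range scan above: with stubs keyed at branch
// addresses 0x8ffa and 0xa002 and a view covering [0x8000, 0xa000),
// lower_bound(0x8000) yields the 0x8ffa entry, which is processed, and the
// loop stops at 0xa002 since it lies outside the view.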
3875
3876 // Arm_input_section methods.
3877
3878 // Initialize an Arm_input_section.
3879
3880 template<bool big_endian>
3881 void
3882 Arm_input_section<big_endian>::init()
3883 {
3884 Relobj* relobj = this->relobj();
3885 unsigned int shndx = this->shndx();
3886
3887 // Cache these to speed up size and alignment queries. It is too slow
3888 // to call section_addralign and section_size every time.
3889 this->original_addralign_ = relobj->section_addralign(shndx);
3890 this->original_size_ = relobj->section_size(shndx);
3891
3892 // We want to make this look like the original input section after
3893 // output sections are finalized.
3894 Output_section* os = relobj->output_section(shndx);
3895 off_t offset = relobj->output_section_offset(shndx);
3896 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
3897 this->set_address(os->address() + offset);
3898 this->set_file_offset(os->offset() + offset);
3899
3900 this->set_current_data_size(this->original_size_);
3901 this->finalize_data_size();
3902 }
3903
3904 template<bool big_endian>
3905 void
3906 Arm_input_section<big_endian>::do_write(Output_file* of)
3907 {
3908 // We have to write out the original section content.
3909 section_size_type section_size;
3910 const unsigned char* section_contents =
3911 this->relobj()->section_contents(this->shndx(), &section_size, false);
3912 of->write(this->offset(), section_contents, section_size);
3913
3914 // If this owns a stub table and it is not empty, write it.
3915 if (this->is_stub_table_owner() && !this->stub_table_->empty())
3916 this->stub_table_->write(of);
3917 }
3918
3919 // Finalize data size.
3920
3921 template<bool big_endian>
3922 void
3923 Arm_input_section<big_endian>::set_final_data_size()
3924 {
3925 // If this owns a stub table, finalize its data size as well.
3926 if (this->is_stub_table_owner())
3927 {
3928 uint64_t address = this->address();
3929
3930 // The stub table comes after the original section contents.
3931 address += this->original_size_;
3932 address = align_address(address, this->stub_table_->addralign());
3933 off_t offset = this->offset() + (address - this->address());
3934 this->stub_table_->set_address_and_file_offset(address, offset);
3935 address += this->stub_table_->data_size();
3936 gold_assert(address == this->address() + this->current_data_size());
3937 }
3938
3939 this->set_data_size(this->current_data_size());
3940 }
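
// Layout example (illustrative): an input section of original size 0x106
// that owns a stub table with alignment 4 places the stub table at offset
// align_address(0x106, 4) == 0x108 from the section's start, so the
// section's final data size is 0x108 plus the stub table's data size.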
3941
3942 // Reset address and file offset.
3943
3944 template<bool big_endian>
3945 void
3946 Arm_input_section<big_endian>::do_reset_address_and_file_offset()
3947 {
3948 // Size of the original input section contents.
3949 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
3950
3951 // If this is a stub table owner, account for the stub table size.
3952 if (this->is_stub_table_owner())
3953 {
3954 Stub_table<big_endian>* stub_table = this->stub_table_;
3955
3956 // Reset the stub table's address and file offset. The
3957 // current data size for the child will be updated after that.
3958 stub_table_->reset_address_and_file_offset();
3959 off = align_address(off, stub_table_->addralign());
3960 off += stub_table->current_data_size();
3961 }
3962
3963 this->set_current_data_size(off);
3964 }
3965
3966 // Arm_output_section methods.
3967
3968 // Create a stub group for input sections from BEGIN to END. OWNER
3969 // points to the input section to be the owner of a new stub table.
3970
3971 template<bool big_endian>
3972 void
3973 Arm_output_section<big_endian>::create_stub_group(
3974 Input_section_list::const_iterator begin,
3975 Input_section_list::const_iterator end,
3976 Input_section_list::const_iterator owner,
3977 Target_arm<big_endian>* target,
3978 std::vector<Output_relaxed_input_section*>* new_relaxed_sections)
3979 {
3980 // Currently we convert ordinary input sections into relaxed sections only
3981 // at this point, but we may want to support creating relaxed input sections
3982 // very early. So we check here to see if OWNER is already a relaxed
3983 // section.
3984
3985 Arm_input_section<big_endian>* arm_input_section;
3986 if (owner->is_relaxed_input_section())
3987 {
3988 arm_input_section =
3989 Arm_input_section<big_endian>::as_arm_input_section(
3990 owner->relaxed_input_section());
3991 }
3992 else
3993 {
3994 gold_assert(owner->is_input_section());
3995 // Create a new relaxed input section.
3996 arm_input_section =
3997 target->new_arm_input_section(owner->relobj(), owner->shndx());
3998 new_relaxed_sections->push_back(arm_input_section);
3999 }
4000
4001 // Create a stub table.
4002 Stub_table<big_endian>* stub_table =
4003 target->new_stub_table(arm_input_section);
4004
4005 arm_input_section->set_stub_table(stub_table);
4006
4007 Input_section_list::const_iterator p = begin;
4008 Input_section_list::const_iterator prev_p;
4009
4010 // Look for input sections or relaxed input sections in [begin ... end].
4011 do
4012 {
4013 if (p->is_input_section() || p->is_relaxed_input_section())
4014 {
4015 // The stub table information for input sections lives
4016 // in their objects.
4017 Arm_relobj<big_endian>* arm_relobj =
4018 Arm_relobj<big_endian>::as_arm_relobj(p->relobj());
4019 arm_relobj->set_stub_table(p->shndx(), stub_table);
4020 }
4021 prev_p = p++;
4022 }
4023 while (prev_p != end);
4024 }
4025
4026 // Group input sections for stub generation. GROUP_SIZE is roughly the limit
4027 // of a stub group's size. We grow a stub group by adding input sections
4028 // until the size is just below GROUP_SIZE. The last input section will own
4029 // the stub table. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add input
4030 // sections after the stub table, effectively doubling the group size.
4031 //
4032 // This is similar to the group_sections() function in elf32-arm.c but is
4033 // implemented differently.
4034
4035 template<bool big_endian>
4036 void
4037 Arm_output_section<big_endian>::group_sections(
4038 section_size_type group_size,
4039 bool stubs_always_after_branch,
4040 Target_arm<big_endian>* target)
4041 {
4042 // We only care about sections containing code.
4043 if ((this->flags() & elfcpp::SHF_EXECINSTR) == 0)
4044 return;
4045
4046 // States for grouping.
4047 typedef enum
4048 {
4049 // No group is being built.
4050 NO_GROUP,
4051 // A group is being built but the stub table is not found yet.
4052 // We keep growing the group until its size is just under GROUP_SIZE.
4053 // The last input section in the group will be used as the stub table.
4054 FINDING_STUB_SECTION,
4055 // A group is being built and we have already found a stub table.
4056 // We enter this state to grow a stub group by adding input sections
4057 // after the stub table. This effectively doubles the group size.
4058 HAS_STUB_SECTION
4059 } State;
4060
4061 // Any newly created relaxed sections are stored here.
4062 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
4063
4064 State state = NO_GROUP;
4065 section_size_type off = 0;
4066 section_size_type group_begin_offset = 0;
4067 section_size_type group_end_offset = 0;
4068 section_size_type stub_table_end_offset = 0;
4069 Input_section_list::const_iterator group_begin =
4070 this->input_sections().end();
4071 Input_section_list::const_iterator stub_table =
4072 this->input_sections().end();
4073 Input_section_list::const_iterator group_end = this->input_sections().end();
4074 for (Input_section_list::const_iterator p = this->input_sections().begin();
4075 p != this->input_sections().end();
4076 ++p)
4077 {
4078 section_size_type section_begin_offset =
4079 align_address(off, p->addralign());
4080 section_size_type section_end_offset =
4081 section_begin_offset + p->data_size();
4082
4083 // Check to see if we should group the previously seen sections.
4084 switch (state)
4085 {
4086 case NO_GROUP:
4087 break;
4088
4089 case FINDING_STUB_SECTION:
4090 // Adding this section makes the group larger than GROUP_SIZE.
4091 if (section_end_offset - group_begin_offset >= group_size)
4092 {
4093 if (stubs_always_after_branch)
4094 {
4095 gold_assert(group_end != this->input_sections().end());
4096 this->create_stub_group(group_begin, group_end, group_end,
4097 target, &new_relaxed_sections);
4098 state = NO_GROUP;
4099 }
4100 else
4101 {
4102 // But wait, there's more! Input sections up to
4103 // stub_group_size bytes after the stub table can be
4104 // handled by it too.
4105 state = HAS_STUB_SECTION;
4106 stub_table = group_end;
4107 stub_table_end_offset = group_end_offset;
4108 }
4109 }
4110 break;
4111
4112 case HAS_STUB_SECTION:
4113 // Adding this section makes the post stub-section group larger
4114 // than GROUP_SIZE.
4115 if (section_end_offset - stub_table_end_offset >= group_size)
4116 {
4117 gold_assert(group_end != this->input_sections().end());
4118 this->create_stub_group(group_begin, group_end, stub_table,
4119 target, &new_relaxed_sections);
4120 state = NO_GROUP;
4121 }
4122 break;
4123
4124 default:
4125 gold_unreachable();
4126 }
4127
4128 // If we see an input section and currently there is no group, start
4129 // a new one. Skip any empty sections.
4130 if ((p->is_input_section() || p->is_relaxed_input_section())
4131 && (p->relobj()->section_size(p->shndx()) != 0))
4132 {
4133 if (state == NO_GROUP)
4134 {
4135 state = FINDING_STUB_SECTION;
4136 group_begin = p;
4137 group_begin_offset = section_begin_offset;
4138 }
4139
4140 // Keep track of the last input section seen.
4141 group_end = p;
4142 group_end_offset = section_end_offset;
4143 }
4144
4145 off = section_end_offset;
4146 }
4147
4148 // Create a stub group for any ungrouped sections.
4149 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
4150 {
4151 gold_assert(group_end != this->input_sections().end());
4152 this->create_stub_group(group_begin, group_end,
4153 (state == FINDING_STUB_SECTION
4154 ? group_end
4155 : stub_table),
4156 target, &new_relaxed_sections);
4157 }
4158
4159 // Convert input sections into relaxed input sections in a batch.
4160 if (!new_relaxed_sections.empty())
4161 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
4162
4163 // Update the section offsets
4164 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
4165 {
4166 Arm_relobj<big_endian>* arm_relobj =
4167 Arm_relobj<big_endian>::as_arm_relobj(
4168 new_relaxed_sections[i]->relobj());
4169 unsigned int shndx = new_relaxed_sections[i]->shndx();
4170 // Tell Arm_relobj that this input section is converted.
4171 arm_relobj->convert_input_section_to_relaxed_section(shndx);
4172 }
4173 }
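
// Grouping example (illustrative): with GROUP_SIZE of 0x200000 and
// stubs_always_after_branch false, code sections are accumulated until adding
// one would push the span past 0x200000; the last section already accepted
// becomes the stub table owner, and up to roughly another 0x200000 bytes of
// sections following it are served by the same stub table before a new group
// starts.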
4174
4175 // Arm_relobj methods.
4176
4177 // Determine if we want to scan the SHNDX-th section for relocation stubs.
4178 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
4179
4180 template<bool big_endian>
4181 bool
4182 Arm_relobj<big_endian>::section_needs_reloc_stub_scanning(
4183 const elfcpp::Shdr<32, big_endian>& shdr,
4184 const Relobj::Output_sections& out_sections,
4185 const Symbol_table *symtab)
4186 {
4187 unsigned int sh_type = shdr.get_sh_type();
4188 if (sh_type != elfcpp::SHT_REL && sh_type != elfcpp::SHT_RELA)
4189 return false;
4190
4191 // Ignore empty section.
4192 off_t sh_size = shdr.get_sh_size();
4193 if (sh_size == 0)
4194 return false;
4195
4196 // Ignore reloc section with bad info. This error will be
4197 // reported in the final link.
4198 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
4199 if (index >= this->shnum())
4200 return false;
4201
4202 // Ignore this relocation section if it is against a section which we
4203 // discarded or which is folded into another section due to ICF.
4205 if (out_sections[index] == NULL || symtab->is_section_folded(this, index))
4206 return false;
4207
4208 // Ignore reloc section with unexpected symbol table. The
4209 // error will be reported in the final link.
4210 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
4211 return false;
4212
4213 unsigned int reloc_size;
4214 if (sh_type == elfcpp::SHT_REL)
4215 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
4216 else
4217 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
4218
4219 // Ignore reloc section with unexpected entsize or uneven size.
4220 // The error will be reported in the final link.
4221 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
4222 return false;
4223
4224 return true;
4225 }
4226
4227 // Determine if we want to scan the SHNDX-th section for non-relocation stubs.
4228 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
4229
4230 template<bool big_endian>
4231 bool
4232 Arm_relobj<big_endian>::section_needs_cortex_a8_stub_scanning(
4233 const elfcpp::Shdr<32, big_endian>& shdr,
4234 unsigned int shndx,
4235 Output_section* os,
4236 const Symbol_table* symtab)
4237 {
4238 // We only scan non-empty code sections.
4239 if ((shdr.get_sh_flags() & elfcpp::SHF_EXECINSTR) == 0
4240 || shdr.get_sh_size() == 0)
4241 return false;
4242
4243 // Ignore discarded or ICF'ed sections.
4244 if (os == NULL || symtab->is_section_folded(this, shndx))
4245 return false;
4246
4247 // Find output address of section.
4248 Arm_address address = os->output_address(this, shndx, 0);
4249
4250 // If the section does not cross any 4K-boundaries, it does not need to
4251 // be scanned.
4252 if ((address & ~0xfffU) == ((address + shdr.get_sh_size() - 1) & ~0xfffU))
4253 return false;
4254
4255 return true;
4256 }
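
// Worked example of the 4K-boundary test above: a code section placed at
// output address 0x8f80 with size 0x100 covers 0x8f80..0x907f; since
// (0x8f80 & ~0xfff) == 0x8000 but (0x907f & ~0xfff) == 0x9000, the section
// crosses a 4K boundary and must be scanned.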
4257
4258 // Scan a section for Cortex-A8 workaround.
4259
4260 template<bool big_endian>
4261 void
4262 Arm_relobj<big_endian>::scan_section_for_cortex_a8_erratum(
4263 const elfcpp::Shdr<32, big_endian>& shdr,
4264 unsigned int shndx,
4265 Output_section* os,
4266 Target_arm<big_endian>* arm_target)
4267 {
4268 Arm_address output_address = os->output_address(this, shndx, 0);
4269
4270 // Get the section contents.
4271 section_size_type input_view_size = 0;
4272 const unsigned char* input_view =
4273 this->section_contents(shndx, &input_view_size, false);
4274
4275 // We need to go through the mapping symbols to determine what to
4276 // scan. There are two reasons. First, we should look at THUMB code and
4277 // THUMB code only. Second, we only want to look near 4K-page boundaries
4278 // to speed up the scanning.
4279
4280 // Look for the first mapping symbol in this section. It should be
4281 // at (shndx, 0).
4282 Mapping_symbol_position section_start(shndx, 0);
4283 typename Mapping_symbols_info::const_iterator p =
4284 this->mapping_symbols_info_.lower_bound(section_start);
4285
4286 if (p == this->mapping_symbols_info_.end()
4287 || p->first != section_start)
4288 {
4289 gold_warning(_("Cortex-A8 erratum scanning failed because there "
4290 "is no mapping symbols for section %u of %s"),
4291 shndx, this->name().c_str());
4292 return;
4293 }
4294
4295 while (p != this->mapping_symbols_info_.end()
4296 && p->first.first == shndx)
4297 {
4298 typename Mapping_symbols_info::const_iterator next =
4299 this->mapping_symbols_info_.upper_bound(p->first);
4300
4301 // Only scan part of a section with THUMB code.
4302 if (p->second == 't')
4303 {
4304 // Determine the end of this range.
4305 section_size_type span_start =
4306 convert_to_section_size_type(p->first.second);
4307 section_size_type span_end;
4308 if (next != this->mapping_symbols_info_.end()
4309 && next->first.first == shndx)
4310 span_end = convert_to_section_size_type(next->first.second);
4311 else
4312 span_end = convert_to_section_size_type(shdr.get_sh_size());
4313
4314 if (((span_start + output_address) & ~0xfffUL)
4315 != ((span_end + output_address - 1) & ~0xfffUL))
4316 {
4317 arm_target->scan_span_for_cortex_a8_erratum(this, shndx,
4318 span_start, span_end,
4319 input_view,
4320 output_address);
4321 }
4322 }
4323
4324 p = next;
4325 }
4326 }
4327
4328 // Scan relocations for stub generation.
4329
4330 template<bool big_endian>
4331 void
4332 Arm_relobj<big_endian>::scan_sections_for_stubs(
4333 Target_arm<big_endian>* arm_target,
4334 const Symbol_table* symtab,
4335 const Layout* layout)
4336 {
4337 unsigned int shnum = this->shnum();
4338 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
4339
4340 // Read the section headers.
4341 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
4342 shnum * shdr_size,
4343 true, true);
4344
4345 // To speed up processing, we set up hash tables for fast lookup of
4346 // input offsets to output addresses.
4347 this->initialize_input_to_output_maps();
4348
4349 const Relobj::Output_sections& out_sections(this->output_sections());
4350
4351 Relocate_info<32, big_endian> relinfo;
4352 relinfo.symtab = symtab;
4353 relinfo.layout = layout;
4354 relinfo.object = this;
4355
4356 // Do relocation stub scanning.
4357 const unsigned char* p = pshdrs + shdr_size;
4358 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
4359 {
4360 const elfcpp::Shdr<32, big_endian> shdr(p);
4361 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab))
4362 {
4363 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
4364 Arm_address output_offset = this->get_output_section_offset(index);
4365 Arm_address output_address;
4366 if(output_offset != invalid_address)
4367 output_address = out_sections[index]->address() + output_offset;
4368 else
4369 {
4370 // Currently this only happens for a relaxed section.
4371 const Output_relaxed_input_section* poris =
4372 out_sections[index]->find_relaxed_input_section(this, index);
4373 gold_assert(poris != NULL);
4374 output_address = poris->address();
4375 }
4376
4377 // Get the relocations.
4378 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
4379 shdr.get_sh_size(),
4380 true, false);
4381
4382 // Get the section contents. This does not work for the case in which
4383 // we modify the contents of an input section; we would need to pass the
4384 // output view under such circumstances.
4385 section_size_type input_view_size = 0;
4386 const unsigned char* input_view =
4387 this->section_contents(index, &input_view_size, false);
4388
4389 relinfo.reloc_shndx = i;
4390 relinfo.data_shndx = index;
4391 unsigned int sh_type = shdr.get_sh_type();
4392 unsigned int reloc_size;
4393 if (sh_type == elfcpp::SHT_REL)
4394 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
4395 else
4396 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
4397
4398 Output_section* os = out_sections[index];
4399 arm_target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
4400 shdr.get_sh_size() / reloc_size,
4401 os,
4402 output_offset == invalid_address,
4403 input_view, output_address,
4404 input_view_size);
4405 }
4406 }
4407
4408 // Do Cortex-A8 erratum stub scanning. This has to be done for a section
4409 // after its relocation section, if there is one, is processed for
4410 // relocation stubs. Merging this loop with the one above would have been
4411 // complicated since we would have had to make sure that relocation stub
4412 // scanning is done first.
4413 if (arm_target->fix_cortex_a8())
4414 {
4415 const unsigned char* p = pshdrs + shdr_size;
4416 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
4417 {
4418 const elfcpp::Shdr<32, big_endian> shdr(p);
4419 if (this->section_needs_cortex_a8_stub_scanning(shdr, i,
4420 out_sections[i],
4421 symtab))
4422 this->scan_section_for_cortex_a8_erratum(shdr, i, out_sections[i],
4423 arm_target);
4424 }
4425 }
4426
4427 // After we've done the relocations, we release the hash tables,
4428 // since we no longer need them.
4429 this->free_input_to_output_maps();
4430 }
4431
4432 // Count the local symbols. The ARM backend needs to know if a symbol
4433 // is a THUMB function or not. For global symbols, it is easy because
4434 // the Symbol object keeps the ELF symbol type. For local symbols it is
4435 // harder because we cannot access this information directly. So we override
4436 // do_count_local_symbols in the parent class and scan local symbols to mark
4437 // THUMB functions. This is not the most efficient way, but I do not want to
4438 // slow down other ports by calling a per-symbol target hook inside
4439 // Sized_relobj<size, big_endian>::do_count_local_symbols.
4440
4441 template<bool big_endian>
4442 void
4443 Arm_relobj<big_endian>::do_count_local_symbols(
4444 Stringpool_template<char>* pool,
4445 Stringpool_template<char>* dynpool)
4446 {
4447 // We need to fix up the values of any local symbols whose type is
4448 // STT_ARM_TFUNC.
4449
4450 // Ask parent to count the local symbols.
4451 Sized_relobj<32, big_endian>::do_count_local_symbols(pool, dynpool);
4452 const unsigned int loccount = this->local_symbol_count();
4453 if (loccount == 0)
4454 return;
4455
4456 // Initialize the THUMB function bit-vector.
4457 std::vector<bool> empty_vector(loccount, false);
4458 this->local_symbol_is_thumb_function_.swap(empty_vector);
4459
4460 // Read the symbol table section header.
4461 const unsigned int symtab_shndx = this->symtab_shndx();
4462 elfcpp::Shdr<32, big_endian>
4463 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
4464 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
4465
4466 // Read the local symbols.
4467 const int sym_size = elfcpp::Elf_sizes<32>::sym_size;
4468 gold_assert(loccount == symtabshdr.get_sh_info());
4469 off_t locsize = loccount * sym_size;
4470 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
4471 locsize, true, true);
4472
4473 // For mapping symbol processing, we need to read the symbol names.
4474 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
4475 if (strtab_shndx >= this->shnum())
4476 {
4477 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
4478 return;
4479 }
4480
4481 elfcpp::Shdr<32, big_endian>
4482 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
4483 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
4484 {
4485 this->error(_("symbol table name section has wrong type: %u"),
4486 static_cast<unsigned int>(strtabshdr.get_sh_type()));
4487 return;
4488 }
4489 const char* pnames =
4490 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
4491 strtabshdr.get_sh_size(),
4492 false, false));
4493
4494 // Loop over the local symbols and mark any local symbols pointing
4495 // to THUMB functions.
4496
4497 // Skip the first dummy symbol.
4498 psyms += sym_size;
4499 typename Sized_relobj<32, big_endian>::Local_values* plocal_values =
4500 this->local_values();
4501 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
4502 {
4503 elfcpp::Sym<32, big_endian> sym(psyms);
4504 elfcpp::STT st_type = sym.get_st_type();
4505 Symbol_value<32>& lv((*plocal_values)[i]);
4506 Arm_address input_value = lv.input_value();
4507
4508 // Check to see if this is a mapping symbol.
4509 const char* sym_name = pnames + sym.get_st_name();
4510 if (Target_arm<big_endian>::is_mapping_symbol_name(sym_name))
4511 {
4512 unsigned int input_shndx = sym.get_st_shndx();
4513
4514 // Strip off the LSB in case this is a THUMB symbol.
4515 Mapping_symbol_position msp(input_shndx, input_value & ~1U);
4516 this->mapping_symbols_info_[msp] = sym_name[1];
4517 }
4518
4519 if (st_type == elfcpp::STT_ARM_TFUNC
4520 || (st_type == elfcpp::STT_FUNC && ((input_value & 1) != 0)))
4521 {
4522 // This is a THUMB function. Mark this and canonicalize the
4523 // symbol value by setting LSB.
4524 this->local_symbol_is_thumb_function_[i] = true;
4525 if ((input_value & 1) == 0)
4526 lv.set_input_value(input_value | 1);
4527 }
4528 }
4529 }
4530
4531 // Relocate sections.
4532 template<bool big_endian>
4533 void
4534 Arm_relobj<big_endian>::do_relocate_sections(
4535 const Symbol_table* symtab,
4536 const Layout* layout,
4537 const unsigned char* pshdrs,
4538 typename Sized_relobj<32, big_endian>::Views* pviews)
4539 {
4540 // Call parent to relocate sections.
4541 Sized_relobj<32, big_endian>::do_relocate_sections(symtab, layout, pshdrs,
4542 pviews);
4543
4544 // We do not generate stubs if doing a relocatable link.
4545 if (parameters->options().relocatable())
4546 return;
4547
4548 // Relocate stub tables.
4549 unsigned int shnum = this->shnum();
4550
4551 Target_arm<big_endian>* arm_target =
4552 Target_arm<big_endian>::default_target();
4553
4554 Relocate_info<32, big_endian> relinfo;
4555 relinfo.symtab = symtab;
4556 relinfo.layout = layout;
4557 relinfo.object = this;
4558
4559 for (unsigned int i = 1; i < shnum; ++i)
4560 {
4561 Arm_input_section<big_endian>* arm_input_section =
4562 arm_target->find_arm_input_section(this, i);
4563
4564 if (arm_input_section != NULL
4565 && arm_input_section->is_stub_table_owner()
4566 && !arm_input_section->stub_table()->empty())
4567 {
4568 // We cannot discard a section if it owns a stub table.
4569 Output_section* os = this->output_section(i);
4570 gold_assert(os != NULL);
4571
4572 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
4573 relinfo.reloc_shdr = NULL;
4574 relinfo.data_shndx = i;
4575 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<32>::shdr_size;
4576
4577 gold_assert((*pviews)[i].view != NULL);
4578
4579 // We are passed the output section view. Adjust it to cover the
4580 // stub table only.
4581 Stub_table<big_endian>* stub_table = arm_input_section->stub_table();
4582 gold_assert((stub_table->address() >= (*pviews)[i].address)
4583 && ((stub_table->address() + stub_table->data_size())
4584 <= (*pviews)[i].address + (*pviews)[i].view_size));
4585
4586 off_t offset = stub_table->address() - (*pviews)[i].address;
4587 unsigned char* view = (*pviews)[i].view + offset;
4588 Arm_address address = stub_table->address();
4589 section_size_type view_size = stub_table->data_size();
4590
4591 stub_table->relocate_stubs(&relinfo, arm_target, os, view, address,
4592 view_size);
4593 }
4594
4595 // Apply Cortex A8 workaround if applicable.
4596 if (this->section_has_cortex_a8_workaround(i))
4597 {
4598 unsigned char* view = (*pviews)[i].view;
4599 Arm_address view_address = (*pviews)[i].address;
4600 section_size_type view_size = (*pviews)[i].view_size;
4601 Stub_table<big_endian>* stub_table = this->stub_tables_[i];
4602
4603 // Adjust view to cover section.
4604 Output_section* os = this->output_section(i);
4605 gold_assert(os != NULL);
4606 Arm_address section_address = os->output_address(this, i, 0);
4607 uint64_t section_size = this->section_size(i);
4608
4609 gold_assert(section_address >= view_address
4610 && ((section_address + section_size)
4611 <= (view_address + view_size)));
4612
4613 unsigned char* section_view = view + (section_address - view_address);
4614
4615 // Apply the Cortex-A8 workaround to the output address range
4616 // corresponding to this input section.
4617 stub_table->apply_cortex_a8_workaround_to_address_range(
4618 arm_target,
4619 section_view,
4620 section_address,
4621 section_size);
4622 }
4623 }
4624 }
4625
4626 // Helper functions for both Arm_relobj and Arm_dynobj to read ARM
4627 // ABI information.
4628
4629 template<bool big_endian>
4630 Attributes_section_data*
4631 read_arm_attributes_section(
4632 Object* object,
4633 Read_symbols_data *sd)
4634 {
4635 // Read the attributes section if there is one.
4636 // We read from the end because gas seems to put it near the end of
4637 // the section headers.
4638 const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
4639 const unsigned char *ps =
4640 sd->section_headers->data() + shdr_size * (object->shnum() - 1);
4641 for (unsigned int i = object->shnum(); i > 0; --i, ps -= shdr_size)
4642 {
4643 elfcpp::Shdr<32, big_endian> shdr(ps);
4644 if (shdr.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES)
4645 {
4646 section_offset_type section_offset = shdr.get_sh_offset();
4647 section_size_type section_size =
4648 convert_to_section_size_type(shdr.get_sh_size());
4649 File_view* view = object->get_lasting_view(section_offset,
4650 section_size, true, false);
4651 return new Attributes_section_data(view->data(), section_size);
4652 }
4653 }
4654 return NULL;
4655 }
4656
4657 // Read the symbol information.
4658
4659 template<bool big_endian>
4660 void
4661 Arm_relobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
4662 {
4663 // Call parent class to read symbol information.
4664 Sized_relobj<32, big_endian>::do_read_symbols(sd);
4665
4666 // Read processor-specific flags in ELF file header.
4667 const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
4668 elfcpp::Elf_sizes<32>::ehdr_size,
4669 true, false);
4670 elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
4671 this->processor_specific_flags_ = ehdr.get_e_flags();
4672 this->attributes_section_data_ =
4673 read_arm_attributes_section<big_endian>(this, sd);
4674 }
4675
4676 // Process relocations for garbage collection. The ARM target uses .ARM.exidx
4677 // sections for unwinding. These sections are referenced implicitly by
4678 // text sections linked in the section headers. If we ignore these implicit
4679 // references, the .ARM.exidx sections and any .ARM.extab sections they use
4680 // will be garbage-collected incorrectly. Hence we override the same function
4681 // in the base class to handle these implicit references.
4682
4683 template<bool big_endian>
4684 void
4685 Arm_relobj<big_endian>::do_gc_process_relocs(Symbol_table* symtab,
4686 Layout* layout,
4687 Read_relocs_data* rd)
4688 {
4689 // First, call base class method to process relocations in this object.
4690 Sized_relobj<32, big_endian>::do_gc_process_relocs(symtab, layout, rd);
4691
4692 unsigned int shnum = this->shnum();
4693 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
4694 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
4695 shnum * shdr_size,
4696 true, true);
4697
4698 // Scan section headers for sections of type SHT_ARM_EXIDX. Add references
4699 // to these from the linked text sections.
4700 const unsigned char* ps = pshdrs + shdr_size;
4701 for (unsigned int i = 1; i < shnum; ++i, ps += shdr_size)
4702 {
4703 elfcpp::Shdr<32, big_endian> shdr(ps);
4704 if (shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
4705 {
4706 // Found an .ARM.exidx section, add it to the set of reachable
4707 // sections from its linked text section.
4708 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_link());
4709 symtab->gc()->add_reference(this, text_shndx, this, i);
4710 }
4711 }
4712 }
4713
4714 // Arm_dynobj methods.
4715
4716 // Read the symbol information.
4717
4718 template<bool big_endian>
4719 void
4720 Arm_dynobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
4721 {
4722 // Call parent class to read symbol information.
4723 Sized_dynobj<32, big_endian>::do_read_symbols(sd);
4724
4725 // Read processor-specific flags in ELF file header.
4726 const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
4727 elfcpp::Elf_sizes<32>::ehdr_size,
4728 true, false);
4729 elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
4730 this->processor_specific_flags_ = ehdr.get_e_flags();
4731 this->attributes_section_data_ =
4732 read_arm_attributes_section<big_endian>(this, sd);
4733 }
4734
4735 // Stub_addend_reader methods.
4736
4737 // Read the addend of a REL relocation of type R_TYPE at VIEW.
4738
4739 template<bool big_endian>
4740 elfcpp::Elf_types<32>::Elf_Swxword
4741 Stub_addend_reader<elfcpp::SHT_REL, big_endian>::operator()(
4742 unsigned int r_type,
4743 const unsigned char* view,
4744 const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const
4745 {
4746 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
4747
4748 switch (r_type)
4749 {
4750 case elfcpp::R_ARM_CALL:
4751 case elfcpp::R_ARM_JUMP24:
4752 case elfcpp::R_ARM_PLT32:
4753 {
4754 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
4755 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
4756 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
4757 return utils::sign_extend<26>(val << 2);
4758 }
4759
4760 case elfcpp::R_ARM_THM_CALL:
4761 case elfcpp::R_ARM_THM_JUMP24:
4762 case elfcpp::R_ARM_THM_XPC22:
4763 {
4764 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
4765 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
4766 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
4767 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
4768 return RelocFuncs::thumb32_branch_offset(upper_insn, lower_insn);
4769 }
4770
4771 case elfcpp::R_ARM_THM_JUMP19:
4772 {
4773 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
4774 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
4775 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
4776 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
4777 return RelocFuncs::thumb32_cond_branch_offset(upper_insn, lower_insn);
4778 }
4779
4780 default:
4781 gold_unreachable();
4782 }
4783 }
4784
4785 // A class to handle the PLT data.
4786
4787 template<bool big_endian>
4788 class Output_data_plt_arm : public Output_section_data
4789 {
4790 public:
4791 typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
4792 Reloc_section;
4793
4794 Output_data_plt_arm(Layout*, Output_data_space*);
4795
4796 // Add an entry to the PLT.
4797 void
4798 add_entry(Symbol* gsym);
4799
4800 // Return the .rel.plt section data.
4801 const Reloc_section*
4802 rel_plt() const
4803 { return this->rel_; }
4804
4805 protected:
4806 void
4807 do_adjust_output_section(Output_section* os);
4808
4809 // Write to a map file.
4810 void
4811 do_print_to_mapfile(Mapfile* mapfile) const
4812 { mapfile->print_output_data(this, _("** PLT")); }
4813
4814 private:
4815 // Template for the first PLT entry.
4816 static const uint32_t first_plt_entry[5];
4817
4818 // Template for subsequent PLT entries.
4819 static const uint32_t plt_entry[3];
4820
4821 // Set the final size.
4822 void
4823 set_final_data_size()
4824 {
4825 this->set_data_size(sizeof(first_plt_entry)
4826 + this->count_ * sizeof(plt_entry));
4827 }
4828
4829 // Write out the PLT data.
4830 void
4831 do_write(Output_file*);
4832
4833 // The reloc section.
4834 Reloc_section* rel_;
4835 // The .got.plt section.
4836 Output_data_space* got_plt_;
4837 // The number of PLT entries.
4838 unsigned int count_;
4839 };
4840
4841 // Create the PLT section. The ordinary .got section is an argument,
4842 // since we need to refer to the start. We also create our own .got
4843 // section just for PLT entries.
4844
4845 template<bool big_endian>
4846 Output_data_plt_arm<big_endian>::Output_data_plt_arm(Layout* layout,
4847 Output_data_space* got_plt)
4848 : Output_section_data(4), got_plt_(got_plt), count_(0)
4849 {
4850 this->rel_ = new Reloc_section(false);
4851 layout->add_output_section_data(".rel.plt", elfcpp::SHT_REL,
4852 elfcpp::SHF_ALLOC, this->rel_, true, false,
4853 false, false);
4854 }
4855
4856 template<bool big_endian>
4857 void
4858 Output_data_plt_arm<big_endian>::do_adjust_output_section(Output_section* os)
4859 {
4860 os->set_entsize(0);
4861 }
4862
4863 // Add an entry to the PLT.
4864
4865 template<bool big_endian>
4866 void
4867 Output_data_plt_arm<big_endian>::add_entry(Symbol* gsym)
4868 {
4869 gold_assert(!gsym->has_plt_offset());
4870
4871 // Note that when setting the PLT offset we skip the initial
4872 // reserved PLT entry.
4873 gsym->set_plt_offset((this->count_) * sizeof(plt_entry)
4874 + sizeof(first_plt_entry));
4875
4876 ++this->count_;
4877
4878 section_offset_type got_offset = this->got_plt_->current_data_size();
4879
4880 // Every PLT entry needs a GOT entry which points back to the PLT
4881 // entry (this will be changed by the dynamic linker, normally
4882 // lazily when the function is called).
4883 this->got_plt_->set_current_data_size(got_offset + 4);
4884
4885 // Every PLT entry needs a reloc.
4886 gsym->set_needs_dynsym_entry();
4887 this->rel_->add_global(gsym, elfcpp::R_ARM_JUMP_SLOT, this->got_plt_,
4888 got_offset);
4889
4890 // Note that we don't need to save the symbol. The contents of the
4891 // PLT are independent of which symbols are used. The symbols only
4892 // appear in the relocations.
4893 }
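
// Example (illustrative): with a 20-byte first PLT entry and 12-byte
// subsequent entries, the first symbol added gets PLT offset 20 and GOT
// offset 12 (after the three reserved GOT words); the second gets PLT
// offset 32 and GOT offset 16, and so on.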
4894
4895 // ARM PLTs.
4896 // FIXME: This is not very flexible. Right now this has only been tested
4897 // on armv5te. If we are to support additional architecture features like
4898 // Thumb-2 or BE8, we need to make this more flexible like GNU ld.
4899
4900 // The first entry in the PLT.
4901 template<bool big_endian>
4902 const uint32_t Output_data_plt_arm<big_endian>::first_plt_entry[5] =
4903 {
4904 0xe52de004, // str lr, [sp, #-4]!
4905 0xe59fe004, // ldr lr, [pc, #4]
4906 0xe08fe00e, // add lr, pc, lr
4907 0xe5bef008, // ldr pc, [lr, #8]!
4908 0x00000000, // &GOT[0] - .
4909 };
4910
4911 // Subsequent entries in the PLT.
4912
4913 template<bool big_endian>
4914 const uint32_t Output_data_plt_arm<big_endian>::plt_entry[3] =
4915 {
4916 0xe28fc600, // add ip, pc, #0xNN00000
4917 0xe28cca00, // add ip, ip, #0xNN000
4918 0xe5bcf000, // ldr pc, [ip, #0xNNN]!
4919 };
4920
4921 // Write out the PLT. This uses the hand-coded instructions above,
4922 // and adjusts them as needed. This is all specified by the arm ELF
4923 // Processor Supplement.
4924
4925 template<bool big_endian>
4926 void
4927 Output_data_plt_arm<big_endian>::do_write(Output_file* of)
4928 {
4929 const off_t offset = this->offset();
4930 const section_size_type oview_size =
4931 convert_to_section_size_type(this->data_size());
4932 unsigned char* const oview = of->get_output_view(offset, oview_size);
4933
4934 const off_t got_file_offset = this->got_plt_->offset();
4935 const section_size_type got_size =
4936 convert_to_section_size_type(this->got_plt_->data_size());
4937 unsigned char* const got_view = of->get_output_view(got_file_offset,
4938 got_size);
4939 unsigned char* pov = oview;
4940
4941 Arm_address plt_address = this->address();
4942 Arm_address got_address = this->got_plt_->address();
4943
4944 // Write first PLT entry. All but the last word are constants.
4945 const size_t num_first_plt_words = (sizeof(first_plt_entry)
4946 / sizeof(plt_entry[0]));
4947 for (size_t i = 0; i < num_first_plt_words - 1; i++)
4948 elfcpp::Swap<32, big_endian>::writeval(pov + i * 4, first_plt_entry[i]);
4949 // Last word in first PLT entry is &GOT[0] - .
4950 elfcpp::Swap<32, big_endian>::writeval(pov + 16,
4951 got_address - (plt_address + 16));
4952 pov += sizeof(first_plt_entry);
4953
4954 unsigned char* got_pov = got_view;
4955
4956 memset(got_pov, 0, 12);
4957 got_pov += 12;
4958
4959 const int rel_size = elfcpp::Elf_sizes<32>::rel_size;
4960 unsigned int plt_offset = sizeof(first_plt_entry);
4961 unsigned int plt_rel_offset = 0;
4962 unsigned int got_offset = 12;
4963 const unsigned int count = this->count_;
4964 for (unsigned int i = 0;
4965 i < count;
4966 ++i,
4967 pov += sizeof(plt_entry),
4968 got_pov += 4,
4969 plt_offset += sizeof(plt_entry),
4970 plt_rel_offset += rel_size,
4971 got_offset += 4)
4972 {
4973 // Set and adjust the PLT entry itself.
4974 int32_t offset = ((got_address + got_offset)
4975 - (plt_address + plt_offset + 8));
4976
4977 gold_assert(offset >= 0 && offset < 0x0fffffff);
4978 uint32_t plt_insn0 = plt_entry[0] | ((offset >> 20) & 0xff);
4979 elfcpp::Swap<32, big_endian>::writeval(pov, plt_insn0);
4980 uint32_t plt_insn1 = plt_entry[1] | ((offset >> 12) & 0xff);
4981 elfcpp::Swap<32, big_endian>::writeval(pov + 4, plt_insn1);
4982 uint32_t plt_insn2 = plt_entry[2] | (offset & 0xfff);
4983 elfcpp::Swap<32, big_endian>::writeval(pov + 8, plt_insn2);
4984
4985 // Set the entry in the GOT.
4986 elfcpp::Swap<32, big_endian>::writeval(got_pov, plt_address);
4987 }
4988
4989 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
4990 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
4991
4992 of->write_output_view(offset, oview_size, oview);
4993 of->write_output_view(got_file_offset, got_size, got_view);
4994 }
4995
4996 // Create a PLT entry for a global symbol.
4997
4998 template<bool big_endian>
4999 void
5000 Target_arm<big_endian>::make_plt_entry(Symbol_table* symtab, Layout* layout,
5001 Symbol* gsym)
5002 {
5003 if (gsym->has_plt_offset())
5004 return;
5005
5006 if (this->plt_ == NULL)
5007 {
5008 // Create the GOT sections first.
5009 this->got_section(symtab, layout);
5010
5011 this->plt_ = new Output_data_plt_arm<big_endian>(layout, this->got_plt_);
5012 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
5013 (elfcpp::SHF_ALLOC
5014 | elfcpp::SHF_EXECINSTR),
5015 this->plt_, false, false, false, false);
5016 }
5017 this->plt_->add_entry(gsym);
5018 }
5019
5020 // Report an unsupported relocation against a local symbol.
5021
5022 template<bool big_endian>
5023 void
5024 Target_arm<big_endian>::Scan::unsupported_reloc_local(
5025 Sized_relobj<32, big_endian>* object,
5026 unsigned int r_type)
5027 {
5028 gold_error(_("%s: unsupported reloc %u against local symbol"),
5029 object->name().c_str(), r_type);
5030 }
5031
5032 // We are about to emit a dynamic relocation of type R_TYPE. If the
5033 // dynamic linker does not support it, issue an error. The GNU linker
5034 // only issues a non-PIC error for an allocated read-only section.
5035 // Here we know the section is allocated, but we don't know that it is
5036 // read-only. But we check for all the relocation types which the
5037 // glibc dynamic linker supports, so it seems appropriate to issue an
5038 // error even if the section is not read-only.
5039
5040 template<bool big_endian>
5041 void
5042 Target_arm<big_endian>::Scan::check_non_pic(Relobj* object,
5043 unsigned int r_type)
5044 {
5045 switch (r_type)
5046 {
5047 // These are the relocation types supported by glibc for ARM.
5048 case elfcpp::R_ARM_RELATIVE:
5049 case elfcpp::R_ARM_COPY:
5050 case elfcpp::R_ARM_GLOB_DAT:
5051 case elfcpp::R_ARM_JUMP_SLOT:
5052 case elfcpp::R_ARM_ABS32:
5053 case elfcpp::R_ARM_ABS32_NOI:
5054 case elfcpp::R_ARM_PC24:
5055 // FIXME: The following 3 types are not supported by Android's dynamic
5056 // linker.
5057 case elfcpp::R_ARM_TLS_DTPMOD32:
5058 case elfcpp::R_ARM_TLS_DTPOFF32:
5059 case elfcpp::R_ARM_TLS_TPOFF32:
5060 return;
5061
5062 default:
5063 // This prevents us from issuing more than one error per reloc
5064 // section. But we can still wind up issuing more than one
5065 // error per object file.
5066 if (this->issued_non_pic_error_)
5067 return;
5068 object->error(_("requires unsupported dynamic reloc; "
5069 "recompile with -fPIC"));
5070 this->issued_non_pic_error_ = true;
5071 return;
5072
5073 case elfcpp::R_ARM_NONE:
5074 gold_unreachable();
5075 }
5076 }
5077
5078 // Scan a relocation for a local symbol.
5079 // FIXME: This only handles a subset of relocation types used by Android
5080 // on ARM v5te devices.
5081
5082 template<bool big_endian>
5083 inline void
5084 Target_arm<big_endian>::Scan::local(Symbol_table* symtab,
5085 Layout* layout,
5086 Target_arm* target,
5087 Sized_relobj<32, big_endian>* object,
5088 unsigned int data_shndx,
5089 Output_section* output_section,
5090 const elfcpp::Rel<32, big_endian>& reloc,
5091 unsigned int r_type,
5092 const elfcpp::Sym<32, big_endian>&)
5093 {
5094 r_type = get_real_reloc_type(r_type);
5095 switch (r_type)
5096 {
5097 case elfcpp::R_ARM_NONE:
5098 break;
5099
5100 case elfcpp::R_ARM_ABS32:
5101 case elfcpp::R_ARM_ABS32_NOI:
5102 // If building a shared library (or a position-independent
5103 // executable), we need to create a dynamic relocation for
5104 // this location. The relocation applied at link time will
5105 // apply the link-time value, so we flag the location with
5106 // an R_ARM_RELATIVE relocation so the dynamic loader can
5107 // relocate it easily.
5108 if (parameters->options().output_is_position_independent())
5109 {
5110 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
5111 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
5112 // If we add support for reloc types other than R_ARM_ABS32 here,
5113 // we will need to call check_non_pic(object, r_type).
5114 rel_dyn->add_local_relative(object, r_sym, elfcpp::R_ARM_RELATIVE,
5115 output_section, data_shndx,
5116 reloc.get_r_offset());
5117 }
5118 break;
5119
5120 case elfcpp::R_ARM_REL32:
5121 case elfcpp::R_ARM_THM_CALL:
5122 case elfcpp::R_ARM_CALL:
5123 case elfcpp::R_ARM_PREL31:
5124 case elfcpp::R_ARM_JUMP24:
5125 case elfcpp::R_ARM_THM_JUMP24:
5126 case elfcpp::R_ARM_THM_JUMP19:
5127 case elfcpp::R_ARM_PLT32:
5128 case elfcpp::R_ARM_THM_ABS5:
5129 case elfcpp::R_ARM_ABS8:
5130 case elfcpp::R_ARM_ABS12:
5131 case elfcpp::R_ARM_ABS16:
5132 case elfcpp::R_ARM_BASE_ABS:
5133 case elfcpp::R_ARM_MOVW_ABS_NC:
5134 case elfcpp::R_ARM_MOVT_ABS:
5135 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
5136 case elfcpp::R_ARM_THM_MOVT_ABS:
5137 case elfcpp::R_ARM_MOVW_PREL_NC:
5138 case elfcpp::R_ARM_MOVT_PREL:
5139 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
5140 case elfcpp::R_ARM_THM_MOVT_PREL:
5141 break;
5142
5143 case elfcpp::R_ARM_GOTOFF32:
5144 // We need a GOT section:
5145 target->got_section(symtab, layout);
5146 break;
5147
5148 case elfcpp::R_ARM_BASE_PREL:
5149 // FIXME: What about this?
5150 break;
5151
5152 case elfcpp::R_ARM_GOT_BREL:
5153 case elfcpp::R_ARM_GOT_PREL:
5154 {
5155 // The symbol requires a GOT entry.
5156 Output_data_got<32, big_endian>* got =
5157 target->got_section(symtab, layout);
5158 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
5159 if (got->add_local(object, r_sym, GOT_TYPE_STANDARD))
5160 {
5161 // If we are generating a shared object, we need to add a
5162 // dynamic RELATIVE relocation for this symbol's GOT entry.
5163 if (parameters->options().output_is_position_independent())
5164 {
5165 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
5166 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
5167 rel_dyn->add_local_relative(
5168 object, r_sym, elfcpp::R_ARM_RELATIVE, got,
5169 object->local_got_offset(r_sym, GOT_TYPE_STANDARD));
5170 }
5171 }
5172 }
5173 break;
5174
5175 case elfcpp::R_ARM_TARGET1:
5176 // This should have been mapped to another type already.
5177 // Fall through.
5178 case elfcpp::R_ARM_COPY:
5179 case elfcpp::R_ARM_GLOB_DAT:
5180 case elfcpp::R_ARM_JUMP_SLOT:
5181 case elfcpp::R_ARM_RELATIVE:
5182 // These are relocations which should only be seen by the
5183 // dynamic linker, and should never be seen here.
5184 gold_error(_("%s: unexpected reloc %u in object file"),
5185 object->name().c_str(), r_type);
5186 break;
5187
5188 default:
5189 unsupported_reloc_local(object, r_type);
5190 break;
5191 }
5192 }
5193
5194 // Report an unsupported relocation against a global symbol.
5195
5196 template<bool big_endian>
5197 void
5198 Target_arm<big_endian>::Scan::unsupported_reloc_global(
5199 Sized_relobj<32, big_endian>* object,
5200 unsigned int r_type,
5201 Symbol* gsym)
5202 {
5203 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
5204 object->name().c_str(), r_type, gsym->demangled_name().c_str());
5205 }
5206
5207 // Scan a relocation for a global symbol.
5208 // FIXME: This only handles a subset of relocation types used by Android
5209 // on ARM v5te devices.
5210
5211 template<bool big_endian>
5212 inline void
5213 Target_arm<big_endian>::Scan::global(Symbol_table* symtab,
5214 Layout* layout,
5215 Target_arm* target,
5216 Sized_relobj<32, big_endian>* object,
5217 unsigned int data_shndx,
5218 Output_section* output_section,
5219 const elfcpp::Rel<32, big_endian>& reloc,
5220 unsigned int r_type,
5221 Symbol* gsym)
5222 {
5223 r_type = get_real_reloc_type(r_type);
5224 switch (r_type)
5225 {
5226 case elfcpp::R_ARM_NONE:
5227 break;
5228
5229 case elfcpp::R_ARM_ABS32:
5230 case elfcpp::R_ARM_ABS32_NOI:
5231 {
5232 // Make a dynamic relocation if necessary.
5233 if (gsym->needs_dynamic_reloc(Symbol::ABSOLUTE_REF))
5234 {
5235 if (target->may_need_copy_reloc(gsym))
5236 {
5237 target->copy_reloc(symtab, layout, object,
5238 data_shndx, output_section, gsym, reloc);
5239 }
5240 else if (gsym->can_use_relative_reloc(false))
5241 {
5242 // If we add support for reloc types other than R_ARM_ABS32 here,
5243 // we will need to call check_non_pic(object, r_type).
5244 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
5245 rel_dyn->add_global_relative(gsym, elfcpp::R_ARM_RELATIVE,
5246 output_section, object,
5247 data_shndx, reloc.get_r_offset());
5248 }
5249 else
5250 {
5251 // If we add support for reloc types other than R_ARM_ABS32 here,
5252 // we will need to call check_non_pic(object, r_type).
5253 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
5254 rel_dyn->add_global(gsym, r_type, output_section, object,
5255 data_shndx, reloc.get_r_offset());
5256 }
5257 }
5258 }
5259 break;
5260
5261 case elfcpp::R_ARM_MOVW_ABS_NC:
5262 case elfcpp::R_ARM_MOVT_ABS:
5263 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
5264 case elfcpp::R_ARM_THM_MOVT_ABS:
5265 case elfcpp::R_ARM_MOVW_PREL_NC:
5266 case elfcpp::R_ARM_MOVT_PREL:
5267 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
5268 case elfcpp::R_ARM_THM_MOVT_PREL:
5269 break;
5270
5271 case elfcpp::R_ARM_THM_ABS5:
5272 case elfcpp::R_ARM_ABS8:
5273 case elfcpp::R_ARM_ABS12:
5274 case elfcpp::R_ARM_ABS16:
5275 case elfcpp::R_ARM_BASE_ABS:
5276 {
5277 // No dynamic relocs of these kinds.
5278 // Report the error in case of PIC.
5279 int flags = Symbol::NON_PIC_REF;
5280 if (gsym->type() == elfcpp::STT_FUNC
5281 || gsym->type() == elfcpp::STT_ARM_TFUNC)
5282 flags |= Symbol::FUNCTION_CALL;
5283 if (gsym->needs_dynamic_reloc(flags))
5284 check_non_pic(object, r_type);
5285 }
5286 break;
5287
5288 case elfcpp::R_ARM_REL32:
5289 case elfcpp::R_ARM_PREL31:
5290 {
5291 // Make a dynamic relocation if necessary.
5292 int flags = Symbol::NON_PIC_REF;
5293 if (gsym->needs_dynamic_reloc(flags))
5294 {
5295 if (target->may_need_copy_reloc(gsym))
5296 {
5297 target->copy_reloc(symtab, layout, object,
5298 data_shndx, output_section, gsym, reloc);
5299 }
5300 else
5301 {
5302 check_non_pic(object, r_type);
5303 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
5304 rel_dyn->add_global(gsym, r_type, output_section, object,
5305 data_shndx, reloc.get_r_offset());
5306 }
5307 }
5308 }
5309 break;
5310
5311 case elfcpp::R_ARM_JUMP24:
5312 case elfcpp::R_ARM_THM_JUMP24:
5313 case elfcpp::R_ARM_THM_JUMP19:
5314 case elfcpp::R_ARM_CALL:
5315 case elfcpp::R_ARM_THM_CALL:
5316
5317 if (Target_arm<big_endian>::Scan::symbol_needs_plt_entry(gsym))
5318 target->make_plt_entry(symtab, layout, gsym);
5319 else
5320 {
5321 // Check to see if this is a function that would need a PLT
5322 // but does not get one because the function symbol is untyped.
5323 // This happens in assembly code missing a proper .type directive.
5324 if ((!gsym->is_undefined() || parameters->options().shared())
5325 && !parameters->doing_static_link()
5326 && gsym->type() == elfcpp::STT_NOTYPE
5327 && (gsym->is_from_dynobj()
5328 || gsym->is_undefined()
5329 || gsym->is_preemptible()))
5330 gold_error(_("%s is not a function."),
5331 gsym->demangled_name().c_str());
5332 }
5333 break;
5334
5335 case elfcpp::R_ARM_PLT32:
5336 // If the symbol is fully resolved, this is just a relative
5337 // local reloc. Otherwise we need a PLT entry.
5338 if (gsym->final_value_is_known())
5339 break;
5340 // If building a shared library, we can also skip the PLT entry
5341 // if the symbol is defined in the output file and is protected
5342 // or hidden.
5343 if (gsym->is_defined()
5344 && !gsym->is_from_dynobj()
5345 && !gsym->is_preemptible())
5346 break;
5347 target->make_plt_entry(symtab, layout, gsym);
5348 break;
5349
5350 case elfcpp::R_ARM_GOTOFF32:
5351 // We need a GOT section.
5352 target->got_section(symtab, layout);
5353 break;
5354
5355 case elfcpp::R_ARM_BASE_PREL:
5356 // FIXME: What about this?
5357 break;
5358
5359 case elfcpp::R_ARM_GOT_BREL:
5360 case elfcpp::R_ARM_GOT_PREL:
5361 {
5362 // The symbol requires a GOT entry.
5363 Output_data_got<32, big_endian>* got =
5364 target->got_section(symtab, layout);
5365 if (gsym->final_value_is_known())
5366 got->add_global(gsym, GOT_TYPE_STANDARD);
5367 else
5368 {
5369 // If this symbol is not fully resolved, we need to add a
5370 // GOT entry with a dynamic relocation.
5371 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
5372 if (gsym->is_from_dynobj()
5373 || gsym->is_undefined()
5374 || gsym->is_preemptible())
5375 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
5376 rel_dyn, elfcpp::R_ARM_GLOB_DAT);
5377 else
5378 {
5379 if (got->add_global(gsym, GOT_TYPE_STANDARD))
5380 rel_dyn->add_global_relative(
5381 gsym, elfcpp::R_ARM_RELATIVE, got,
5382 gsym->got_offset(GOT_TYPE_STANDARD));
5383 }
5384 }
5385 }
5386 break;
5387
5388 case elfcpp::R_ARM_TARGET1:
5389 // This should have been mapped to another type already.
5390 // Fall through.
5391 case elfcpp::R_ARM_COPY:
5392 case elfcpp::R_ARM_GLOB_DAT:
5393 case elfcpp::R_ARM_JUMP_SLOT:
5394 case elfcpp::R_ARM_RELATIVE:
5395 // These are relocations which should only be seen by the
5396 // dynamic linker, and should never be seen here.
5397 gold_error(_("%s: unexpected reloc %u in object file"),
5398 object->name().c_str(), r_type);
5399 break;
5400
5401 default:
5402 unsupported_reloc_global(object, r_type, gsym);
5403 break;
5404 }
5405 }
5406
5407 // Process relocations for gc.
5408
5409 template<bool big_endian>
5410 void
5411 Target_arm<big_endian>::gc_process_relocs(Symbol_table* symtab,
5412 Layout* layout,
5413 Sized_relobj<32, big_endian>* object,
5414 unsigned int data_shndx,
5415 unsigned int,
5416 const unsigned char* prelocs,
5417 size_t reloc_count,
5418 Output_section* output_section,
5419 bool needs_special_offset_handling,
5420 size_t local_symbol_count,
5421 const unsigned char* plocal_symbols)
5422 {
5423 typedef Target_arm<big_endian> Arm;
5424 typedef typename Target_arm<big_endian>::Scan Scan;
5425
5426 gold::gc_process_relocs<32, big_endian, Arm, elfcpp::SHT_REL, Scan>(
5427 symtab,
5428 layout,
5429 this,
5430 object,
5431 data_shndx,
5432 prelocs,
5433 reloc_count,
5434 output_section,
5435 needs_special_offset_handling,
5436 local_symbol_count,
5437 plocal_symbols);
5438 }
5439
5440 // Scan relocations for a section.
5441
5442 template<bool big_endian>
5443 void
5444 Target_arm<big_endian>::scan_relocs(Symbol_table* symtab,
5445 Layout* layout,
5446 Sized_relobj<32, big_endian>* object,
5447 unsigned int data_shndx,
5448 unsigned int sh_type,
5449 const unsigned char* prelocs,
5450 size_t reloc_count,
5451 Output_section* output_section,
5452 bool needs_special_offset_handling,
5453 size_t local_symbol_count,
5454 const unsigned char* plocal_symbols)
5455 {
5456 typedef typename Target_arm<big_endian>::Scan Scan;
5457 if (sh_type == elfcpp::SHT_RELA)
5458 {
5459 gold_error(_("%s: unsupported RELA reloc section"),
5460 object->name().c_str());
5461 return;
5462 }
5463
5464 gold::scan_relocs<32, big_endian, Target_arm, elfcpp::SHT_REL, Scan>(
5465 symtab,
5466 layout,
5467 this,
5468 object,
5469 data_shndx,
5470 prelocs,
5471 reloc_count,
5472 output_section,
5473 needs_special_offset_handling,
5474 local_symbol_count,
5475 plocal_symbols);
5476 }
5477
5478 // Finalize the sections.
5479
5480 template<bool big_endian>
5481 void
5482 Target_arm<big_endian>::do_finalize_sections(
5483 Layout* layout,
5484 const Input_objects* input_objects,
5485 Symbol_table* symtab)
5486 {
5487 // Merge processor-specific flags.
5488 for (Input_objects::Relobj_iterator p = input_objects->relobj_begin();
5489 p != input_objects->relobj_end();
5490 ++p)
5491 {
5492 Arm_relobj<big_endian>* arm_relobj =
5493 Arm_relobj<big_endian>::as_arm_relobj(*p);
5494 this->merge_processor_specific_flags(
5495 arm_relobj->name(),
5496 arm_relobj->processor_specific_flags());
5497 this->merge_object_attributes(arm_relobj->name().c_str(),
5498 arm_relobj->attributes_section_data());
5499
5500 }
5501
5502 for (Input_objects::Dynobj_iterator p = input_objects->dynobj_begin();
5503 p != input_objects->dynobj_end();
5504 ++p)
5505 {
5506 Arm_dynobj<big_endian>* arm_dynobj =
5507 Arm_dynobj<big_endian>::as_arm_dynobj(*p);
5508 this->merge_processor_specific_flags(
5509 arm_dynobj->name(),
5510 arm_dynobj->processor_specific_flags());
5511 this->merge_object_attributes(arm_dynobj->name().c_str(),
5512 arm_dynobj->attributes_section_data());
5513 }
5514
5515 // Check BLX use.
5516 const Object_attribute* cpu_arch_attr =
5517 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
5518 if (cpu_arch_attr->int_value() > elfcpp::TAG_CPU_ARCH_V4)
5519 this->set_may_use_blx(true);
5520
5521 // Check if we need to use Cortex-A8 workaround.
5522 if (parameters->options().user_set_fix_cortex_a8())
5523 this->fix_cortex_a8_ = parameters->options().fix_cortex_a8();
5524 else
5525 {
5526 // If neither --fix-cortex-a8 nor --no-fix-cortex-a8 is used, turn on
5527 // Cortex-A8 erratum workaround for ARMv7-A or ARMv7 with unknown
5528 // profile.
5529 const Object_attribute* cpu_arch_profile_attr =
5530 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
5531 this->fix_cortex_a8_ =
5532 (cpu_arch_attr->int_value() == elfcpp::TAG_CPU_ARCH_V7
5533 && (cpu_arch_profile_attr->int_value() == 'A'
5534 || cpu_arch_profile_attr->int_value() == 0));
5535 }
5536
5537 // Fill in some more dynamic tags.
5538 const Reloc_section* rel_plt = (this->plt_ == NULL
5539 ? NULL
5540 : this->plt_->rel_plt());
5541 layout->add_target_dynamic_tags(true, this->got_plt_, rel_plt,
5542 this->rel_dyn_, true);
5543
5544 // Emit any relocs we saved in an attempt to avoid generating COPY
5545 // relocs.
5546 if (this->copy_relocs_.any_saved_relocs())
5547 this->copy_relocs_.emit(this->rel_dyn_section(layout));
5548
5549 // Handle the .ARM.exidx section.
5550 Output_section* exidx_section = layout->find_output_section(".ARM.exidx");
5551 if (exidx_section != NULL
5552 && exidx_section->type() == elfcpp::SHT_ARM_EXIDX
5553 && !parameters->options().relocatable())
5554 {
5555 // Create __exidx_start and __exidx_end symbols.
5556 symtab->define_in_output_data("__exidx_start", NULL,
5557 Symbol_table::PREDEFINED,
5558 exidx_section, 0, 0, elfcpp::STT_OBJECT,
5559 elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
5560 false, true);
5561 symtab->define_in_output_data("__exidx_end", NULL,
5562 Symbol_table::PREDEFINED,
5563 exidx_section, 0, 0, elfcpp::STT_OBJECT,
5564 elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
5565 true, true);
5566
5567 // For the ARM target, we need to add a PT_ARM_EXIDX segment for
5568 // the .ARM.exidx section.
5569 if (!layout->script_options()->saw_phdrs_clause())
5570 {
5571 gold_assert(layout->find_output_segment(elfcpp::PT_ARM_EXIDX, 0, 0)
5572 == NULL);
5573 Output_segment* exidx_segment =
5574 layout->make_output_segment(elfcpp::PT_ARM_EXIDX, elfcpp::PF_R);
5575 exidx_segment->add_output_section(exidx_section, elfcpp::PF_R,
5576 false);
5577 }
5578 }
5579
5580 // Create an .ARM.attributes section if there is not one already.
5581 Output_attributes_section_data* attributes_section =
5582 new Output_attributes_section_data(*this->attributes_section_data_);
5583 layout->add_output_section_data(".ARM.attributes",
5584 elfcpp::SHT_ARM_ATTRIBUTES, 0,
5585 attributes_section, false, false, false,
5586 false);
5587 }
5588
5589 // Return whether a direct absolute static relocation needs to be applied.
5590 // In cases where Scan::local() or Scan::global() has created
5591 // a dynamic relocation other than R_ARM_RELATIVE, the addend
5592 // of the relocation is carried in the data, and we must not
5593 // apply the static relocation.
5594
5595 template<bool big_endian>
5596 inline bool
5597 Target_arm<big_endian>::Relocate::should_apply_static_reloc(
5598 const Sized_symbol<32>* gsym,
5599 int ref_flags,
5600 bool is_32bit,
5601 Output_section* output_section)
5602 {
5603 // If the output section is not allocated, then we didn't call
5604 // scan_relocs, we didn't create a dynamic reloc, and we must apply
5605 // the reloc here.
5606 if ((output_section->flags() & elfcpp::SHF_ALLOC) == 0)
5607 return true;
5608
5609 // For local symbols, we will have created a non-RELATIVE dynamic
5610 // relocation only if (a) the output is position independent,
5611 // (b) the relocation is absolute (not pc- or segment-relative), and
5612 // (c) the relocation is not 32 bits wide.
5613 if (gsym == NULL)
5614 return !(parameters->options().output_is_position_independent()
5615 && (ref_flags & Symbol::ABSOLUTE_REF)
5616 && !is_32bit);
5617
5618 // For global symbols, we use the same helper routines used in the
5619 // scan pass. If we did not create a dynamic relocation, or if we
5620 // created a RELATIVE dynamic relocation, we should apply the static
5621 // relocation.
5622 bool has_dyn = gsym->needs_dynamic_reloc(ref_flags);
5623 bool is_rel = (ref_flags & Symbol::ABSOLUTE_REF)
5624 && gsym->can_use_relative_reloc(ref_flags
5625 & Symbol::FUNCTION_CALL);
5626 return !has_dyn || is_rel;
5627 }
5628
5629 // Perform a relocation.
5630
5631 template<bool big_endian>
5632 inline bool
5633 Target_arm<big_endian>::Relocate::relocate(
5634 const Relocate_info<32, big_endian>* relinfo,
5635 Target_arm* target,
5636 Output_section *output_section,
5637 size_t relnum,
5638 const elfcpp::Rel<32, big_endian>& rel,
5639 unsigned int r_type,
5640 const Sized_symbol<32>* gsym,
5641 const Symbol_value<32>* psymval,
5642 unsigned char* view,
5643 Arm_address address,
5644 section_size_type /* view_size */ )
5645 {
5646 typedef Arm_relocate_functions<big_endian> Arm_relocate_functions;
5647
5648 r_type = get_real_reloc_type(r_type);
5649
5650 const Arm_relobj<big_endian>* object =
5651 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
5652
5653 // If the final branch target of a relocation is a THUMB instruction,
5654 // this is 1. Otherwise it is 0.
5655 Arm_address thumb_bit = 0;
5656 Symbol_value<32> symval;
5657 bool is_weakly_undefined_without_plt = false;
5658 if (relnum != Target_arm<big_endian>::fake_relnum_for_stubs)
5659 {
5660 if (gsym != NULL)
5661 {
5662 // This is a global symbol. Determine if we use PLT and if the
5663 // final target is THUMB.
5664 if (gsym->use_plt_offset(reloc_is_non_pic(r_type)))
5665 {
5666 // This uses a PLT, change the symbol value.
5667 symval.set_output_value(target->plt_section()->address()
5668 + gsym->plt_offset());
5669 psymval = &symval;
5670 }
5671 else if (gsym->is_weak_undefined())
5672 {
5673 // This is a weakly undefined symbol and we do not use PLT
5674 // for this relocation. A branch targeting this symbol will
5675 // be converted into a NOP.
5676 is_weakly_undefined_without_plt = true;
5677 }
5678 else
5679 {
5680 // Set thumb bit if symbol:
5681 // -Has type STT_ARM_TFUNC or
5682 // -Has type STT_FUNC, is defined and with LSB in value set.
5683 thumb_bit =
5684 (((gsym->type() == elfcpp::STT_ARM_TFUNC)
5685 || (gsym->type() == elfcpp::STT_FUNC
5686 && !gsym->is_undefined()
5687 && ((psymval->value(object, 0) & 1) != 0)))
5688 ? 1
5689 : 0);
5690 }
5691 }
5692 else
5693 {
5694 // This is a local symbol. Determine if the final target is THUMB.
5695 // We saved this information when all the local symbols were read.
5696 elfcpp::Elf_types<32>::Elf_WXword r_info = rel.get_r_info();
5697 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
5698 thumb_bit = object->local_symbol_is_thumb_function(r_sym) ? 1 : 0;
5699 }
5700 }
5701 else
5702 {
5703 // This is a fake relocation synthesized for a stub. It does not have
5704 // a real symbol. We just look at the LSB of the symbol value to
5705 // determine if the target is THUMB or not.
5706 thumb_bit = ((psymval->value(object, 0) & 1) != 0);
5707 }
5708
5709 // Strip LSB if this points to a THUMB target.
5710 if (thumb_bit != 0
5711 && Target_arm<big_endian>::reloc_uses_thumb_bit(r_type)
5712 && ((psymval->value(object, 0) & 1) != 0))
5713 {
5714 Arm_address stripped_value =
5715 psymval->value(object, 0) & ~static_cast<Arm_address>(1);
5716 symval.set_output_value(stripped_value);
5717 psymval = &symval;
5718 }
5719
5720 // Get the GOT offset if needed.
5721 // The GOT pointer points to the end of the GOT section.
5722 // We need to subtract the size of the GOT section to get
5723 // the actual offset to use in the relocation.
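// In other words, the GOT origin (GOT_ORG in AAELF terms) is taken to be
// the start of the GOT PLT section, which is allocated right after the
// GOT section.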
5724 bool have_got_offset = false;
5725 unsigned int got_offset = 0;
5726 switch (r_type)
5727 {
5728 case elfcpp::R_ARM_GOT_BREL:
5729 case elfcpp::R_ARM_GOT_PREL:
5730 if (gsym != NULL)
5731 {
5732 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
5733 got_offset = (gsym->got_offset(GOT_TYPE_STANDARD)
5734 - target->got_size());
5735 }
5736 else
5737 {
5738 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
5739 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
5740 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
5741 - target->got_size());
5742 }
5743 have_got_offset = true;
5744 break;
5745
5746 default:
5747 break;
5748 }
5749
5750 // To look up relocation stubs, we need to pass the symbol table index of
5751 // a local symbol.
5752 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
5753
5754 typename Arm_relocate_functions::Status reloc_status =
5755 Arm_relocate_functions::STATUS_OKAY;
5756 switch (r_type)
5757 {
5758 case elfcpp::R_ARM_NONE:
5759 break;
5760
5761 case elfcpp::R_ARM_ABS8:
5762 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
5763 output_section))
5764 reloc_status = Arm_relocate_functions::abs8(view, object, psymval);
5765 break;
5766
5767 case elfcpp::R_ARM_ABS12:
5768 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
5769 output_section))
5770 reloc_status = Arm_relocate_functions::abs12(view, object, psymval);
5771 break;
5772
5773 case elfcpp::R_ARM_ABS16:
5774 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
5775 output_section))
5776 reloc_status = Arm_relocate_functions::abs16(view, object, psymval);
5777 break;
5778
5779 case elfcpp::R_ARM_ABS32:
5780 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
5781 output_section))
5782 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
5783 thumb_bit);
5784 break;
5785
5786 case elfcpp::R_ARM_ABS32_NOI:
5787 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
5788 output_section))
5789 // No thumb bit for this relocation: (S + A)
5790 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
5791 0);
5792 break;
5793
5794 case elfcpp::R_ARM_MOVW_ABS_NC:
5795 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
5796 output_section))
5797 reloc_status = Arm_relocate_functions::movw_abs_nc(view, object,
5798 psymval,
5799 thumb_bit);
5800 else
5801 gold_error(_("relocation R_ARM_MOVW_ABS_NC cannot be used when making "
5802 "a shared object; recompile with -fPIC"));
5803 break;
5804
5805 case elfcpp::R_ARM_MOVT_ABS:
5806 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
5807 output_section))
5808 reloc_status = Arm_relocate_functions::movt_abs(view, object, psymval);
5809 else
5810 gold_error(_("relocation R_ARM_MOVT_ABS cannot be used when making "
5811 "a shared object; recompile with -fPIC"));
5812 break;
5813
5814 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
5815 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
5816 output_section))
5817 reloc_status = Arm_relocate_functions::thm_movw_abs_nc(view, object,
5818 psymval,
5819 thumb_bit);
5820 else
5821 gold_error(_("relocation R_ARM_THM_MOVW_ABS_NC cannot be used when "
5822 "making a shared object; recompile with -fPIC"));
5823 break;
5824
5825 case elfcpp::R_ARM_THM_MOVT_ABS:
5826 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
5827 output_section))
5828 reloc_status = Arm_relocate_functions::thm_movt_abs(view, object,
5829 psymval);
5830 else
5831 gold_error(_("relocation R_ARM_THM_MOVT_ABS cannot be used when "
5832 "making a shared object; recompile with -fPIC"));
5833 break;
5834
5835 case elfcpp::R_ARM_MOVW_PREL_NC:
5836 reloc_status = Arm_relocate_functions::movw_prel_nc(view, object,
5837 psymval, address,
5838 thumb_bit);
5839 break;
5840
5841 case elfcpp::R_ARM_MOVT_PREL:
5842 reloc_status = Arm_relocate_functions::movt_prel(view, object,
5843 psymval, address);
5844 break;
5845
5846 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
5847 reloc_status = Arm_relocate_functions::thm_movw_prel_nc(view, object,
5848 psymval, address,
5849 thumb_bit);
5850 break;
5851
5852 case elfcpp::R_ARM_THM_MOVT_PREL:
5853 reloc_status = Arm_relocate_functions::thm_movt_prel(view, object,
5854 psymval, address);
5855 break;
5856
5857 case elfcpp::R_ARM_REL32:
5858 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
5859 address, thumb_bit);
5860 break;
5861
5862 case elfcpp::R_ARM_THM_ABS5:
5863 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
5864 output_section))
5865 reloc_status = Arm_relocate_functions::thm_abs5(view, object, psymval);
5866 break;
5867
5868 case elfcpp::R_ARM_THM_CALL:
5869 reloc_status =
5870 Arm_relocate_functions::thm_call(relinfo, view, gsym, object, r_sym,
5871 psymval, address, thumb_bit,
5872 is_weakly_undefined_without_plt);
5873 break;
5874
5875 case elfcpp::R_ARM_XPC25:
5876 reloc_status =
5877 Arm_relocate_functions::xpc25(relinfo, view, gsym, object, r_sym,
5878 psymval, address, thumb_bit,
5879 is_weakly_undefined_without_plt);
5880 break;
5881
5882 case elfcpp::R_ARM_THM_XPC22:
5883 reloc_status =
5884 Arm_relocate_functions::thm_xpc22(relinfo, view, gsym, object, r_sym,
5885 psymval, address, thumb_bit,
5886 is_weakly_undefined_without_plt);
5887 break;
5888
5889 case elfcpp::R_ARM_GOTOFF32:
5890 {
5891 Arm_address got_origin;
5892 got_origin = target->got_plt_section()->address();
5893 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
5894 got_origin, thumb_bit);
5895 }
5896 break;
5897
5898 case elfcpp::R_ARM_BASE_PREL:
5899 {
5900 uint32_t origin;
5901 // Get the addressing origin of the output segment defining the
5902 // symbol gsym (AAELF 4.6.1.2 Relocation types)
5903 gold_assert(gsym != NULL);
5904 if (gsym->source() == Symbol::IN_OUTPUT_SEGMENT)
5905 origin = gsym->output_segment()->vaddr();
5906 else if (gsym->source () == Symbol::IN_OUTPUT_DATA)
5907 origin = gsym->output_data()->address();
5908 else
5909 {
5910 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
5911 _("cannot find origin of R_ARM_BASE_PREL"));
5912 return true;
5913 }
5914 reloc_status = Arm_relocate_functions::base_prel(view, origin, address);
5915 }
5916 break;
5917
5918 case elfcpp::R_ARM_BASE_ABS:
5919 {
5920 if (!should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
5921 output_section))
5922 break;
5923
5924 uint32_t origin;
5925 // Get the addressing origin of the output segment defining
5926 // the symbol gsym (AAELF 4.6.1.2 Relocation types).
5927 if (gsym == NULL)
5928 // R_ARM_BASE_ABS with the NULL symbol will give the
5929 // absolute address of the GOT origin (GOT_ORG) (see ARM IHI
5930 // 0044C (AAELF): 4.6.1.8 Proxy generating relocations).
5931 origin = target->got_plt_section()->address();
5932 else if (gsym->source() == Symbol::IN_OUTPUT_SEGMENT)
5933 origin = gsym->output_segment()->vaddr();
5934 else if (gsym->source () == Symbol::IN_OUTPUT_DATA)
5935 origin = gsym->output_data()->address();
5936 else
5937 {
5938 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
5939 _("cannot find origin of R_ARM_BASE_ABS"));
5940 return true;
5941 }
5942
5943 reloc_status = Arm_relocate_functions::base_abs(view, origin);
5944 }
5945 break;
5946
5947 case elfcpp::R_ARM_GOT_BREL:
5948 gold_assert(have_got_offset);
5949 reloc_status = Arm_relocate_functions::got_brel(view, got_offset);
5950 break;
5951
5952 case elfcpp::R_ARM_GOT_PREL:
5953 gold_assert(have_got_offset);
5954 // Get the addressing origin of the GOT PLT section, which is allocated
5955 // right after the GOT section, and use it to compute the absolute
5956 // address of the symbol's GOT entry (got_origin + got_offset).
5957 Arm_address got_origin;
5958 got_origin = target->got_plt_section()->address();
5959 reloc_status = Arm_relocate_functions::got_prel(view,
5960 got_origin + got_offset,
5961 address);
5962 break;
5963
5964 case elfcpp::R_ARM_PLT32:
5965 gold_assert(gsym == NULL
5966 || gsym->has_plt_offset()
5967 || gsym->final_value_is_known()
5968 || (gsym->is_defined()
5969 && !gsym->is_from_dynobj()
5970 && !gsym->is_preemptible()));
5971 reloc_status =
5972 Arm_relocate_functions::plt32(relinfo, view, gsym, object, r_sym,
5973 psymval, address, thumb_bit,
5974 is_weakly_undefined_without_plt);
5975 break;
5976
5977 case elfcpp::R_ARM_CALL:
5978 reloc_status =
5979 Arm_relocate_functions::call(relinfo, view, gsym, object, r_sym,
5980 psymval, address, thumb_bit,
5981 is_weakly_undefined_without_plt);
5982 break;
5983
5984 case elfcpp::R_ARM_JUMP24:
5985 reloc_status =
5986 Arm_relocate_functions::jump24(relinfo, view, gsym, object, r_sym,
5987 psymval, address, thumb_bit,
5988 is_weakly_undefined_without_plt);
5989 break;
5990
5991 case elfcpp::R_ARM_THM_JUMP24:
5992 reloc_status =
5993 Arm_relocate_functions::thm_jump24(relinfo, view, gsym, object, r_sym,
5994 psymval, address, thumb_bit,
5995 is_weakly_undefined_without_plt);
5996 break;
5997
5998 case elfcpp::R_ARM_THM_JUMP19:
5999 reloc_status =
6000 Arm_relocate_functions::thm_jump19(view, object, psymval, address,
6001 thumb_bit);
6002 break;
6003
6004 case elfcpp::R_ARM_PREL31:
6005 reloc_status = Arm_relocate_functions::prel31(view, object, psymval,
6006 address, thumb_bit);
6007 break;
6008
6009 case elfcpp::R_ARM_TARGET1:
6010 // This should have been mapped to another type already.
6011 // Fall through.
6012 case elfcpp::R_ARM_COPY:
6013 case elfcpp::R_ARM_GLOB_DAT:
6014 case elfcpp::R_ARM_JUMP_SLOT:
6015 case elfcpp::R_ARM_RELATIVE:
6016 // These are relocations which should only be seen by the
6017 // dynamic linker, and should never be seen here.
6018 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6019 _("unexpected reloc %u in object file"),
6020 r_type);
6021 break;
6022
6023 default:
6024 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6025 _("unsupported reloc %u"),
6026 r_type);
6027 break;
6028 }
6029
6030 // Report any errors.
6031 switch (reloc_status)
6032 {
6033 case Arm_relocate_functions::STATUS_OKAY:
6034 break;
6035 case Arm_relocate_functions::STATUS_OVERFLOW:
6036 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6037 _("relocation overflow in relocation %u"),
6038 r_type);
6039 break;
6040 case Arm_relocate_functions::STATUS_BAD_RELOC:
6041 gold_error_at_location(
6042 relinfo,
6043 relnum,
6044 rel.get_r_offset(),
6045 _("unexpected opcode while processing relocation %u"),
6046 r_type);
6047 break;
6048 default:
6049 gold_unreachable();
6050 }
6051
6052 return true;
6053 }
6054
6055 // Relocate section data.
6056
6057 template<bool big_endian>
6058 void
6059 Target_arm<big_endian>::relocate_section(
6060 const Relocate_info<32, big_endian>* relinfo,
6061 unsigned int sh_type,
6062 const unsigned char* prelocs,
6063 size_t reloc_count,
6064 Output_section* output_section,
6065 bool needs_special_offset_handling,
6066 unsigned char* view,
6067 Arm_address address,
6068 section_size_type view_size,
6069 const Reloc_symbol_changes* reloc_symbol_changes)
6070 {
6071 typedef typename Target_arm<big_endian>::Relocate Arm_relocate;
6072 gold_assert(sh_type == elfcpp::SHT_REL);
6073
6074 Arm_input_section<big_endian>* arm_input_section =
6075 this->find_arm_input_section(relinfo->object, relinfo->data_shndx);
6076
6077 // This is an ARM input section and the view covers the whole output
6078 // section.
6079 if (arm_input_section != NULL)
6080 {
6081 gold_assert(needs_special_offset_handling);
6082 Arm_address section_address = arm_input_section->address();
6083 section_size_type section_size = arm_input_section->data_size();
6084
6085 gold_assert((arm_input_section->address() >= address)
6086 && ((arm_input_section->address()
6087 + arm_input_section->data_size())
6088 <= (address + view_size)));
6089
6090 off_t offset = section_address - address;
6091 view += offset;
6092 address += offset;
6093 view_size = section_size;
6094 }
6095
6096 gold::relocate_section<32, big_endian, Target_arm, elfcpp::SHT_REL,
6097 Arm_relocate>(
6098 relinfo,
6099 this,
6100 prelocs,
6101 reloc_count,
6102 output_section,
6103 needs_special_offset_handling,
6104 view,
6105 address,
6106 view_size,
6107 reloc_symbol_changes);
6108 }
6109
6110 // Return the size of a relocation while scanning during a relocatable
6111 // link.
6112
6113 template<bool big_endian>
6114 unsigned int
6115 Target_arm<big_endian>::Relocatable_size_for_reloc::get_size_for_reloc(
6116 unsigned int r_type,
6117 Relobj* object)
6118 {
6119 r_type = get_real_reloc_type(r_type);
6120 switch (r_type)
6121 {
6122 case elfcpp::R_ARM_NONE:
6123 return 0;
6124
6125 case elfcpp::R_ARM_ABS8:
6126 return 1;
6127
6128 case elfcpp::R_ARM_ABS16:
6129 case elfcpp::R_ARM_THM_ABS5:
6130 return 2;
6131
6132 case elfcpp::R_ARM_ABS32:
6133 case elfcpp::R_ARM_ABS32_NOI:
6134 case elfcpp::R_ARM_ABS12:
6135 case elfcpp::R_ARM_BASE_ABS:
6136 case elfcpp::R_ARM_REL32:
6137 case elfcpp::R_ARM_THM_CALL:
6138 case elfcpp::R_ARM_GOTOFF32:
6139 case elfcpp::R_ARM_BASE_PREL:
6140 case elfcpp::R_ARM_GOT_BREL:
6141 case elfcpp::R_ARM_GOT_PREL:
6142 case elfcpp::R_ARM_PLT32:
6143 case elfcpp::R_ARM_CALL:
6144 case elfcpp::R_ARM_JUMP24:
6145 case elfcpp::R_ARM_PREL31:
6146 case elfcpp::R_ARM_MOVW_ABS_NC:
6147 case elfcpp::R_ARM_MOVT_ABS:
6148 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
6149 case elfcpp::R_ARM_THM_MOVT_ABS:
6150 case elfcpp::R_ARM_MOVW_PREL_NC:
6151 case elfcpp::R_ARM_MOVT_PREL:
6152 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
6153 case elfcpp::R_ARM_THM_MOVT_PREL:
6154 return 4;
6155
6156 case elfcpp::R_ARM_TARGET1:
6157 // This should have been mapped to another type already.
6158 // Fall through.
6159 case elfcpp::R_ARM_COPY:
6160 case elfcpp::R_ARM_GLOB_DAT:
6161 case elfcpp::R_ARM_JUMP_SLOT:
6162 case elfcpp::R_ARM_RELATIVE:
6163 // These are relocations which should only be seen by the
6164 // dynamic linker, and should never be seen here.
6165 gold_error(_("%s: unexpected reloc %u in object file"),
6166 object->name().c_str(), r_type);
6167 return 0;
6168
6169 default:
6170 object->error(_("unsupported reloc %u in object file"), r_type);
6171 return 0;
6172 }
6173 }
6174
6175 // Scan the relocs during a relocatable link.
6176
6177 template<bool big_endian>
6178 void
6179 Target_arm<big_endian>::scan_relocatable_relocs(
6180 Symbol_table* symtab,
6181 Layout* layout,
6182 Sized_relobj<32, big_endian>* object,
6183 unsigned int data_shndx,
6184 unsigned int sh_type,
6185 const unsigned char* prelocs,
6186 size_t reloc_count,
6187 Output_section* output_section,
6188 bool needs_special_offset_handling,
6189 size_t local_symbol_count,
6190 const unsigned char* plocal_symbols,
6191 Relocatable_relocs* rr)
6192 {
6193 gold_assert(sh_type == elfcpp::SHT_REL);
6194
6195 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_REL,
6196 Relocatable_size_for_reloc> Scan_relocatable_relocs;
6197
6198 gold::scan_relocatable_relocs<32, big_endian, elfcpp::SHT_REL,
6199 Scan_relocatable_relocs>(
6200 symtab,
6201 layout,
6202 object,
6203 data_shndx,
6204 prelocs,
6205 reloc_count,
6206 output_section,
6207 needs_special_offset_handling,
6208 local_symbol_count,
6209 plocal_symbols,
6210 rr);
6211 }
6212
6213 // Relocate a section during a relocatable link.
6214
6215 template<bool big_endian>
6216 void
6217 Target_arm<big_endian>::relocate_for_relocatable(
6218 const Relocate_info<32, big_endian>* relinfo,
6219 unsigned int sh_type,
6220 const unsigned char* prelocs,
6221 size_t reloc_count,
6222 Output_section* output_section,
6223 off_t offset_in_output_section,
6224 const Relocatable_relocs* rr,
6225 unsigned char* view,
6226 Arm_address view_address,
6227 section_size_type view_size,
6228 unsigned char* reloc_view,
6229 section_size_type reloc_view_size)
6230 {
6231 gold_assert(sh_type == elfcpp::SHT_REL);
6232
6233 gold::relocate_for_relocatable<32, big_endian, elfcpp::SHT_REL>(
6234 relinfo,
6235 prelocs,
6236 reloc_count,
6237 output_section,
6238 offset_in_output_section,
6239 rr,
6240 view,
6241 view_address,
6242 view_size,
6243 reloc_view,
6244 reloc_view_size);
6245 }
6246
6247 // Return the value to use for a dynamic symbol which requires special
6248 // treatment. This is how we support equality comparisons of function
6249 // pointers across shared library boundaries, as described in the
6250 // processor specific ABI supplement.
6251
6252 template<bool big_endian>
6253 uint64_t
6254 Target_arm<big_endian>::do_dynsym_value(const Symbol* gsym) const
6255 {
6256 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6257 return this->plt_section()->address() + gsym->plt_offset();
6258 }
6259
6260 // Map platform-specific relocs to real relocs
6261 //
6262 template<bool big_endian>
6263 unsigned int
6264 Target_arm<big_endian>::get_real_reloc_type (unsigned int r_type)
6265 {
6266 switch (r_type)
6267 {
6268 case elfcpp::R_ARM_TARGET1:
6269 // This is either R_ARM_ABS32 or R_ARM_REL32.
6270 return elfcpp::R_ARM_ABS32;
6271
6272 case elfcpp::R_ARM_TARGET2:
6273 // This can be any reloc type but usually is R_ARM_GOT_PREL.
6274 return elfcpp::R_ARM_GOT_PREL;
6275
6276 default:
6277 return r_type;
6278 }
6279 }
6280
6281 // Return whether two EABI versions V1 and V2 are compatible.
6282
6283 template<bool big_endian>
6284 bool
6285 Target_arm<big_endian>::are_eabi_versions_compatible(
6286 elfcpp::Elf_Word v1,
6287 elfcpp::Elf_Word v2)
6288 {
6289 // v4 and v5 are the same spec before and after it was released,
6290 // so allow mixing them.
6291 if ((v1 == elfcpp::EF_ARM_EABI_VER4 && v2 == elfcpp::EF_ARM_EABI_VER5)
6292 || (v1 == elfcpp::EF_ARM_EABI_VER5 && v2 == elfcpp::EF_ARM_EABI_VER4))
6293 return true;
6294
6295 return v1 == v2;
6296 }
6297
6298 // Combine FLAGS from an input object called NAME and the processor-specific
6299 // flags in the ELF header of the output. Much of this is adapted from the
6300 // processor-specific flags merging code in elf32_arm_merge_private_bfd_data
6301 // in bfd/elf32-arm.c.
6302
6303 template<bool big_endian>
6304 void
6305 Target_arm<big_endian>::merge_processor_specific_flags(
6306 const std::string& name,
6307 elfcpp::Elf_Word flags)
6308 {
6309 if (this->are_processor_specific_flags_set())
6310 {
6311 elfcpp::Elf_Word out_flags = this->processor_specific_flags();
6312
6313 // Nothing to merge if flags equal to those in output.
6314 if (flags == out_flags)
6315 return;
6316
6317 // Complain about various flag mismatches.
6318 elfcpp::Elf_Word version1 = elfcpp::arm_eabi_version(flags);
6319 elfcpp::Elf_Word version2 = elfcpp::arm_eabi_version(out_flags);
6320 if (!this->are_eabi_versions_compatible(version1, version2))
6321 gold_error(_("Source object %s has EABI version %d but output has "
6322 "EABI version %d."),
6323 name.c_str(),
6324 (flags & elfcpp::EF_ARM_EABIMASK) >> 24,
6325 (out_flags & elfcpp::EF_ARM_EABIMASK) >> 24);
6326 }
6327 else
6328 {
6329 // If the input is the default architecture and had the default
6330 // flags, do not bother setting the flags for the output
6331 // architecture; instead allow future merges to do this. If no
6332 // future merge ever sets these flags, they will retain their
6333 // uninitialised values which, surprise surprise, correspond
6334 // to the default values.
6335 if (flags == 0)
6336 return;
6337
6338 // This is the first time, just copy the flags.
6339 // We only copy the EABI version for now.
6340 this->set_processor_specific_flags(flags & elfcpp::EF_ARM_EABIMASK);
6341 }
6342 }
6343
6344 // Adjust ELF file header.
6345 template<bool big_endian>
6346 void
6347 Target_arm<big_endian>::do_adjust_elf_header(
6348 unsigned char* view,
6349 int len) const
6350 {
6351 gold_assert(len == elfcpp::Elf_sizes<32>::ehdr_size);
6352
6353 elfcpp::Ehdr<32, big_endian> ehdr(view);
6354 unsigned char e_ident[elfcpp::EI_NIDENT];
6355 memcpy(e_ident, ehdr.get_e_ident(), elfcpp::EI_NIDENT);
6356
6357 if (elfcpp::arm_eabi_version(this->processor_specific_flags())
6358 == elfcpp::EF_ARM_EABI_UNKNOWN)
6359 e_ident[elfcpp::EI_OSABI] = elfcpp::ELFOSABI_ARM;
6360 else
6361 e_ident[elfcpp::EI_OSABI] = 0;
6362 e_ident[elfcpp::EI_ABIVERSION] = 0;
6363
6364 // FIXME: Do EF_ARM_BE8 adjustment.
6365
6366 elfcpp::Ehdr_write<32, big_endian> oehdr(view);
6367 oehdr.put_e_ident(e_ident);
6368 }
6369
6370 // do_make_elf_object overrides the same function in the base class.
6371 // We need a target-specific sub-class of Sized_relobj<32, big_endian>
6372 // to store ARM-specific information, hence our own ELF object
6373 // creation.
6374
6375 template<bool big_endian>
6376 Object*
6377 Target_arm<big_endian>::do_make_elf_object(
6378 const std::string& name,
6379 Input_file* input_file,
6380 off_t offset, const elfcpp::Ehdr<32, big_endian>& ehdr)
6381 {
6382 int et = ehdr.get_e_type();
6383 if (et == elfcpp::ET_REL)
6384 {
6385 Arm_relobj<big_endian>* obj =
6386 new Arm_relobj<big_endian>(name, input_file, offset, ehdr);
6387 obj->setup();
6388 return obj;
6389 }
6390 else if (et == elfcpp::ET_DYN)
6391 {
6392 Sized_dynobj<32, big_endian>* obj =
6393 new Arm_dynobj<big_endian>(name, input_file, offset, ehdr);
6394 obj->setup();
6395 return obj;
6396 }
6397 else
6398 {
6399 gold_error(_("%s: unsupported ELF file type %d"),
6400 name.c_str(), et);
6401 return NULL;
6402 }
6403 }
6404
6405 // Read the architecture from the Tag_also_compatible_with attribute, if any.
6406 // Returns -1 if no architecture could be read.
6407 // This is adapted from get_secondary_compatible_arch() in bfd/elf32-arm.c.
6408
6409 template<bool big_endian>
6410 int
6411 Target_arm<big_endian>::get_secondary_compatible_arch(
6412 const Attributes_section_data* pasd)
6413 {
6414 const Object_attribute *known_attributes =
6415 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
6416
6417 // Note: the tag and its argument below are uleb128 values, though
6418 // currently-defined values fit in one byte for each.
6419 const std::string& sv =
6420 known_attributes[elfcpp::Tag_also_compatible_with].string_value();
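// A second byte with its high bit set would mean the uleb128-encoded
// architecture value does not fit in a single byte, which we do not try
// to decode here.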
6421 if (sv.size() == 2
6422 && sv.data()[0] == elfcpp::Tag_CPU_arch
6423 && (sv.data()[1] & 128) != 128)
6424 return sv.data()[1];
6425
6426 // This tag is "safely ignorable", so don't complain if it looks funny.
6427 return -1;
6428 }
6429
6430 // Set, or unset, the architecture of the Tag_also_compatible_with attribute.
6431 // The tag is removed if ARCH is -1.
6432 // This is adapted from set_secondary_compatible_arch() in bfd/elf32-arm.c.
6433
6434 template<bool big_endian>
6435 void
6436 Target_arm<big_endian>::set_secondary_compatible_arch(
6437 Attributes_section_data* pasd,
6438 int arch)
6439 {
6440 Object_attribute *known_attributes =
6441 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
6442
6443 if (arch == -1)
6444 {
6445 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value("");
6446 return;
6447 }
6448
6449 // Note: the tag and its argument below are uleb128 values, though
6450 // currently-defined values fit in one byte for each.
6451 char sv[3];
6452 sv[0] = elfcpp::Tag_CPU_arch;
6453 gold_assert(arch != 0);
6454 sv[1] = arch;
6455 sv[2] = '\0';
6456
6457 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value(sv);
6458 }
6459
6460 // Combine two values for Tag_CPU_arch, taking secondary compatibility tags
6461 // into account.
6462 // This is adapted from tag_cpu_arch_combine() in bfd/elf32-arm.c.
6463
6464 template<bool big_endian>
6465 int
6466 Target_arm<big_endian>::tag_cpu_arch_combine(
6467 const char* name,
6468 int oldtag,
6469 int* secondary_compat_out,
6470 int newtag,
6471 int secondary_compat)
6472 {
6473 #define T(X) elfcpp::TAG_CPU_ARCH_##X
6474 static const int v6t2[] =
6475 {
6476 T(V6T2), // PRE_V4.
6477 T(V6T2), // V4.
6478 T(V6T2), // V4T.
6479 T(V6T2), // V5T.
6480 T(V6T2), // V5TE.
6481 T(V6T2), // V5TEJ.
6482 T(V6T2), // V6.
6483 T(V7), // V6KZ.
6484 T(V6T2) // V6T2.
6485 };
6486 static const int v6k[] =
6487 {
6488 T(V6K), // PRE_V4.
6489 T(V6K), // V4.
6490 T(V6K), // V4T.
6491 T(V6K), // V5T.
6492 T(V6K), // V5TE.
6493 T(V6K), // V5TEJ.
6494 T(V6K), // V6.
6495 T(V6KZ), // V6KZ.
6496 T(V7), // V6T2.
6497 T(V6K) // V6K.
6498 };
6499 static const int v7[] =
6500 {
6501 T(V7), // PRE_V4.
6502 T(V7), // V4.
6503 T(V7), // V4T.
6504 T(V7), // V5T.
6505 T(V7), // V5TE.
6506 T(V7), // V5TEJ.
6507 T(V7), // V6.
6508 T(V7), // V6KZ.
6509 T(V7), // V6T2.
6510 T(V7), // V6K.
6511 T(V7) // V7.
6512 };
6513 static const int v6_m[] =
6514 {
6515 -1, // PRE_V4.
6516 -1, // V4.
6517 T(V6K), // V4T.
6518 T(V6K), // V5T.
6519 T(V6K), // V5TE.
6520 T(V6K), // V5TEJ.
6521 T(V6K), // V6.
6522 T(V6KZ), // V6KZ.
6523 T(V7), // V6T2.
6524 T(V6K), // V6K.
6525 T(V7), // V7.
6526 T(V6_M) // V6_M.
6527 };
6528 static const int v6s_m[] =
6529 {
6530 -1, // PRE_V4.
6531 -1, // V4.
6532 T(V6K), // V4T.
6533 T(V6K), // V5T.
6534 T(V6K), // V5TE.
6535 T(V6K), // V5TEJ.
6536 T(V6K), // V6.
6537 T(V6KZ), // V6KZ.
6538 T(V7), // V6T2.
6539 T(V6K), // V6K.
6540 T(V7), // V7.
6541 T(V6S_M), // V6_M.
6542 T(V6S_M) // V6S_M.
6543 };
6544 static const int v7e_m[] =
6545 {
6546 -1, // PRE_V4.
6547 -1, // V4.
6548 T(V7E_M), // V4T.
6549 T(V7E_M), // V5T.
6550 T(V7E_M), // V5TE.
6551 T(V7E_M), // V5TEJ.
6552 T(V7E_M), // V6.
6553 T(V7E_M), // V6KZ.
6554 T(V7E_M), // V6T2.
6555 T(V7E_M), // V6K.
6556 T(V7E_M), // V7.
6557 T(V7E_M), // V6_M.
6558 T(V7E_M), // V6S_M.
6559 T(V7E_M) // V7E_M.
6560 };
6561 static const int v4t_plus_v6_m[] =
6562 {
6563 -1, // PRE_V4.
6564 -1, // V4.
6565 T(V4T), // V4T.
6566 T(V5T), // V5T.
6567 T(V5TE), // V5TE.
6568 T(V5TEJ), // V5TEJ.
6569 T(V6), // V6.
6570 T(V6KZ), // V6KZ.
6571 T(V6T2), // V6T2.
6572 T(V6K), // V6K.
6573 T(V7), // V7.
6574 T(V6_M), // V6_M.
6575 T(V6S_M), // V6S_M.
6576 T(V7E_M), // V7E_M.
6577 T(V4T_PLUS_V6_M) // V4T plus V6_M.
6578 };
6579 static const int *comb[] =
6580 {
6581 v6t2,
6582 v6k,
6583 v7,
6584 v6_m,
6585 v6s_m,
6586 v7e_m,
6587 // Pseudo-architecture.
6588 v4t_plus_v6_m
6589 };
6590
6591 // Check we've not got a higher architecture than we know about.
6592
6593 if (oldtag >= elfcpp::MAX_TAG_CPU_ARCH || newtag >= elfcpp::MAX_TAG_CPU_ARCH)
6594 {
6595 gold_error(_("%s: unknown CPU architecture"), name);
6596 return -1;
6597 }
6598
6599 // Override old tag if we have a Tag_also_compatible_with on the output.
6600
6601 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
6602 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
6603 oldtag = T(V4T_PLUS_V6_M);
6604
6605 // And override the new tag if we have a Tag_also_compatible_with on the
6606 // input.
6607
6608 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
6609 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
6610 newtag = T(V4T_PLUS_V6_M);
6611
6612 // Architectures up to and including V6KZ add features monotonically.
6613 int tagh = std::max(oldtag, newtag);
6614 if (tagh <= elfcpp::TAG_CPU_ARCH_V6KZ)
6615 return tagh;
6616
6617 int tagl = std::min(oldtag, newtag);
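// Index the merge tables: rows of comb[] start at V6T2, the first tag
// above V6KZ, and the column is the lower of the two tags.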
6618 int result = comb[tagh - T(V6T2)][tagl];
6619
6620 // Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
6621 // as the canonical version.
6622 if (result == T(V4T_PLUS_V6_M))
6623 {
6624 result = T(V4T);
6625 *secondary_compat_out = T(V6_M);
6626 }
6627 else
6628 *secondary_compat_out = -1;
6629
6630 if (result == -1)
6631 {
6632 gold_error(_("%s: conflicting CPU architectures %d/%d"),
6633 name, oldtag, newtag);
6634 return -1;
6635 }
6636
6637 return result;
6638 #undef T
6639 }
6640
6641 // Helper to print an AEABI enum tag value.
6642
6643 template<bool big_endian>
6644 std::string
6645 Target_arm<big_endian>::aeabi_enum_name(unsigned int value)
6646 {
6647 static const char *aeabi_enum_names[] =
6648 { "", "variable-size", "32-bit", "" };
6649 const size_t aeabi_enum_names_size =
6650 sizeof(aeabi_enum_names) / sizeof(aeabi_enum_names[0]);
6651
6652 if (value < aeabi_enum_names_size)
6653 return std::string(aeabi_enum_names[value]);
6654 else
6655 {
6656 char buffer[100];
6657 sprintf(buffer, "<unknown value %u>", value);
6658 return std::string(buffer);
6659 }
6660 }
6661
6662 // Return the string value to store in Tag_CPU_name.
6663
6664 template<bool big_endian>
6665 std::string
6666 Target_arm<big_endian>::tag_cpu_name_value(unsigned int value)
6667 {
6668 static const char *name_table[] = {
6669 // These aren't real CPU names, but we can't guess
6670 // that from the architecture version alone.
6671 "Pre v4",
6672 "ARM v4",
6673 "ARM v4T",
6674 "ARM v5T",
6675 "ARM v5TE",
6676 "ARM v5TEJ",
6677 "ARM v6",
6678 "ARM v6KZ",
6679 "ARM v6T2",
6680 "ARM v6K",
6681 "ARM v7",
6682 "ARM v6-M",
6683 "ARM v6S-M",
6684 "ARM v7E-M"
6685 };
6686 const size_t name_table_size = sizeof(name_table) / sizeof(name_table[0]);
6687
6688 if (value < name_table_size)
6689 return std::string(name_table[value]);
6690 else
6691 {
6692 char buffer[100];
6693 sprintf(buffer, "<unknown CPU value %u>", value);
6694 return std::string(buffer);
6695 }
6696 }
6697
6698 // Merge object attributes from the input file called NAME with those
6699 // of the output. The input attributes are in the object pointed to by PASD.
6700
6701 template<bool big_endian>
6702 void
6703 Target_arm<big_endian>::merge_object_attributes(
6704 const char* name,
6705 const Attributes_section_data* pasd)
6706 {
6707 // Return if there is no attributes section data.
6708 if (pasd == NULL)
6709 return;
6710
6711 // If output has no object attributes, just copy.
6712 if (this->attributes_section_data_ == NULL)
6713 {
6714 this->attributes_section_data_ = new Attributes_section_data(*pasd);
6715 return;
6716 }
6717
6718 const int vendor = Object_attribute::OBJ_ATTR_PROC;
6719 const Object_attribute* in_attr = pasd->known_attributes(vendor);
6720 Object_attribute* out_attr =
6721 this->attributes_section_data_->known_attributes(vendor);
6722
6723 // This needs to happen before Tag_ABI_FP_number_model is merged.
6724 if (in_attr[elfcpp::Tag_ABI_VFP_args].int_value()
6725 != out_attr[elfcpp::Tag_ABI_VFP_args].int_value())
6726 {
6727 // Ignore mismatches if the object doesn't use floating point.
6728 if (out_attr[elfcpp::Tag_ABI_FP_number_model].int_value() == 0)
6729 out_attr[elfcpp::Tag_ABI_VFP_args].set_int_value(
6730 in_attr[elfcpp::Tag_ABI_VFP_args].int_value());
6731 else if (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value() != 0)
6732 gold_error(_("%s uses VFP register arguments, output does not"),
6733 name);
6734 }
6735
6736 for (int i = 4; i < Vendor_object_attributes::NUM_KNOWN_ATTRIBUTES; ++i)
6737 {
6738 // Merge this attribute with existing attributes.
6739 switch (i)
6740 {
6741 case elfcpp::Tag_CPU_raw_name:
6742 case elfcpp::Tag_CPU_name:
6743 // These are merged after Tag_CPU_arch.
6744 break;
6745
6746 case elfcpp::Tag_ABI_optimization_goals:
6747 case elfcpp::Tag_ABI_FP_optimization_goals:
6748 // Use the first value seen.
6749 break;
6750
6751 case elfcpp::Tag_CPU_arch:
6752 {
6753 unsigned int saved_out_attr = out_attr->int_value();
6754 // Merge Tag_CPU_arch and Tag_also_compatible_with.
6755 int secondary_compat =
6756 this->get_secondary_compatible_arch(pasd);
6757 int secondary_compat_out =
6758 this->get_secondary_compatible_arch(
6759 this->attributes_section_data_);
6760 out_attr[i].set_int_value(
6761 tag_cpu_arch_combine(name, out_attr[i].int_value(),
6762 &secondary_compat_out,
6763 in_attr[i].int_value(),
6764 secondary_compat));
6765 this->set_secondary_compatible_arch(this->attributes_section_data_,
6766 secondary_compat_out);
6767
6768 // Merge Tag_CPU_name and Tag_CPU_raw_name.
6769 if (out_attr[i].int_value() == saved_out_attr)
6770 ; // Leave the names alone.
6771 else if (out_attr[i].int_value() == in_attr[i].int_value())
6772 {
6773 // The output architecture has been changed to match the
6774 // input architecture. Use the input names.
6775 out_attr[elfcpp::Tag_CPU_name].set_string_value(
6776 in_attr[elfcpp::Tag_CPU_name].string_value());
6777 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value(
6778 in_attr[elfcpp::Tag_CPU_raw_name].string_value());
6779 }
6780 else
6781 {
6782 out_attr[elfcpp::Tag_CPU_name].set_string_value("");
6783 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value("");
6784 }
6785
6786 // If we still don't have a value for Tag_CPU_name,
6787 // make one up now. Tag_CPU_raw_name remains blank.
6788 if (out_attr[elfcpp::Tag_CPU_name].string_value() == "")
6789 {
6790 const std::string cpu_name =
6791 this->tag_cpu_name_value(out_attr[i].int_value());
6792 // FIXME: If we see an unknown CPU, this will be set
6793 // to "<unknown CPU n>", where n is the attribute value.
6794 // This is different from BFD, which leaves the name alone.
6795 out_attr[elfcpp::Tag_CPU_name].set_string_value(cpu_name);
6796 }
6797 }
6798 break;
6799
6800 case elfcpp::Tag_ARM_ISA_use:
6801 case elfcpp::Tag_THUMB_ISA_use:
6802 case elfcpp::Tag_WMMX_arch:
6803 case elfcpp::Tag_Advanced_SIMD_arch:
6804 // ??? Do Advanced_SIMD (NEON) and WMMX conflict?
6805 case elfcpp::Tag_ABI_FP_rounding:
6806 case elfcpp::Tag_ABI_FP_exceptions:
6807 case elfcpp::Tag_ABI_FP_user_exceptions:
6808 case elfcpp::Tag_ABI_FP_number_model:
6809 case elfcpp::Tag_VFP_HP_extension:
6810 case elfcpp::Tag_CPU_unaligned_access:
6811 case elfcpp::Tag_T2EE_use:
6812 case elfcpp::Tag_Virtualization_use:
6813 case elfcpp::Tag_MPextension_use:
6814 // Use the largest value specified.
6815 if (in_attr[i].int_value() > out_attr[i].int_value())
6816 out_attr[i].set_int_value(in_attr[i].int_value());
6817 break;
6818
6819 case elfcpp::Tag_ABI_align8_preserved:
6820 case elfcpp::Tag_ABI_PCS_RO_data:
6821 // Use the smallest value specified.
6822 if (in_attr[i].int_value() < out_attr[i].int_value())
6823 out_attr[i].set_int_value(in_attr[i].int_value());
6824 break;
6825
6826 case elfcpp::Tag_ABI_align8_needed:
6827 if ((in_attr[i].int_value() > 0 || out_attr[i].int_value() > 0)
6828 && (in_attr[elfcpp::Tag_ABI_align8_preserved].int_value() == 0
6829 || (out_attr[elfcpp::Tag_ABI_align8_preserved].int_value()
6830 == 0)))
6831 {
6832 // This error message should be enabled once all non-conformant
6833 // binaries in the toolchain have had the attributes set
6834 // properly.
6835 // gold_error(_("output 8-byte data alignment conflicts with %s"),
6836 // name);
6837 }
6838 // Fall through.
6839 case elfcpp::Tag_ABI_FP_denormal:
6840 case elfcpp::Tag_ABI_PCS_GOT_use:
6841 {
6842 // These tags have 0 = don't care, 1 = strong requirement,
6843 // 2 = weak requirement.
6844 static const int order_021[3] = {0, 2, 1};
6845
6846 // Use the "greatest" from the sequence 0, 2, 1, or the largest
6847 // value if greater than 2 (for future-proofing).
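	    // (For example, merging a weak requirement (2) into a strong
	    // one (1) keeps 1, while merging 2 into "don't care" (0)
	    // yields 2.)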
6848 if ((in_attr[i].int_value() > 2
6849 && in_attr[i].int_value() > out_attr[i].int_value())
6850 || (in_attr[i].int_value() <= 2
6851 && out_attr[i].int_value() <= 2
6852 && (order_021[in_attr[i].int_value()]
6853 > order_021[out_attr[i].int_value()])))
6854 out_attr[i].set_int_value(in_attr[i].int_value());
6855 }
6856 break;
6857
6858 case elfcpp::Tag_CPU_arch_profile:
6859 if (out_attr[i].int_value() != in_attr[i].int_value())
6860 {
6861 // 0 will merge with anything.
6862 // 'A' and 'S' merge to 'A'.
6863 // 'R' and 'S' merge to 'R'.
6864 // 'M' and 'A|R|S' is an error.
6865 if (out_attr[i].int_value() == 0
6866 || (out_attr[i].int_value() == 'S'
6867 && (in_attr[i].int_value() == 'A'
6868 || in_attr[i].int_value() == 'R')))
6869 out_attr[i].set_int_value(in_attr[i].int_value());
6870 else if (in_attr[i].int_value() == 0
6871 || (in_attr[i].int_value() == 'S'
6872 && (out_attr[i].int_value() == 'A'
6873 || out_attr[i].int_value() == 'R')))
6874 ; // Do nothing.
6875 else
6876 {
6877 gold_error
6878 (_("conflicting architecture profiles %c/%c"),
6879 in_attr[i].int_value() ? in_attr[i].int_value() : '0',
6880 out_attr[i].int_value() ? out_attr[i].int_value() : '0');
6881 }
6882 }
6883 break;
6884 case elfcpp::Tag_VFP_arch:
6885 {
6886 static const struct
6887 {
6888 int ver;
6889 int regs;
6890 } vfp_versions[7] =
6891 {
6892 {0, 0},
6893 {1, 16},
6894 {2, 16},
6895 {3, 32},
6896 {3, 16},
6897 {4, 32},
6898 {4, 16}
6899 };
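	    // The table above maps each Tag_VFP_arch value to an
	    // (ISA version, register count) pair; the EABI encoding assumed
	    // here is 0 = no VFP, 1 = VFPv1, 2 = VFPv2, 3 = VFPv3 (32 D
	    // registers), 4 = VFPv3-D16, 5 = VFPv4, 6 = VFPv4-D16.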
6900
6901 // Values greater than 6 aren't defined, so just pick the
6902 // biggest.
6903 if (in_attr[i].int_value() > 6
6904 && in_attr[i].int_value() > out_attr[i].int_value())
6905 {
6906 *out_attr = *in_attr;
6907 break;
6908 }
6909 // The output uses the superset of input features
6910 // (ISA version) and registers.
6911 int ver = std::max(vfp_versions[in_attr[i].int_value()].ver,
6912 vfp_versions[out_attr[i].int_value()].ver);
6913 int regs = std::max(vfp_versions[in_attr[i].int_value()].regs,
6914 vfp_versions[out_attr[i].int_value()].regs);
6915 	    // This assumes all possible supersets are also valid
6916 	    // options.
6917 int newval;
6918 for (newval = 6; newval > 0; newval--)
6919 {
6920 if (regs == vfp_versions[newval].regs
6921 && ver == vfp_versions[newval].ver)
6922 break;
6923 }
6924 out_attr[i].set_int_value(newval);
6925 }
6926 break;
6927 case elfcpp::Tag_PCS_config:
6928 if (out_attr[i].int_value() == 0)
6929 out_attr[i].set_int_value(in_attr[i].int_value());
6930 else if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
6931 {
6932 // It's sometimes ok to mix different configs, so this is only
6933 // a warning.
6934 gold_warning(_("%s: conflicting platform configuration"), name);
6935 }
6936 break;
6937 case elfcpp::Tag_ABI_PCS_R9_use:
6938 if (in_attr[i].int_value() != out_attr[i].int_value()
6939 && out_attr[i].int_value() != elfcpp::AEABI_R9_unused
6940 && in_attr[i].int_value() != elfcpp::AEABI_R9_unused)
6941 {
6942 gold_error(_("%s: conflicting use of R9"), name);
6943 }
6944 if (out_attr[i].int_value() == elfcpp::AEABI_R9_unused)
6945 out_attr[i].set_int_value(in_attr[i].int_value());
6946 break;
6947 case elfcpp::Tag_ABI_PCS_RW_data:
6948 if (in_attr[i].int_value() == elfcpp::AEABI_PCS_RW_data_SBrel
6949 && (in_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
6950 != elfcpp::AEABI_R9_SB)
6951 && (out_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
6952 != elfcpp::AEABI_R9_unused))
6953 {
6954 gold_error(_("%s: SB relative addressing conflicts with use "
6955 "of R9"),
6956 name);
6957 }
6958 // Use the smallest value specified.
6959 if (in_attr[i].int_value() < out_attr[i].int_value())
6960 out_attr[i].set_int_value(in_attr[i].int_value());
6961 break;
6962 case elfcpp::Tag_ABI_PCS_wchar_t:
6963 // FIXME: Make it possible to turn off this warning.
6964 if (out_attr[i].int_value()
6965 && in_attr[i].int_value()
6966 && out_attr[i].int_value() != in_attr[i].int_value())
6967 {
6968 gold_warning(_("%s uses %u-byte wchar_t yet the output is to "
6969 "use %u-byte wchar_t; use of wchar_t values "
6970 "across objects may fail"),
6971 name, in_attr[i].int_value(),
6972 out_attr[i].int_value());
6973 }
6974 else if (in_attr[i].int_value() && !out_attr[i].int_value())
6975 out_attr[i].set_int_value(in_attr[i].int_value());
6976 break;
6977 case elfcpp::Tag_ABI_enum_size:
6978 if (in_attr[i].int_value() != elfcpp::AEABI_enum_unused)
6979 {
6980 if (out_attr[i].int_value() == elfcpp::AEABI_enum_unused
6981 || out_attr[i].int_value() == elfcpp::AEABI_enum_forced_wide)
6982 {
6983 // The existing object is compatible with anything.
6984 // Use whatever requirements the new object has.
6985 out_attr[i].set_int_value(in_attr[i].int_value());
6986 }
6987 // FIXME: Make it possible to turn off this warning.
6988 else if (in_attr[i].int_value() != elfcpp::AEABI_enum_forced_wide
6989 && out_attr[i].int_value() != in_attr[i].int_value())
6990 {
6991 unsigned int in_value = in_attr[i].int_value();
6992 unsigned int out_value = out_attr[i].int_value();
6993 gold_warning(_("%s uses %s enums yet the output is to use "
6994 "%s enums; use of enum values across objects "
6995 "may fail"),
6996 name,
6997 this->aeabi_enum_name(in_value).c_str(),
6998 this->aeabi_enum_name(out_value).c_str());
6999 }
7000 }
7001 break;
7002 case elfcpp::Tag_ABI_VFP_args:
7003 	  // Already done.
7004 break;
7005 case elfcpp::Tag_ABI_WMMX_args:
7006 if (in_attr[i].int_value() != out_attr[i].int_value())
7007 {
7008 gold_error(_("%s uses iWMMXt register arguments, output does "
7009 "not"),
7010 name);
7011 }
7012 break;
7013 case Object_attribute::Tag_compatibility:
7014 // Merged in target-independent code.
7015 break;
7016 case elfcpp::Tag_ABI_HardFP_use:
7017 // 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP).
7018 if ((in_attr[i].int_value() == 1 && out_attr[i].int_value() == 2)
7019 || (in_attr[i].int_value() == 2 && out_attr[i].int_value() == 1))
7020 out_attr[i].set_int_value(3);
7021 else if (in_attr[i].int_value() > out_attr[i].int_value())
7022 out_attr[i].set_int_value(in_attr[i].int_value());
7023 break;
7024 case elfcpp::Tag_ABI_FP_16bit_format:
7025 if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
7026 {
7027 if (in_attr[i].int_value() != out_attr[i].int_value())
7028 gold_error(_("fp16 format mismatch between %s and output"),
7029 name);
7030 }
7031 if (in_attr[i].int_value() != 0)
7032 out_attr[i].set_int_value(in_attr[i].int_value());
7033 break;
7034
7035 case elfcpp::Tag_nodefaults:
7036 // This tag is set if it exists, but the value is unused (and is
7037 // typically zero). We don't actually need to do anything here -
7038 // the merge happens automatically when the type flags are merged
7039 // below.
7040 break;
7041 case elfcpp::Tag_also_compatible_with:
7042 // Already done in Tag_CPU_arch.
7043 break;
7044 case elfcpp::Tag_conformance:
7045 // Keep the attribute if it matches. Throw it away otherwise.
7046 // No attribute means no claim to conform.
7047 if (in_attr[i].string_value() != out_attr[i].string_value())
7048 out_attr[i].set_string_value("");
7049 break;
7050
7051 default:
7052 {
7053 const char* err_object = NULL;
7054
7055 // The "known_obj_attributes" table does contain some undefined
7056 	    // attributes.  Ensure that they are unused.
7057 if (out_attr[i].int_value() != 0
7058 || out_attr[i].string_value() != "")
7059 err_object = "output";
7060 else if (in_attr[i].int_value() != 0
7061 || in_attr[i].string_value() != "")
7062 err_object = name;
7063
7064 if (err_object != NULL)
7065 {
7066 // Attribute numbers >=64 (mod 128) can be safely ignored.
7067 if ((i & 127) < 64)
7068 gold_error(_("%s: unknown mandatory EABI object attribute "
7069 "%d"),
7070 err_object, i);
7071 else
7072 gold_warning(_("%s: unknown EABI object attribute %d"),
7073 err_object, i);
7074 }
7075
7076 // Only pass on attributes that match in both inputs.
7077 if (!in_attr[i].matches(out_attr[i]))
7078 {
7079 out_attr[i].set_int_value(0);
7080 out_attr[i].set_string_value("");
7081 }
7082 }
7083 }
7084
7085 // If out_attr was copied from in_attr then it won't have a type yet.
7086 if (in_attr[i].type() && !out_attr[i].type())
7087 out_attr[i].set_type(in_attr[i].type());
7088 }
7089
7090 // Merge Tag_compatibility attributes and any common GNU ones.
7091 this->attributes_section_data_->merge(name, pasd);
7092
7093 // Check for any attributes not known on ARM.
7094 typedef Vendor_object_attributes::Other_attributes Other_attributes;
7095 const Other_attributes* in_other_attributes = pasd->other_attributes(vendor);
7096 Other_attributes::const_iterator in_iter = in_other_attributes->begin();
7097 Other_attributes* out_other_attributes =
7098 this->attributes_section_data_->other_attributes(vendor);
7099 Other_attributes::iterator out_iter = out_other_attributes->begin();
7100
7101 while (in_iter != in_other_attributes->end()
7102 || out_iter != out_other_attributes->end())
7103 {
7104 const char* err_object = NULL;
7105 int err_tag = 0;
7106
7107 // The tags for each list are in numerical order.
7108 // If the tags are equal, then merge.
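      // In effect this is a merge of two tag-sorted maps: the iterator
      // holding the smaller tag is advanced on its own, and the attributes
      // are compared only when both lists carry the same tag.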
7109 if (out_iter != out_other_attributes->end()
7110 && (in_iter == in_other_attributes->end()
7111 || in_iter->first > out_iter->first))
7112 {
7113 // This attribute only exists in output. We can't merge, and we
7114 // don't know what the tag means, so delete it.
7115 err_object = "output";
7116 err_tag = out_iter->first;
7117 int saved_tag = out_iter->first;
7118 delete out_iter->second;
7119 out_other_attributes->erase(out_iter);
7120 out_iter = out_other_attributes->upper_bound(saved_tag);
7121 }
7122 else if (in_iter != in_other_attributes->end()
7123 	       && (out_iter == out_other_attributes->end()
7124 || in_iter->first < out_iter->first))
7125 {
7126 // This attribute only exists in input. We can't merge, and we
7127 // don't know what the tag means, so ignore it.
7128 err_object = name;
7129 err_tag = in_iter->first;
7130 ++in_iter;
7131 }
7132 else // The tags are equal.
7133 {
7134 	  // At present, all attributes in the list are unknown, and
7135 // therefore can't be merged meaningfully.
7136 err_object = "output";
7137 err_tag = out_iter->first;
7138
7139 // Only pass on attributes that match in both inputs.
7140 if (!in_iter->second->matches(*(out_iter->second)))
7141 {
7142 // No match. Delete the attribute.
7143 int saved_tag = out_iter->first;
7144 delete out_iter->second;
7145 out_other_attributes->erase(out_iter);
7146 out_iter = out_other_attributes->upper_bound(saved_tag);
7147 }
7148 else
7149 {
7150 // Matched. Keep the attribute and move to the next.
7151 ++out_iter;
7152 ++in_iter;
7153 }
7154 }
7155
7156 if (err_object)
7157 {
7158 	  // Attribute numbers >=64 (mod 128) can be safely ignored.
7159 if ((err_tag & 127) < 64)
7160 {
7161 gold_error(_("%s: unknown mandatory EABI object attribute %d"),
7162 err_object, err_tag);
7163 }
7164 else
7165 {
7166 gold_warning(_("%s: unknown EABI object attribute %d"),
7167 err_object, err_tag);
7168 }
7169 }
7170 }
7171 }
7172
7173 // Return whether a relocation type uses the LSB to distinguish THUMB
7174 // addresses.
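// (For ARM/Thumb interworking, the low bit of the symbol value for these
// relocations selects the instruction set of the destination: 1 means the
// target is Thumb code, 0 means it is ARM code.)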
7175 template<bool big_endian>
7176 bool
7177 Target_arm<big_endian>::reloc_uses_thumb_bit(unsigned int r_type)
7178 {
7179 switch (r_type)
7180 {
7181 case elfcpp::R_ARM_PC24:
7182 case elfcpp::R_ARM_ABS32:
7183 case elfcpp::R_ARM_REL32:
7184 case elfcpp::R_ARM_SBREL32:
7185 case elfcpp::R_ARM_THM_CALL:
7186 case elfcpp::R_ARM_GLOB_DAT:
7187 case elfcpp::R_ARM_JUMP_SLOT:
7188 case elfcpp::R_ARM_GOTOFF32:
7189 case elfcpp::R_ARM_PLT32:
7190 case elfcpp::R_ARM_CALL:
7191 case elfcpp::R_ARM_JUMP24:
7192 case elfcpp::R_ARM_THM_JUMP24:
7193 case elfcpp::R_ARM_SBREL31:
7194 case elfcpp::R_ARM_PREL31:
7195 case elfcpp::R_ARM_MOVW_ABS_NC:
7196 case elfcpp::R_ARM_MOVW_PREL_NC:
7197 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
7198 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
7199 case elfcpp::R_ARM_THM_JUMP19:
7200 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
7201 case elfcpp::R_ARM_ALU_PC_G0_NC:
7202 case elfcpp::R_ARM_ALU_PC_G0:
7203 case elfcpp::R_ARM_ALU_PC_G1_NC:
7204 case elfcpp::R_ARM_ALU_PC_G1:
7205 case elfcpp::R_ARM_ALU_PC_G2:
7206 case elfcpp::R_ARM_ALU_SB_G0_NC:
7207 case elfcpp::R_ARM_ALU_SB_G0:
7208 case elfcpp::R_ARM_ALU_SB_G1_NC:
7209 case elfcpp::R_ARM_ALU_SB_G1:
7210 case elfcpp::R_ARM_ALU_SB_G2:
7211 case elfcpp::R_ARM_MOVW_BREL_NC:
7212 case elfcpp::R_ARM_MOVW_BREL:
7213 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
7214 case elfcpp::R_ARM_THM_MOVW_BREL:
7215 return true;
7216 default:
7217 return false;
7218 }
7219 }
7220
7221 // Stub-generation methods for Target_arm.
7222
7223 // Make a new Arm_input_section object.
7224
7225 template<bool big_endian>
7226 Arm_input_section<big_endian>*
7227 Target_arm<big_endian>::new_arm_input_section(
7228 Relobj* relobj,
7229 unsigned int shndx)
7230 {
7231 Input_section_specifier iss(relobj, shndx);
7232
7233 Arm_input_section<big_endian>* arm_input_section =
7234 new Arm_input_section<big_endian>(relobj, shndx);
7235 arm_input_section->init();
7236
7237 // Register new Arm_input_section in map for look-up.
7238 std::pair<typename Arm_input_section_map::iterator, bool> ins =
7239 this->arm_input_section_map_.insert(std::make_pair(iss, arm_input_section));
7240
7241   // Make sure that we have not created another Arm_input_section
7242 // for this input section already.
7243 gold_assert(ins.second);
7244
7245 return arm_input_section;
7246 }
7247
7248 // Find the Arm_input_section object corresponding to the SHNDX-th input
7249 // section of RELOBJ.
7250
7251 template<bool big_endian>
7252 Arm_input_section<big_endian>*
7253 Target_arm<big_endian>::find_arm_input_section(
7254 Relobj* relobj,
7255 unsigned int shndx) const
7256 {
7257 Input_section_specifier iss(relobj, shndx);
7258 typename Arm_input_section_map::const_iterator p =
7259 this->arm_input_section_map_.find(iss);
7260 return (p != this->arm_input_section_map_.end()) ? p->second : NULL;
7261 }
7262
7263 // Make a new stub table.
7264
7265 template<bool big_endian>
7266 Stub_table<big_endian>*
7267 Target_arm<big_endian>::new_stub_table(Arm_input_section<big_endian>* owner)
7268 {
7269 Stub_table<big_endian>* stub_table =
7270 new Stub_table<big_endian>(owner);
7271 this->stub_tables_.push_back(stub_table);
7272
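  // The new stub table is laid out immediately after its owner input
  // section, both in the output address space and in the file.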
7273 stub_table->set_address(owner->address() + owner->data_size());
7274 stub_table->set_file_offset(owner->offset() + owner->data_size());
7275 stub_table->finalize_data_size();
7276
7277 return stub_table;
7278 }
7279
7280 // Scan a relocation for stub generation.
7281
7282 template<bool big_endian>
7283 void
7284 Target_arm<big_endian>::scan_reloc_for_stub(
7285 const Relocate_info<32, big_endian>* relinfo,
7286 unsigned int r_type,
7287 const Sized_symbol<32>* gsym,
7288 unsigned int r_sym,
7289 const Symbol_value<32>* psymval,
7290 elfcpp::Elf_types<32>::Elf_Swxword addend,
7291 Arm_address address)
7292 {
7293 typedef typename Target_arm<big_endian>::Relocate Relocate;
7294
7295 const Arm_relobj<big_endian>* arm_relobj =
7296 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
7297
7298 bool target_is_thumb;
7299 Symbol_value<32> symval;
7300 if (gsym != NULL)
7301 {
7302 // This is a global symbol. Determine if we use PLT and if the
7303 // final target is THUMB.
7304 if (gsym->use_plt_offset(Relocate::reloc_is_non_pic(r_type)))
7305 {
7306 // This uses a PLT, change the symbol value.
7307 symval.set_output_value(this->plt_section()->address()
7308 + gsym->plt_offset());
7309 psymval = &symval;
7310 target_is_thumb = false;
7311 }
7312 else if (gsym->is_undefined())
7313 	// There is no need to generate a stub if the symbol is undefined.
7314 return;
7315 else
7316 {
7317 target_is_thumb =
7318 ((gsym->type() == elfcpp::STT_ARM_TFUNC)
7319 || (gsym->type() == elfcpp::STT_FUNC
7320 && !gsym->is_undefined()
7321 && ((psymval->value(arm_relobj, 0) & 1) != 0)));
7322 }
7323 }
7324 else
7325 {
7326 // This is a local symbol. Determine if the final target is THUMB.
7327 target_is_thumb = arm_relobj->local_symbol_is_thumb_function(r_sym);
7328 }
7329
7330 // Strip LSB if this points to a THUMB target.
7331 if (target_is_thumb
7332 && Target_arm<big_endian>::reloc_uses_thumb_bit(r_type)
7333 && ((psymval->value(arm_relobj, 0) & 1) != 0))
7334 {
7335 Arm_address stripped_value =
7336 psymval->value(arm_relobj, 0) & ~static_cast<Arm_address>(1);
7337 symval.set_output_value(stripped_value);
7338 psymval = &symval;
7339 }
7340
7341 // Get the symbol value.
7342 Symbol_value<32>::Value value = psymval->value(arm_relobj, 0);
7343
7344 // Owing to pipelining, the PC relative branches below actually skip
7345 // two instructions when the branch offset is 0.
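  // (An ARM branch reads the PC as its own address plus 8, while a Thumb
  // branch reads it as its own address plus 4, hence the adjustments below.)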
7346 Arm_address destination;
7347 switch (r_type)
7348 {
7349 case elfcpp::R_ARM_CALL:
7350 case elfcpp::R_ARM_JUMP24:
7351 case elfcpp::R_ARM_PLT32:
7352 // ARM branches.
7353 destination = value + addend + 8;
7354 break;
7355 case elfcpp::R_ARM_THM_CALL:
7356 case elfcpp::R_ARM_THM_XPC22:
7357 case elfcpp::R_ARM_THM_JUMP24:
7358 case elfcpp::R_ARM_THM_JUMP19:
7359 // THUMB branches.
7360 destination = value + addend + 4;
7361 break;
7362 default:
7363 gold_unreachable();
7364 }
7365
7366 Reloc_stub* stub = NULL;
7367 Stub_type stub_type =
7368 Reloc_stub::stub_type_for_reloc(r_type, address, destination,
7369 target_is_thumb);
7370 if (stub_type != arm_stub_none)
7371 {
7372 // Try looking up an existing stub from a stub table.
7373 Stub_table<big_endian>* stub_table =
7374 arm_relobj->stub_table(relinfo->data_shndx);
7375 gold_assert(stub_table != NULL);
7376
7377 // Locate stub by destination.
7378 Reloc_stub::Key stub_key(stub_type, gsym, arm_relobj, r_sym, addend);
7379
7380 // Create a stub if there is not one already
7381 stub = stub_table->find_reloc_stub(stub_key);
7382 if (stub == NULL)
7383 {
7384 	  // Create a new stub and add it to the stub table.
7385 stub = this->stub_factory().make_reloc_stub(stub_type);
7386 stub_table->add_reloc_stub(stub, stub_key);
7387 }
7388
7389 // Record the destination address.
7390 stub->set_destination_address(destination
7391 | (target_is_thumb ? 1 : 0));
7392 }
7393
7394 // For Cortex-A8, we need to record a relocation at 4K page boundary.
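  // (The address check below selects branches whose first halfword sits in
  // the last two bytes of a 4KB page, so the instruction itself straddles
  // the page boundary.)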
7395 if (this->fix_cortex_a8_
7396 && (r_type == elfcpp::R_ARM_THM_JUMP24
7397 || r_type == elfcpp::R_ARM_THM_JUMP19
7398 || r_type == elfcpp::R_ARM_THM_CALL
7399 || r_type == elfcpp::R_ARM_THM_XPC22)
7400 && (address & 0xfffU) == 0xffeU)
7401 {
7402 // Found a candidate. Note we haven't checked the destination is
7403 // within 4K here: if we do so (and don't create a record) we can't
7404 // tell that a branch should have been relocated when scanning later.
7405 this->cortex_a8_relocs_info_[address] =
7406 new Cortex_a8_reloc(stub, r_type,
7407 destination | (target_is_thumb ? 1 : 0));
7408 }
7409 }
7410
7411 // This function scans a relocation section for stub generation.
7412 // For each relocation it decides whether a stub is needed and records
7413 // any required stub in the owning section's stub table; the relocations
7414 // themselves are applied later by this target's Relocate class.
7415
7416 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
7417 // SHT_REL or SHT_RELA.
7418
7419 // PRELOCS points to the relocation data. RELOC_COUNT is the number
7420 // of relocs. OUTPUT_SECTION is the output section.
7421 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
7422 // mapped to output offsets.
7423
7424 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
7425 // VIEW_SIZE is the size. These refer to the input section, unless
7426 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
7427 // the output section.
7428
7429 template<bool big_endian>
7430 template<int sh_type>
7431 void inline
7432 Target_arm<big_endian>::scan_reloc_section_for_stubs(
7433 const Relocate_info<32, big_endian>* relinfo,
7434 const unsigned char* prelocs,
7435 size_t reloc_count,
7436 Output_section* output_section,
7437 bool needs_special_offset_handling,
7438 const unsigned char* view,
7439 elfcpp::Elf_types<32>::Elf_Addr view_address,
7440 section_size_type)
7441 {
7442 typedef typename Reloc_types<sh_type, 32, big_endian>::Reloc Reltype;
7443 const int reloc_size =
7444 Reloc_types<sh_type, 32, big_endian>::reloc_size;
7445
7446 Arm_relobj<big_endian>* arm_object =
7447 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
7448 unsigned int local_count = arm_object->local_symbol_count();
7449
7450 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
7451
7452 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
7453 {
7454 Reltype reloc(prelocs);
7455
7456 typename elfcpp::Elf_types<32>::Elf_WXword r_info = reloc.get_r_info();
7457 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
7458 unsigned int r_type = elfcpp::elf_r_type<32>(r_info);
7459
7460 r_type = this->get_real_reloc_type(r_type);
7461
7462 // Only a few relocation types need stubs.
7463 if ((r_type != elfcpp::R_ARM_CALL)
7464 && (r_type != elfcpp::R_ARM_JUMP24)
7465 && (r_type != elfcpp::R_ARM_PLT32)
7466 && (r_type != elfcpp::R_ARM_THM_CALL)
7467 && (r_type != elfcpp::R_ARM_THM_XPC22)
7468 && (r_type != elfcpp::R_ARM_THM_JUMP24)
7469 && (r_type != elfcpp::R_ARM_THM_JUMP19))
7470 continue;
7471
7472 section_offset_type offset =
7473 convert_to_section_size_type(reloc.get_r_offset());
7474
7475 if (needs_special_offset_handling)
7476 {
7477 offset = output_section->output_offset(relinfo->object,
7478 relinfo->data_shndx,
7479 offset);
7480 if (offset == -1)
7481 continue;
7482 }
7483
7484 // Get the addend.
7485 Stub_addend_reader<sh_type, big_endian> stub_addend_reader;
7486 elfcpp::Elf_types<32>::Elf_Swxword addend =
7487 stub_addend_reader(r_type, view + offset, reloc);
7488
7489 const Sized_symbol<32>* sym;
7490
7491 Symbol_value<32> symval;
7492 const Symbol_value<32> *psymval;
7493 if (r_sym < local_count)
7494 {
7495 sym = NULL;
7496 psymval = arm_object->local_symbol(r_sym);
7497
7498 // If the local symbol belongs to a section we are discarding,
7499 // and that section is a debug section, try to find the
7500 // corresponding kept section and map this symbol to its
7501 // counterpart in the kept section. The symbol must not
7502 // correspond to a section we are folding.
7503 bool is_ordinary;
7504 unsigned int shndx = psymval->input_shndx(&is_ordinary);
7505 if (is_ordinary
7506 && shndx != elfcpp::SHN_UNDEF
7507 && !arm_object->is_section_included(shndx)
7508 && !(relinfo->symtab->is_section_folded(arm_object, shndx)))
7509 {
7510 if (comdat_behavior == CB_UNDETERMINED)
7511 {
7512 std::string name =
7513 arm_object->section_name(relinfo->data_shndx);
7514 comdat_behavior = get_comdat_behavior(name.c_str());
7515 }
7516 if (comdat_behavior == CB_PRETEND)
7517 {
7518 bool found;
7519 typename elfcpp::Elf_types<32>::Elf_Addr value =
7520 arm_object->map_to_kept_section(shndx, &found);
7521 if (found)
7522 symval.set_output_value(value + psymval->input_value());
7523 else
7524 symval.set_output_value(0);
7525 }
7526 else
7527 {
7528 symval.set_output_value(0);
7529 }
7530 symval.set_no_output_symtab_entry();
7531 psymval = &symval;
7532 }
7533 }
7534 else
7535 {
7536 const Symbol* gsym = arm_object->global_symbol(r_sym);
7537 gold_assert(gsym != NULL);
7538 if (gsym->is_forwarder())
7539 gsym = relinfo->symtab->resolve_forwards(gsym);
7540
7541 sym = static_cast<const Sized_symbol<32>*>(gsym);
7542 if (sym->has_symtab_index())
7543 symval.set_output_symtab_index(sym->symtab_index());
7544 else
7545 symval.set_no_output_symtab_entry();
7546
7547 // We need to compute the would-be final value of this global
7548 // symbol.
7549 const Symbol_table* symtab = relinfo->symtab;
7550 const Sized_symbol<32>* sized_symbol =
7551 symtab->get_sized_symbol<32>(gsym);
7552 Symbol_table::Compute_final_value_status status;
7553 Arm_address value =
7554 symtab->compute_final_value<32>(sized_symbol, &status);
7555
7556 	  // Skip this if the symbol has no output section.
7557 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
7558 continue;
7559
7560 symval.set_output_value(value);
7561 psymval = &symval;
7562 }
7563
7564 // If symbol is a section symbol, we don't know the actual type of
7565 // destination. Give up.
7566 if (psymval->is_section_symbol())
7567 continue;
7568
7569 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
7570 addend, view_address + offset);
7571 }
7572 }
7573
7574 // Scan an input section for stub generation.
7575
7576 template<bool big_endian>
7577 void
7578 Target_arm<big_endian>::scan_section_for_stubs(
7579 const Relocate_info<32, big_endian>* relinfo,
7580 unsigned int sh_type,
7581 const unsigned char* prelocs,
7582 size_t reloc_count,
7583 Output_section* output_section,
7584 bool needs_special_offset_handling,
7585 const unsigned char* view,
7586 Arm_address view_address,
7587 section_size_type view_size)
7588 {
7589 if (sh_type == elfcpp::SHT_REL)
7590 this->scan_reloc_section_for_stubs<elfcpp::SHT_REL>(
7591 relinfo,
7592 prelocs,
7593 reloc_count,
7594 output_section,
7595 needs_special_offset_handling,
7596 view,
7597 view_address,
7598 view_size);
7599 else if (sh_type == elfcpp::SHT_RELA)
7600 // We do not support RELA type relocations yet. This is provided for
7601 // completeness.
7602 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
7603 relinfo,
7604 prelocs,
7605 reloc_count,
7606 output_section,
7607 needs_special_offset_handling,
7608 view,
7609 view_address,
7610 view_size);
7611 else
7612 gold_unreachable();
7613 }
7614
7615 // Group input sections for stub generation.
7616 //
7617 // We group input sections within an output section so that the total
7618 // size, including any padding space due to alignment, is smaller than
7619 // GROUP_SIZE, unless the only input section in a group is already bigger
7620 // than GROUP_SIZE.  For each group an ARM stub table is created and is
7621 // placed after the last input section of the group.  If
7622 // STUBS_ALWAYS_AFTER_BRANCH is false, we further extend the group after
7623 // the stub table.
7624
7625 template<bool big_endian>
7626 void
7627 Target_arm<big_endian>::group_sections(
7628 Layout* layout,
7629 section_size_type group_size,
7630 bool stubs_always_after_branch)
7631 {
7632 // Group input sections and insert stub table
7633 Layout::Section_list section_list;
7634 layout->get_allocated_sections(&section_list);
7635 for (Layout::Section_list::const_iterator p = section_list.begin();
7636 p != section_list.end();
7637 ++p)
7638 {
7639 Arm_output_section<big_endian>* output_section =
7640 Arm_output_section<big_endian>::as_arm_output_section(*p);
7641 output_section->group_sections(group_size, stubs_always_after_branch,
7642 this);
7643 }
7644 }
7645
7646 // Relaxation hook. This is where we do stub generation.
7647
7648 template<bool big_endian>
7649 bool
7650 Target_arm<big_endian>::do_relax(
7651 int pass,
7652 const Input_objects* input_objects,
7653 Symbol_table* symtab,
7654 Layout* layout)
7655 {
7656 // No need to generate stubs if this is a relocatable link.
7657 gold_assert(!parameters->options().relocatable());
7658
7659 // If this is the first pass, we need to group input sections into
7660 // stub groups.
7661 if (pass == 1)
7662 {
7663 // Determine the stub group size. The group size is the absolute
7664 // value of the parameter --stub-group-size. If --stub-group-size
7665       // is passed a negative value, we restrict stubs to be always after
7666 // the stubbed branches.
7667 int32_t stub_group_size_param =
7668 parameters->options().stub_group_size();
7669 bool stubs_always_after_branch = stub_group_size_param < 0;
7670 section_size_type stub_group_size = abs(stub_group_size_param);
7671
7672 // The Cortex-A8 erratum fix depends on stubs not being in the same 4K
7673 // page as the first half of a 32-bit branch straddling two 4K pages.
7674 // This is a crude way of enforcing that.
7675 if (this->fix_cortex_a8_)
7676 stubs_always_after_branch = true;
7677
7678 if (stub_group_size == 1)
7679 {
7680 // Default value.
7681 	  // The Thumb branch range of +-4MB has to be used as the default
7682 // maximum size (a given section can contain both ARM and Thumb
7683 // code, so the worst case has to be taken into account).
7684 //
7685 // This value is 24K less than that, which allows for 2025
7686 // 12-byte stubs. If we exceed that, then we will fail to link.
7687 // The user will have to relink with an explicit group size
7688 // option.
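	  // (Illustrative arithmetic: 4MB = 4194304 bytes; 2025 stubs of
	  // 12 bytes each take 24300 bytes; 4194304 - 24300 = 4170004,
	  // rounded down to 4170000.)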
7689 stub_group_size = 4170000;
7690 }
7691
7692 group_sections(layout, stub_group_size, stubs_always_after_branch);
7693 }
7694
7695 // The Cortex-A8 stubs are sensitive to layout of code sections. At the
7696 // beginning of each relaxation pass, just blow away all the stubs.
7697 // Alternatively, we could selectively remove only the stubs and reloc
7698 // information for code sections that have moved since the last pass.
7699 // That would require more book-keeping.
7700 typedef typename Stub_table_list::iterator Stub_table_iterator;
7701 if (this->fix_cortex_a8_)
7702 {
7703 // Clear all Cortex-A8 reloc information.
7704 for (typename Cortex_a8_relocs_info::const_iterator p =
7705 this->cortex_a8_relocs_info_.begin();
7706 p != this->cortex_a8_relocs_info_.end();
7707 ++p)
7708 delete p->second;
7709 this->cortex_a8_relocs_info_.clear();
7710
7711 // Remove all Cortex-A8 stubs.
7712 for (Stub_table_iterator sp = this->stub_tables_.begin();
7713 sp != this->stub_tables_.end();
7714 ++sp)
7715 (*sp)->remove_all_cortex_a8_stubs();
7716 }
7717
7718 // Scan relocs for relocation stubs
7719 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
7720 op != input_objects->relobj_end();
7721 ++op)
7722 {
7723 Arm_relobj<big_endian>* arm_relobj =
7724 Arm_relobj<big_endian>::as_arm_relobj(*op);
7725 arm_relobj->scan_sections_for_stubs(this, symtab, layout);
7726 }
7727
7728 // Check all stub tables to see if any of them have their data sizes
7729   // or address alignments changed.  These are the only things that
7730 // matter.
7731 bool any_stub_table_changed = false;
7732 for (Stub_table_iterator sp = this->stub_tables_.begin();
7733 (sp != this->stub_tables_.end()) && !any_stub_table_changed;
7734 ++sp)
7735 {
7736 if ((*sp)->update_data_size_and_addralign())
7737 any_stub_table_changed = true;
7738 }
7739
7740 // Finalize the stubs in the last relaxation pass.
7741 if (!any_stub_table_changed)
7742 for (Stub_table_iterator sp = this->stub_tables_.begin();
7743 (sp != this->stub_tables_.end()) && !any_stub_table_changed;
7744 ++sp)
7745 (*sp)->finalize_stubs();
7746
7747 return any_stub_table_changed;
7748 }
7749
7750 // Relocate a stub.
7751
7752 template<bool big_endian>
7753 void
7754 Target_arm<big_endian>::relocate_stub(
7755 Stub* stub,
7756 const Relocate_info<32, big_endian>* relinfo,
7757 Output_section* output_section,
7758 unsigned char* view,
7759 Arm_address address,
7760 section_size_type view_size)
7761 {
7762 Relocate relocate;
7763 const Stub_template* stub_template = stub->stub_template();
7764 for (size_t i = 0; i < stub_template->reloc_count(); i++)
7765 {
7766 size_t reloc_insn_index = stub_template->reloc_insn_index(i);
7767 const Insn_template* insn = &stub_template->insns()[reloc_insn_index];
7768
7769 unsigned int r_type = insn->r_type();
7770 section_size_type reloc_offset = stub_template->reloc_offset(i);
7771 section_size_type reloc_size = insn->size();
7772 gold_assert(reloc_offset + reloc_size <= view_size);
7773
7774 // This is the address of the stub destination.
7775 Arm_address target = stub->reloc_target(i) + insn->reloc_addend();
7776 Symbol_value<32> symval;
7777 symval.set_output_value(target);
7778
7779 // Synthesize a fake reloc just in case. We don't have a symbol so
7780 // we use 0.
7781 unsigned char reloc_buffer[elfcpp::Elf_sizes<32>::rel_size];
7782 memset(reloc_buffer, 0, sizeof(reloc_buffer));
7783 elfcpp::Rel_write<32, big_endian> reloc_write(reloc_buffer);
7784 reloc_write.put_r_offset(reloc_offset);
7785 reloc_write.put_r_info(elfcpp::elf_r_info<32>(0, r_type));
7786 elfcpp::Rel<32, big_endian> rel(reloc_buffer);
7787
7788 relocate.relocate(relinfo, this, output_section,
7789 this->fake_relnum_for_stubs, rel, r_type,
7790 NULL, &symval, view + reloc_offset,
7791 address + reloc_offset, reloc_size);
7792 }
7793 }
7794
7795 // Determine whether an object attribute tag takes an integer, a
7796 // string or both.
7797
7798 template<bool big_endian>
7799 int
7800 Target_arm<big_endian>::do_attribute_arg_type(int tag) const
7801 {
7802 if (tag == Object_attribute::Tag_compatibility)
7803 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
7804 | Object_attribute::ATTR_TYPE_FLAG_STR_VAL);
7805 else if (tag == elfcpp::Tag_nodefaults)
7806 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
7807 | Object_attribute::ATTR_TYPE_FLAG_NO_DEFAULT);
7808 else if (tag == elfcpp::Tag_CPU_raw_name || tag == elfcpp::Tag_CPU_name)
7809 return Object_attribute::ATTR_TYPE_FLAG_STR_VAL;
7810 else if (tag < 32)
7811 return Object_attribute::ATTR_TYPE_FLAG_INT_VAL;
7812 else
7813 return ((tag & 1) != 0
7814 ? Object_attribute::ATTR_TYPE_FLAG_STR_VAL
7815 : Object_attribute::ATTR_TYPE_FLAG_INT_VAL);
7816 }
7817
7818 // Reorder attributes.
7819 //
7820 // The ABI defines that Tag_conformance should be emitted first, and that
7821 // Tag_nodefaults should be second (if either is defined). This sets those
7822 // two positions, and bumps up the position of all the remaining tags to
7823 // compensate.
7824
7825 template<bool big_endian>
7826 int
7827 Target_arm<big_endian>::do_attributes_order(int num) const
7828 {
7829 // Reorder the known object attributes in output. We want to move
7830   // Tag_conformance to position 4 and Tag_nodefaults to position 5
7831   // and shift everything between 4 .. Tag_conformance - 1 to make room.
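  // For example, assuming the EABI values Tag_nodefaults == 64 and
  // Tag_conformance == 67, the mapping below yields: position 4 -> tag 67,
  // position 5 -> tag 64, positions 6..65 -> tags 4..63, positions
  // 66..67 -> tags 65..66, and higher positions are unchanged.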
7832 if (num == 4)
7833 return elfcpp::Tag_conformance;
7834 if (num == 5)
7835 return elfcpp::Tag_nodefaults;
7836 if ((num - 2) < elfcpp::Tag_nodefaults)
7837 return num - 2;
7838 if ((num - 1) < elfcpp::Tag_conformance)
7839 return num - 1;
7840 return num;
7841 }
7842
7843 // Scan a span of THUMB code for Cortex-A8 erratum.
7844
7845 template<bool big_endian>
7846 void
7847 Target_arm<big_endian>::scan_span_for_cortex_a8_erratum(
7848 Arm_relobj<big_endian>* arm_relobj,
7849 unsigned int shndx,
7850 section_size_type span_start,
7851 section_size_type span_end,
7852 const unsigned char* view,
7853 Arm_address address)
7854 {
7855 // Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
7856 //
7857 // The opcode is BLX.W, BL.W, B.W, Bcc.W
7858 // The branch target is in the same 4KB region as the
7859 // first half of the branch.
7860 // The instruction before the branch is a 32-bit
7861 // length non-branch instruction.
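  // A branch is a candidate when its first halfword occupies the last two
  // bytes of a 4KB page, i.e. the 32-bit instruction straddles the page
  // boundary; this is what the ((address + i) & 0xfff) == 0xffe test below
  // checks.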
7862 section_size_type i = span_start;
7863 bool last_was_32bit = false;
7864 bool last_was_branch = false;
7865 while (i < span_end)
7866 {
7867 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
7868 const Valtype* wv = reinterpret_cast<const Valtype*>(view + i);
7869 uint32_t insn = elfcpp::Swap<16, big_endian>::readval(wv);
7870 bool is_blx = false, is_b = false;
7871 bool is_bl = false, is_bcc = false;
7872
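      // A 32-bit Thumb-2 instruction is identified by its first halfword:
      // bits [15:11] must be 0b11101, 0b11110 or 0b11111, which is what the
      // mask test below checks.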
7873 bool insn_32bit = (insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000;
7874 if (insn_32bit)
7875 {
7876 // Load the rest of the insn (in manual-friendly order).
7877 insn = (insn << 16) | elfcpp::Swap<16, big_endian>::readval(wv + 1);
7878
7879 // Encoding T4: B<c>.W.
7880 is_b = (insn & 0xf800d000U) == 0xf0009000U;
7881 // Encoding T1: BL<c>.W.
7882 is_bl = (insn & 0xf800d000U) == 0xf000d000U;
7883 // Encoding T2: BLX<c>.W.
7884 is_blx = (insn & 0xf800d000U) == 0xf000c000U;
7885 // Encoding T3: B<c>.W (not permitted in IT block).
7886 is_bcc = ((insn & 0xf800d000U) == 0xf0008000U
7887 && (insn & 0x07f00000U) != 0x03800000U);
7888 }
7889
7890 bool is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
7891
7892 // If this instruction is a 32-bit THUMB branch that crosses a 4K
7893       // page boundary and it follows a 32-bit non-branch instruction,
7894       // we need to work around it.
7895 if (is_32bit_branch
7896 && ((address + i) & 0xfffU) == 0xffeU
7897 && last_was_32bit
7898 && !last_was_branch)
7899 {
7900 // Check to see if there is a relocation stub for this branch.
7901 bool force_target_arm = false;
7902 bool force_target_thumb = false;
7903 const Cortex_a8_reloc* cortex_a8_reloc = NULL;
7904 Cortex_a8_relocs_info::const_iterator p =
7905 this->cortex_a8_relocs_info_.find(address + i);
7906
7907 if (p != this->cortex_a8_relocs_info_.end())
7908 {
7909 cortex_a8_reloc = p->second;
7910 bool target_is_thumb = (cortex_a8_reloc->destination() & 1) != 0;
7911
7912 if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
7913 && !target_is_thumb)
7914 force_target_arm = true;
7915 else if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
7916 && target_is_thumb)
7917 force_target_thumb = true;
7918 }
7919
7920 off_t offset;
7921 Stub_type stub_type = arm_stub_none;
7922
7923 // Check if we have an offending branch instruction.
7924 uint16_t upper_insn = (insn >> 16) & 0xffffU;
7925 uint16_t lower_insn = insn & 0xffffU;
7926 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
7927
7928 if (cortex_a8_reloc != NULL
7929 && cortex_a8_reloc->reloc_stub() != NULL)
7930 // We've already made a stub for this instruction, e.g.
7931 // it's a long branch or a Thumb->ARM stub. Assume that
7932 // stub will suffice to work around the A8 erratum (see
7933 // setting of always_after_branch above).
7934 ;
7935 else if (is_bcc)
7936 {
7937 offset = RelocFuncs::thumb32_cond_branch_offset(upper_insn,
7938 lower_insn);
7939 stub_type = arm_stub_a8_veneer_b_cond;
7940 }
7941 else if (is_b || is_bl || is_blx)
7942 {
7943 offset = RelocFuncs::thumb32_branch_offset(upper_insn,
7944 lower_insn);
7945 if (is_blx)
7946 offset &= ~3;
7947
7948 stub_type = (is_blx
7949 ? arm_stub_a8_veneer_blx
7950 : (is_bl
7951 ? arm_stub_a8_veneer_bl
7952 : arm_stub_a8_veneer_b));
7953 }
7954
7955 if (stub_type != arm_stub_none)
7956 {
7957 Arm_address pc_for_insn = address + i + 4;
7958
7959 // The original instruction is a BL, but the target is
7960 // an ARM instruction. If we were not making a stub,
7961 // the BL would have been converted to a BLX. Use the
7962 // BLX stub instead in that case.
7963 if (this->may_use_blx() && force_target_arm
7964 && stub_type == arm_stub_a8_veneer_bl)
7965 {
7966 stub_type = arm_stub_a8_veneer_blx;
7967 is_blx = true;
7968 is_bl = false;
7969 }
7970 // Conversely, if the original instruction was
7971 // BLX but the target is Thumb mode, use the BL stub.
7972 else if (force_target_thumb
7973 && stub_type == arm_stub_a8_veneer_blx)
7974 {
7975 stub_type = arm_stub_a8_veneer_bl;
7976 is_blx = false;
7977 is_bl = true;
7978 }
7979
7980 if (is_blx)
7981 pc_for_insn &= ~3;
7982
7983 // If we found a relocation, use the proper destination,
7984 // not the offset in the (unrelocated) instruction.
7985 // Note this is always done if we switched the stub type above.
7986 if (cortex_a8_reloc != NULL)
7987 offset = (off_t) (cortex_a8_reloc->destination() - pc_for_insn);
7988
7989 Arm_address target = (pc_for_insn + offset) | (is_blx ? 0 : 1);
7990
7991 	      // Add a new stub if the destination address is in the same page.
7992 if (((address + i) & ~0xfffU) == (target & ~0xfffU))
7993 {
7994 Cortex_a8_stub* stub =
7995 this->stub_factory_.make_cortex_a8_stub(stub_type,
7996 arm_relobj, shndx,
7997 address + i,
7998 target, insn);
7999 Stub_table<big_endian>* stub_table =
8000 arm_relobj->stub_table(shndx);
8001 gold_assert(stub_table != NULL);
8002 stub_table->add_cortex_a8_stub(address + i, stub);
8003 }
8004 }
8005 }
8006
8007 i += insn_32bit ? 4 : 2;
8008 last_was_32bit = insn_32bit;
8009 last_was_branch = is_32bit_branch;
8010 }
8011 }
8012
8013 // Apply the Cortex-A8 workaround.
8014
8015 template<bool big_endian>
8016 void
8017 Target_arm<big_endian>::apply_cortex_a8_workaround(
8018 const Cortex_a8_stub* stub,
8019 Arm_address stub_address,
8020 unsigned char* insn_view,
8021 Arm_address insn_address)
8022 {
8023 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
8024 Valtype* wv = reinterpret_cast<Valtype*>(insn_view);
8025 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
8026 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
8027 off_t branch_offset = stub_address - (insn_address + 4);
8028
8029 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
8030 switch (stub->stub_template()->type())
8031 {
8032 case arm_stub_a8_veneer_b_cond:
8033 gold_assert(!utils::has_overflow<21>(branch_offset));
8034 upper_insn = RelocFuncs::thumb32_cond_branch_upper(upper_insn,
8035 branch_offset);
8036 lower_insn = RelocFuncs::thumb32_cond_branch_lower(lower_insn,
8037 branch_offset);
8038 break;
8039
8040 case arm_stub_a8_veneer_b:
8041 case arm_stub_a8_veneer_bl:
8042 case arm_stub_a8_veneer_blx:
8043 if ((lower_insn & 0x5000U) == 0x4000U)
8044 // For a BLX instruction, make sure that the relocation is
8045 // rounded up to a word boundary. This follows the semantics of
8046 // the instruction which specifies that bit 1 of the target
8047 // address will come from bit 1 of the base address.
8048 branch_offset = (branch_offset + 2) & ~3;
8049
8050 // Put BRANCH_OFFSET back into the insn.
8051 gold_assert(!utils::has_overflow<25>(branch_offset));
8052 upper_insn = RelocFuncs::thumb32_branch_upper(upper_insn, branch_offset);
8053 lower_insn = RelocFuncs::thumb32_branch_lower(lower_insn, branch_offset);
8054 break;
8055
8056 default:
8057 gold_unreachable();
8058 }
8059
8060 // Put the relocated value back in the object file:
8061 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
8062 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
8063 }
8064
8065 template<bool big_endian>
8066 class Target_selector_arm : public Target_selector
8067 {
8068 public:
8069 Target_selector_arm()
8070 : Target_selector(elfcpp::EM_ARM, 32, big_endian,
8071 (big_endian ? "elf32-bigarm" : "elf32-littlearm"))
8072 { }
8073
8074 Target*
8075 do_instantiate_target()
8076 { return new Target_arm<big_endian>(); }
8077 };
8078
8079 Target_selector_arm<false> target_selector_arm;
8080 Target_selector_arm<true> target_selector_armbe;
8081
8082 } // End anonymous namespace.