/* Scrape artifact from the git web view (preserved, not source code):
   commit title: "gas: avoid spurious failures in non-ELF targets in the
   SPARC testsuite."  Path: deliverable/binutils-gdb.git / bfd/elf32-arm.c  */
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
/* Return the name of the relocation section associated with section
   NAME, by string-literal concatenation: ".rel" NAME when the target
   uses REL-style relocations, ".rela" NAME otherwise.  NAME must be a
   string literal.  HTAB is the bfd's elf32_arm_link_hash_entry.
   NOTE(review): "entry" here looks like it should read "table" --
   use_rel is presumably a per-hash-table flag; confirm against the
   elf32_arm link hash structure definitions later in this file.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
/* Return the on-disk size in bytes of one relocation entry:
   sizeof (Elf32_External_Rel) for REL-style targets, otherwise
   sizeof (Elf32_External_Rela).  HTAB is the bfd's
   elf32_arm_link_hash_entry (see the NOTE on RELOC_SECTION).  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
44
/* Return the function used to swap a relocation entry in from the
   external (on-disk) representation: the REL variant when use_rel is
   set, the RELA variant otherwise.  HTAB is the bfd's
   elf32_arm_link_hash_entry (see the NOTE on RELOC_SECTION).  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
51
/* Return the function used to swap a relocation entry out to the
   external (on-disk) representation: the REL variant when use_rel is
   set, the RELA variant otherwise.  HTAB is the bfd's
   elf32_arm_link_hash_entry (see the NOTE on RELOC_SECTION).  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
58
/* Hook wiring for the generic ELF backend: this target supplies only
   the REL-flavoured info-to-howto routine (elf32_arm_info_to_howto);
   the plain elf_info_to_howto hook is left unset (0).  */
#define elf_info_to_howto               0
#define elf_info_to_howto_rel           elf32_arm_info_to_howto

/* ABI version and OS/ABI byte placed in the ELF header's e_ident for
   ARM output.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place (X) with the low
   two bits masked off, i.e. rounded down to a 4-byte boundary.  */
#define Pa(X)                          ((X) & 0xfffffffc)
67
/* Forward declaration; the definition appears later in this file.
   Writes out the (possibly edited) CONTENTS of section SEC to
   OUTPUT_BFD -- presumably applying ARM-specific section edits during
   final link; confirm against the definition.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
/* Relocation types 160 onwards.  Only R_ARM_IRELATIVE (160) is defined
   here; the lookup in elf32_arm_howto_from_type maps it to the single
   entry below.  It is a 32-bit in-place (partial_inplace) word used for
   STT_GNU_IFUNC support.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1765
/* Relocation types 249-255: extended, currently unused, relocations.
   The howtos carry no size/mask information (all zero) — they exist so
   that the relocation numbers can at least be recognized and named.
   elf32_arm_howto_from_type indexes this table relative to
   R_ARM_RREL32, so the four entries below must stay in numerical
   order of their relocation codes.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1825
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1831
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838
1839 return NULL;
1840 }
1841
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1845 {
1846 unsigned int r_type;
1847
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851
/* One entry of the BFD-to-ELF relocation translation table: pairs a
   generic BFD relocation code with the ARM ELF relocation number it
   maps to.  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD code.  */
    unsigned char elf_reloc_val;		/* R_ARM_* number (fits in a byte).  */
  };
1857
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860 {
1861 {BFD_RELOC_NONE, R_ARM_NONE},
1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1867 {BFD_RELOC_32, R_ARM_ABS32},
1868 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1869 {BFD_RELOC_8, R_ARM_ABS8},
1870 {BFD_RELOC_16, R_ARM_ABS16},
1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1901 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951 };
1952
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 bfd_reloc_code_real_type code)
1956 {
1957 unsigned int i;
1958
1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962
1963 return NULL;
1964 }
1965
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 const char *r_name)
1969 {
1970 unsigned int i;
1971
1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973 if (elf32_arm_howto_table_1[i].name != NULL
1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975 return &elf32_arm_howto_table_1[i];
1976
1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978 if (elf32_arm_howto_table_2[i].name != NULL
1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980 return &elf32_arm_howto_table_2[i];
1981
1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983 if (elf32_arm_howto_table_3[i].name != NULL
1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985 return &elf32_arm_howto_table_3[i];
1986
1987 return NULL;
1988 }
1989
1990 /* Support for core dump NOTE sections. */
1991
/* Handle an NT_PRSTATUS core-dump note: record the signal and LWP id
   from NOTE in ABFD's core tdata, then expose the register set as a
   ".reg/nnn" pseudo-section.  Only the 148-byte Linux/ARM layout is
   recognised; any other descriptor size is rejected.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 148:		/* Linux/ARM 32-bit.  */
      /* pr_cursig — 16-bit value at offset 12 in elf_prstatus.  */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid — 32-bit value at offset 24.  */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg — 72 bytes of general registers starting at offset 72.  */
      offset = 72;
      size = 72;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
2021
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025 switch (note->descsz)
2026 {
2027 default:
2028 return FALSE;
2029
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd)->core->pid
2032 = bfd_get_32 (abfd, note->descdata + 12);
2033 elf_tdata (abfd)->core->program
2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 elf_tdata (abfd)->core->command
2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037 }
2038
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2042 {
2043 char *command = elf_tdata (abfd)->core->command;
2044 int n = strlen (command);
2045
2046 if (0 < n && command[n - 1] == ' ')
2047 command[n - 1] = '\0';
2048 }
2049
2050 return TRUE;
2051 }
2052
/* Write a core-dump note of type NOTE_TYPE into BUF (of size *BUFSIZ)
   for ABFD, laying the payload out in the 124-byte Linux/ARM
   elf_prpsinfo or 148-byte elf_prstatus format expected by
   elf32_arm_nabi_grok_psinfo/grok_prstatus above.

   The trailing varargs depend on NOTE_TYPE:
     NT_PRPSINFO: const char *fname, const char *psargs.
     NT_PRSTATUS: long pid, int cursig, const void *gregs (72 bytes).

   Returns the updated buffer, or NULL for an unsupported note type.  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* strncpy is deliberate here: pr_fname and pr_psargs are
	   fixed-width fields that must be zero padded and need not
	   be NUL terminated.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Field offsets mirror those read back in grok_prstatus:
	   pr_pid at 24, pr_cursig at 12, pr_reg at 72.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2100
/* BFD target vector names for the little- and big-endian ARM ELF
   targets.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Hook the core-dump note handlers defined above into the ELF
   backend.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Convenience names for 32-bit ARM and 16-bit Thumb instruction
   words.  */
typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
   || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2144
/* TLS call trampoline: form an address from LR + R0, load the word at
   offset 4 from it into R1, and jump there.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Lazy TLS-descriptor trampoline: loads two PC-relative GOT offsets
   (words 3: and 4: below) and jumps via R2 — per the embedded .word
   comments, to the dynamic linker's dl_tlsdesc_lazy_resolver.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004,		/* push {r2} */
  0xe59f200c,		/* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c,		/* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002,		/* 1: ldr r2, [pc, r2] */
  0xe081100f,		/* 2: add r1, pc */
  0xe12fff12,		/* bx r2 */
  0x00000014,		/* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
				    + dl_tlsdesc_lazy_resolver(GOT)  */
  0x00000018,		/* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2164
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  The #NN immediates are filled in when the entry is built.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN    */
  0xe28cca00,		/* add   ip, ip, #NN    */
  0xe5bcf000,		/* ldr   pc, [ip, #NN]! */
  0x00000000,		/* unused              */
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]   */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  0x00000000,		/* &GOT[0] - .          */
};

/* By default subsequent entries in a procedure linkage table look like
   this.  Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add   ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add   ip, pc, #0xN0000000 */
  0xe28cc600,		/* add   ip, ip, #0xNN00000  */
  0xe28cca00,		/* add   ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!   */
};

/* TRUE when the "long" PLT entry format above has been explicitly
   requested.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2226
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push {lr} */
  0x44fee008,		/* ldr.w lr, [pc, #8] */
			/* add   lr, pc */
  0xff08f85e,		/* ldr.w pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - . */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw ip, #0xNNNN */
  0x0c00f2c0,		/* movt ip, #0xNNNN */
  0xf8dc44fc,		/* add  ip, pc */
  0xbf00f000		/* ldr.w pc, [ip] */
			/* nop */
};
2253
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str ip,[sp,#-8]! */
  0xe59fc000,		/* ldr ip,[pc] */
  0xe59cf008,		/* ldr pc,[ip,#8] */
  0x00000000,		/* .long _GLOBAL_OFFSET_TABLE_ */
};

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,		/* ldr ip,[pc] */
  0xe59cf000,		/* ldr pc,[ip] */
  0x00000000,		/* .long @got */
  0xe59fc000,		/* ldr ip,[pc] */
  0xea000000,		/* b _PLT */
  0x00000000,		/* .long @pltindex*sizeof(Elf32_Rela) */
};

/* The format of entries in a VxWorks shared library.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,		/* ldr ip,[pc] */
  0xe79cf009,		/* ldr pc,[ip,r9] */
  0x00000000,		/* .long @got */
  0xe59fc000,		/* ldr ip,[pc] */
  0xe599f008,		/* ldr pc,[r9,#8] */
  0x00000000,		/* .long @pltindex*sizeof(Elf32_Rela) */
};

/* An initial stub used if the PLT entry is referenced from Thumb code.
   The bx pc/nop pair switches to ARM state so that execution falls
   through into the ARM-mode PLT entry.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,		/* ldr  pc, [pc, #-4] */
  0x00000000,		/* dcd  R_ARM_GLOB_DAT(X) */
};
2301
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  NaCl code is grouped into 16-byte bundles; the
   nops pad out the third bundle so .Lplt_tail starts a new one.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};

/* Byte offset of the .Lplt_tail label within elf32_arm_nacl_plt0_entry
   (the 12th word above).  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};
2340
/* Reachable branch displacements, including the pipeline PC bias
   (+8 for ARM, +4 for Thumb): ARM B/BL has a 24-bit signed word
   offset, Thumb-1 BL 22 bits, Thumb-2 B.W/BL 24 bits and Thumb-2
   conditional branches 20 bits.  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)

/* Classification of each element in a stub template below.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Constructors for the insn_sequence elements of the stub templates.
   X is the instruction encoding or data word, Y a relocation type and
   Z the relocation addend.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction or data word plus
   the relocation (type and addend) to apply to it.  */
typedef struct
{
  bfd_vma data;
  enum stub_insn_type type;
  unsigned int r_type;
  int reloc_addend;
}  insn_sequence;
2377
/* Arm/Thumb -> Arm/Thumb long branch stub.  On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub.  Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	    /* push {r0} */
  THUMB16_INSN (0x4802),	    /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	    /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	    /* pop  {r0} */
  THUMB16_INSN (0x4760),	    /* bx   ip */
  THUMB16_INSN (0xbf00),	    /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
{
  THUMB32_INSN (0xf85ff000),	    /* ldr.w  pc, [pc, #-0] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(x) */
};

/* Thumb -> Thumb long branch stub.  Used for PureCode sections on Thumb2
   M-profile architectures.  Loads no literal data, so the section can
   remain execute-only.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
{
  THUMB32_MOVW (0xf2400c00),	    /* mov.w ip, R_ARM_MOVW_ABS_NC */
  THUMB32_MOVT (0xf2c00c00),	    /* movt  ip, R_ARM_MOVT_ABS << 16 */
  THUMB16_INSN (0x4760),	    /* bx   ip */
};

/* V4T Thumb -> Thumb long branch stub.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59fc000),	    /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub.  Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_REL_INSN (0xea000000, -8),    /* b    (X-8) */
};

/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	    /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	    /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),    /* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> Thumb long branch stub, PIC.  Same sequence as the
   any->thumb PIC stub above; bx performs the mode switch.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	    /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),    /* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59fc000),	    /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	    /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd  R_ARM_REL32(X-4) */
};

/* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	    /* push {r0} */
  THUMB16_INSN (0x4802),	    /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	    /* mov  ip, pc */
  THUMB16_INSN (0x4484),	    /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	    /* pop  {r0} */
  THUMB16_INSN (0x4760),	    /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),    /* dcd  R_ARM_REL32(X+4) */
};

/* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59fc004),	    /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	    /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),    /* dcd  R_ARM_REL32(X) */
};

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	    /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	    /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59f1000),	    /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	    /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd  R_ARM_REL32(X-4) */
};

/* NaCl ARM -> ARM long branch stub.  Padded to a 16-byte bundle with
   data words; the bkpt traps any fall-through.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),	    /* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),	    /* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),	    /* bx	ip */
  ARM_INSN (0xe320f000),	    /* nop */
  ARM_INSN (0xe125be70),	    /* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),     /* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),     /* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),	    /* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),	    /* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),	    /* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),	    /* bx	ip */
  ARM_INSN (0xe125be70),	    /* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),    /* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),     /* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),     /* .word 0 */
};


/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),	    /* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),  /* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)   /* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)   /* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)   /* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)     /* b original_branch_dest.  */
};
2597
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"

/* One entry per long/short branch stub defined above.  Expanded twice
   below with different DEF_STUB definitions, to build the stub-type
   enumeration and the stub_definitions table in matching order.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

/* Enumeration of all stub types; arm_stub_none (0) means no stub.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template and its length in insn_sequence elements.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Indexed by enum elf32_arm_stub_type; slot 0 is arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2674
/* Hash-table entry describing one generated stub.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2726
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Start address of the mapped region.  */
  bfd_vma vma;
  /* Mapping symbol type character — presumably 'a'/'t'/'d' as in the
     AAELF mapping symbols $a/$t/$d; confirm at the use sites.  */
  char type;
}
elf32_arm_section_map;

/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* Node in a per-section list of VFP11 erratum records.  Branch records
   and veneer records cross-link to each other through the union.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      /* The veneer this branch targets.  */
      struct elf32_vfp11_erratum_list *veneer;
      /* The offending VFP instruction.  */
      unsigned int vfp_insn;
    } b;
    struct
    {
      /* The branch record pointing at this veneer.  */
      struct elf32_vfp11_erratum_list *branch;
      /* Veneer identifier, used in the veneer symbol name.  */
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2768
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* Node in a per-section list of STM32L4XX erratum records; same
   branch/veneer cross-linking scheme as elf32_vfp11_erratum_list.  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      /* The veneer this branch targets.  */
      struct elf32_stm32l4xx_erratum_list *veneer;
      /* The offending instruction.  */
      unsigned int insn;
    } b;
    struct
    {
      /* The branch record pointing at this veneer.  */
      struct elf32_stm32l4xx_erratum_list *branch;
      /* Veneer identifier, used in the veneer symbol name.  */
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;

/* Kinds of edit that can be applied to an unwind (.ARM.exidx) table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the unwind table entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2818
/* ARM-specific per-section data, extending the generic ELF section
   data.  Retrieved via the elf32_arm_section_data macro below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;	/* Number of entries used in MAP.  */
  unsigned int mapsize;		/* Number of entries allocated in MAP.  */
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Access SEC's section data as the ARM-specific variant above.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
2853
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  /* The bfd and section containing the instruction to be fixed.  */
  bfd *input_bfd;
  asection *section;
  /* Location of the instruction and of the branch target.  */
  bfd_vma offset;
  bfd_vma target_offset;
  /* The original instruction.  */
  unsigned long orig_insn;
  /* Name of the stub created for this fix.  */
  char *stub_name;
  /* Kind of stub, and the ARM/Thumb mode of the branch.  */
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Location of the branch and of its destination.  */
  bfd_vma from;
  bfd_vma destination;
  /* Hash entry for the branch target when it is a global symbol;
     SYM_NAME is the target symbol's name.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  /* The relocation type applied to the branch.  */
  unsigned int r_type;
  /* ARM/Thumb mode of the branch target.  */
  enum arm_st_branch_type branch_type;
  /* NOTE(review): presumably TRUE when a non-Cortex-A8 stub already
     covers this branch — confirm at the sites that set it.  */
  bfd_boolean non_a8_stub;
};
2885
/* The size of the thread control block, in bytes.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};

/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Local symbols have no elf_link_hash_entry, so these fields mirror
   the ones that would otherwise live there.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
2928
/* ARM-specific per-bfd data, extending the generic ELF tdata.
   Accessed through elf_arm_tdata and the helper macros below.  */
struct elf_arm_obj_tdata
{
  /* The generic ELF object data; must stay first so the structures
     can be cast between (see elf_arm_tdata).  */
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};

/* Return BFD's ARM-specific tdata.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

/* Per-local-symbol arrays; allocated together by
   elf32_arm_allocate_local_sym_info.  */
#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

/* Nonzero if BFD is an ELF bfd carrying ARM object data.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
2965
2966 static bfd_boolean
2967 elf32_arm_mkobject (bfd *abfd)
2968 {
2969 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2970 ARM_ELF_DATA);
2971 }
2972
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  /* The generic hash entry; must stay first so the structures can be
     cast between (see elf32_arm_hash_entry).  */
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

  /* Bit-flags describing which kinds of GOT entry the symbol needs.
     GOT_TLS_GD and GOT_TLS_GDESC may be combined; see
     GOT_TLS_GD_ANY_P.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Padding so the bitfields above fill a whole word.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
3011
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info) \
  (elf_link_hash_traverse \
   (&(table)->root, \
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Yields NULL when the hash table in INFO is not an ARM one.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Look up an entry in the stub hash table, optionally creating it.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size in bytes of the TLS-descriptor jump table: four bytes per
   allocated R_ARM_TLS_DESC slot.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3041
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table; must stay first so the structures can be
     cast between (see elf32_arm_hash_table).  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
3203
/* Return the number of trailing zero bits in MASK.  When the GCC
   builtin is unavailable, fall back to a bit-scan loop; in that path
   a zero MASK yields the bit-width of the type.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int bit;

  for (bit = 0; bit < 8 * sizeof (mask); bit++)
    {
      if ((mask & 1u) != 0)
	break;
      mask >>= 1;
    }
  return bit;
#endif
}
3221
/* Return the number of set bits in MASK.  Falls back to a shift-and-add
   loop when the GCC builtin is unavailable.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int count = 0;

  while (mask != 0)
    {
      count += mask & 1u;
      mask >>= 1;
    }
  return count;
#endif
}
3239
3240 /* Create an entry in an ARM ELF linker hash table. */
3241
3242 static struct bfd_hash_entry *
3243 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3244 struct bfd_hash_table * table,
3245 const char * string)
3246 {
3247 struct elf32_arm_link_hash_entry * ret =
3248 (struct elf32_arm_link_hash_entry *) entry;
3249
3250 /* Allocate the structure if it has not already been allocated by a
3251 subclass. */
3252 if (ret == NULL)
3253 ret = (struct elf32_arm_link_hash_entry *)
3254 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3255 if (ret == NULL)
3256 return (struct bfd_hash_entry *) ret;
3257
3258 /* Call the allocation method of the superclass. */
3259 ret = ((struct elf32_arm_link_hash_entry *)
3260 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3261 table, string));
3262 if (ret != NULL)
3263 {
3264 ret->dyn_relocs = NULL;
3265 ret->tls_type = GOT_UNKNOWN;
3266 ret->tlsdesc_got = (bfd_vma) -1;
3267 ret->plt.thumb_refcount = 0;
3268 ret->plt.maybe_thumb_refcount = 0;
3269 ret->plt.noncall_refcount = 0;
3270 ret->plt.got_offset = -1;
3271 ret->is_iplt = FALSE;
3272 ret->export_glue = NULL;
3273
3274 ret->stub_cache = NULL;
3275 }
3276
3277 return (struct bfd_hash_entry *) ret;
3278 }
3279
3280 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3281 symbols. */
3282
3283 static bfd_boolean
3284 elf32_arm_allocate_local_sym_info (bfd *abfd)
3285 {
3286 if (elf_local_got_refcounts (abfd) == NULL)
3287 {
3288 bfd_size_type num_syms;
3289 bfd_size_type size;
3290 char *data;
3291
3292 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3293 size = num_syms * (sizeof (bfd_signed_vma)
3294 + sizeof (struct arm_local_iplt_info *)
3295 + sizeof (bfd_vma)
3296 + sizeof (char));
3297 data = bfd_zalloc (abfd, size);
3298 if (data == NULL)
3299 return FALSE;
3300
3301 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3302 data += num_syms * sizeof (bfd_signed_vma);
3303
3304 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3305 data += num_syms * sizeof (struct arm_local_iplt_info *);
3306
3307 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3308 data += num_syms * sizeof (bfd_vma);
3309
3310 elf32_arm_local_got_tls_type (abfd) = data;
3311 }
3312 return TRUE;
3313 }
3314
3315 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3316 to input bfd ABFD. Create the information if it doesn't already exist.
3317 Return null if an allocation fails. */
3318
3319 static struct arm_local_iplt_info *
3320 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3321 {
3322 struct arm_local_iplt_info **ptr;
3323
3324 if (!elf32_arm_allocate_local_sym_info (abfd))
3325 return NULL;
3326
3327 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3328 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3329 if (*ptr == NULL)
3330 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3331 return *ptr;
3332 }
3333
3334 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3335 in ABFD's symbol table. If the symbol is global, H points to its
3336 hash table entry, otherwise H is null.
3337
3338 Return true if the symbol does have PLT information. When returning
3339 true, point *ROOT_PLT at the target-independent reference count/offset
3340 union and *ARM_PLT at the ARM-specific information. */
3341
3342 static bfd_boolean
3343 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3344 unsigned long r_symndx, union gotplt_union **root_plt,
3345 struct arm_plt_info **arm_plt)
3346 {
3347 struct arm_local_iplt_info *local_iplt;
3348
3349 if (h != NULL)
3350 {
3351 *root_plt = &h->root.plt;
3352 *arm_plt = &h->plt;
3353 return TRUE;
3354 }
3355
3356 if (elf32_arm_local_iplt (abfd) == NULL)
3357 return FALSE;
3358
3359 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3360 if (local_iplt == NULL)
3361 return FALSE;
3362
3363 *root_plt = &local_iplt->root;
3364 *arm_plt = &local_iplt->arm;
3365 return TRUE;
3366 }
3367
3368 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3369 before it. */
3370
3371 static bfd_boolean
3372 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3373 struct arm_plt_info *arm_plt)
3374 {
3375 struct elf32_arm_link_hash_table *htab;
3376
3377 htab = elf32_arm_hash_table (info);
3378 return (arm_plt->thumb_refcount != 0
3379 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3380 }
3381
3382 /* Return a pointer to the head of the dynamic reloc list that should
3383 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3384 ABFD's symbol table. Return null if an error occurs. */
3385
3386 static struct elf_dyn_relocs **
3387 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3388 Elf_Internal_Sym *isym)
3389 {
3390 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3391 {
3392 struct arm_local_iplt_info *local_iplt;
3393
3394 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3395 if (local_iplt == NULL)
3396 return NULL;
3397 return &local_iplt->dyn_relocs;
3398 }
3399 else
3400 {
3401 /* Track dynamic relocs needed for local syms too.
3402 We really need local syms available to do this
3403 easily. Oh well. */
3404 asection *s;
3405 void *vpp;
3406
3407 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3408 if (s == NULL)
3409 abort ();
3410
3411 vpp = &elf_section_data (s)->local_dynrel;
3412 return (struct elf_dyn_relocs **) vpp;
3413 }
3414 }
3415
3416 /* Initialize an entry in the stub hash table. */
3417
3418 static struct bfd_hash_entry *
3419 stub_hash_newfunc (struct bfd_hash_entry *entry,
3420 struct bfd_hash_table *table,
3421 const char *string)
3422 {
3423 /* Allocate the structure if it has not already been allocated by a
3424 subclass. */
3425 if (entry == NULL)
3426 {
3427 entry = (struct bfd_hash_entry *)
3428 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3429 if (entry == NULL)
3430 return entry;
3431 }
3432
3433 /* Call the allocation method of the superclass. */
3434 entry = bfd_hash_newfunc (entry, table, string);
3435 if (entry != NULL)
3436 {
3437 struct elf32_arm_stub_hash_entry *eh;
3438
3439 /* Initialize the local fields. */
3440 eh = (struct elf32_arm_stub_hash_entry *) entry;
3441 eh->stub_sec = NULL;
3442 eh->stub_offset = 0;
3443 eh->source_value = 0;
3444 eh->target_value = 0;
3445 eh->target_section = NULL;
3446 eh->orig_insn = 0;
3447 eh->stub_type = arm_stub_none;
3448 eh->stub_size = 0;
3449 eh->stub_template = NULL;
3450 eh->stub_template_size = 0;
3451 eh->h = NULL;
3452 eh->id_sec = NULL;
3453 eh->output_name = NULL;
3454 }
3455
3456 return entry;
3457 }
3458
3459 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3460 shortcuts to them in our hash table. */
3461
3462 static bfd_boolean
3463 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3464 {
3465 struct elf32_arm_link_hash_table *htab;
3466
3467 htab = elf32_arm_hash_table (info);
3468 if (htab == NULL)
3469 return FALSE;
3470
3471 /* BPABI objects never have a GOT, or associated sections. */
3472 if (htab->symbian_p)
3473 return TRUE;
3474
3475 if (! _bfd_elf_create_got_section (dynobj, info))
3476 return FALSE;
3477
3478 return TRUE;
3479 }
3480
3481 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3482
3483 static bfd_boolean
3484 create_ifunc_sections (struct bfd_link_info *info)
3485 {
3486 struct elf32_arm_link_hash_table *htab;
3487 const struct elf_backend_data *bed;
3488 bfd *dynobj;
3489 asection *s;
3490 flagword flags;
3491
3492 htab = elf32_arm_hash_table (info);
3493 dynobj = htab->root.dynobj;
3494 bed = get_elf_backend_data (dynobj);
3495 flags = bed->dynamic_sec_flags;
3496
3497 if (htab->root.iplt == NULL)
3498 {
3499 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3500 flags | SEC_READONLY | SEC_CODE);
3501 if (s == NULL
3502 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3503 return FALSE;
3504 htab->root.iplt = s;
3505 }
3506
3507 if (htab->root.irelplt == NULL)
3508 {
3509 s = bfd_make_section_anyway_with_flags (dynobj,
3510 RELOC_SECTION (htab, ".iplt"),
3511 flags | SEC_READONLY);
3512 if (s == NULL
3513 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3514 return FALSE;
3515 htab->root.irelplt = s;
3516 }
3517
3518 if (htab->root.igotplt == NULL)
3519 {
3520 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3521 if (s == NULL
3522 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3523 return FALSE;
3524 htab->root.igotplt = s;
3525 }
3526 return TRUE;
3527 }
3528
3529 /* Determine if we're dealing with a Thumb only architecture. */
3530
3531 static bfd_boolean
3532 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3533 {
3534 int arch;
3535 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3536 Tag_CPU_arch_profile);
3537
3538 if (profile)
3539 return profile == 'M';
3540
3541 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3542
3543 /* Force return logic to be reviewed for each new architecture. */
3544 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3545 || arch == TAG_CPU_ARCH_V8M_BASE
3546 || arch == TAG_CPU_ARCH_V8M_MAIN);
3547
3548 if (arch == TAG_CPU_ARCH_V6_M
3549 || arch == TAG_CPU_ARCH_V6S_M
3550 || arch == TAG_CPU_ARCH_V7E_M
3551 || arch == TAG_CPU_ARCH_V8M_BASE
3552 || arch == TAG_CPU_ARCH_V8M_MAIN)
3553 return TRUE;
3554
3555 return FALSE;
3556 }
3557
3558 /* Determine if we're dealing with a Thumb-2 object. */
3559
3560 static bfd_boolean
3561 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3562 {
3563 int arch;
3564 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3565 Tag_THUMB_ISA_use);
3566
3567 if (thumb_isa)
3568 return thumb_isa == 2;
3569
3570 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3571
3572 /* Force return logic to be reviewed for each new architecture. */
3573 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3574 || arch == TAG_CPU_ARCH_V8M_BASE
3575 || arch == TAG_CPU_ARCH_V8M_MAIN);
3576
3577 return (arch == TAG_CPU_ARCH_V6T2
3578 || arch == TAG_CPU_ARCH_V7
3579 || arch == TAG_CPU_ARCH_V7E_M
3580 || arch == TAG_CPU_ARCH_V8
3581 || arch == TAG_CPU_ARCH_V8M_MAIN);
3582 }
3583
3584 /* Determine whether Thumb-2 BL instruction is available. */
3585
3586 static bfd_boolean
3587 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3588 {
3589 int arch =
3590 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3591
3592 /* Force return logic to be reviewed for each new architecture. */
3593 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3594 || arch == TAG_CPU_ARCH_V8M_BASE
3595 || arch == TAG_CPU_ARCH_V8M_MAIN);
3596
3597 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3598 return (arch == TAG_CPU_ARCH_V6T2
3599 || arch >= TAG_CPU_ARCH_V7);
3600 }
3601
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Create the GOT first (create_got_section is a no-op for Symbian,
     which has no GOT).  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  /* Cache shortcuts to .dynbss and, for non-PIC links, .rel(a).bss.  */
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks uses its own PLT templates; shared links have no PLT
	 header.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute lookup at DYNOBJ, then restore.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* By this point every section we rely on must exist.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
3673
/* Copy the extra info we tack onto an elf_link_hash_entry.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Unlink P: its counts have been folded into Q.  */
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Chain EDIR's old list after the unmerged remainder.  */
	  *pp = edir->dyn_relocs;
	}

      /* EDIR takes ownership of the combined list.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only adopt the indirect symbol's TLS type when DIR has no GOT
	 references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  /* Let the generic ELF code copy the rest.  */
  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3740
3741 /* Destroy an ARM elf linker hash table. */
3742
3743 static void
3744 elf32_arm_link_hash_table_free (bfd *obfd)
3745 {
3746 struct elf32_arm_link_hash_table *ret
3747 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3748
3749 bfd_hash_table_free (&ret->stub_hash_table);
3750 _bfd_elf_link_hash_table_free (obfd);
3751 }
3752
/* Create an ARM elf linker hash table.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  /* bfd_zmalloc zeroes the allocation, so fields not set below start
     out 0/NULL.  */
  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Non-zero defaults.  */
  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* NOTE(review): this relies on _bfd_elf_link_hash_table_free
	 releasing RET (installed as ABFD's hash table above) — confirm.  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  /* Install our destructor so the stub table is freed too.  */
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
3796
3797 /* Determine what kind of NOPs are available. */
3798
3799 static bfd_boolean
3800 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3801 {
3802 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3803 Tag_CPU_arch);
3804
3805 /* Force return logic to be reviewed for each new architecture. */
3806 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3807 || arch == TAG_CPU_ARCH_V8M_BASE
3808 || arch == TAG_CPU_ARCH_V8M_MAIN);
3809
3810 return (arch == TAG_CPU_ARCH_V6T2
3811 || arch == TAG_CPU_ARCH_V6K
3812 || arch == TAG_CPU_ARCH_V7
3813 || arch == TAG_CPU_ARCH_V8);
3814 }
3815
3816 static bfd_boolean
3817 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3818 {
3819 switch (stub_type)
3820 {
3821 case arm_stub_long_branch_thumb_only:
3822 case arm_stub_long_branch_thumb2_only:
3823 case arm_stub_long_branch_thumb2_only_pure:
3824 case arm_stub_long_branch_v4t_thumb_arm:
3825 case arm_stub_short_branch_v4t_thumb_arm:
3826 case arm_stub_long_branch_v4t_thumb_arm_pic:
3827 case arm_stub_long_branch_v4t_thumb_tls_pic:
3828 case arm_stub_long_branch_thumb_only_pic:
3829 return TRUE;
3830 case arm_stub_none:
3831 BFD_FAIL ();
3832 return FALSE;
3833 break;
3834 default:
3835 return FALSE;
3836 }
3837 }
3838
/* Determine the type of stub needed, if any, for a call.

   INFO is the link in progress; INPUT_SEC (in INPUT_BFD) contains the
   branch relocation REL.  ST_TYPE is the ELF symbol type of the call
   target, SYM_SEC its defining section (may be NULL), HASH its hash
   entry (NULL for local symbols), DESTINATION its address, and NAME is
   used only in diagnostics.  *ACTUAL_BRANCH_TYPE is both an input (the
   branch type recorded for the symbol) and an output: when a stub is
   required it is updated to the destination type actually used.

   Returns arm_stub_none when the branch needs no veneer.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  /* ST_BRANCH_LONG marks branches already known to be resolvable
     without a veneer.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
				 &root_plt, &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      /* IFUNC and local-PLT entries live in .iplt; everything else in
	 the regular .plt.  */
      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  We don't take this extra distance into account
	     here, because if a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry
	     because it avoids spreading offset corrections in several
	     places.  */

	  /* Redirect the branch to the PLT entry instead of the final
	     destination.  */
	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;
	  branch_type = ST_BRANCH_TO_ARM;
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl). A stub is needed in this case,
	   but only if this call is not through a PLT entry. Indeed,
	   PLT stubs handle mode switching already.
      */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    (*_bfd_error_handler) (_("%B(%s): warning: long branch "
					     " veneers used in section with "
					     "SHF_ARM_PURECODE section "
					     "attribute is only supported"
					     " for M-profile targets that "
					     "implement the movw "
					     "instruction."));

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above. Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  /* Thumb-only (M-profile) target.  */
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			(*_bfd_error_handler) (_("%B(%s): warning: long branch "
						 " veneers used in section with "
						 "SHF_ARM_PURECODE section "
						 "attribute is only supported"
						 " for M-profile targets that "
						 "implement the movw "
						 "instruction."));

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		(*_bfd_error_handler) (_("%B(%s): warning: long branch "
					 " veneers used in section with "
					 "SHF_ARM_PURECODE section "
					 "attribute is only supported"
					 " for M-profile targets that "
					 "implement the movw "
					 "instruction."));

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  (*_bfd_error_handler)
		    (_("%B(%s): warning: interworking not enabled.\n"
		       "  first occurrence: %B: Thumb call to ARM"),
		     sym_sec->owner, input_bfd, name);
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	(*_bfd_error_handler) (_("%B(%s): warning: long branch "
				 " veneers used in section with "
				 "SHF_ARM_PURECODE section "
				 "attribute is only supported"
				 " for M-profile targets that "
				 "implement the movw "
				 "instruction."));
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      (*_bfd_error_handler)
		(_("%B(%s): warning: interworking not enabled.\n"
		   "  first occurrence: %B: ARM call to Thumb"),
		 sym_sec->owner, input_bfd, name);
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4151
4152 /* Build a name for an entry in the stub hash table. */
4153
4154 static char *
4155 elf32_arm_stub_name (const asection *input_section,
4156 const asection *sym_sec,
4157 const struct elf32_arm_link_hash_entry *hash,
4158 const Elf_Internal_Rela *rel,
4159 enum elf32_arm_stub_type stub_type)
4160 {
4161 char *stub_name;
4162 bfd_size_type len;
4163
4164 if (hash)
4165 {
4166 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4167 stub_name = (char *) bfd_malloc (len);
4168 if (stub_name != NULL)
4169 sprintf (stub_name, "%08x_%s+%x_%d",
4170 input_section->id & 0xffffffff,
4171 hash->root.root.root.string,
4172 (int) rel->r_addend & 0xffffffff,
4173 (int) stub_type);
4174 }
4175 else
4176 {
4177 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4178 stub_name = (char *) bfd_malloc (len);
4179 if (stub_name != NULL)
4180 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4181 input_section->id & 0xffffffff,
4182 sym_sec->id & 0xffffffff,
4183 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4184 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4185 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4186 (int) rel->r_addend & 0xffffffff,
4187 (int) stub_type);
4188 }
4189
4190 return stub_name;
4191 }
4192
4193 /* Look up an entry in the stub hash. Stub entries are cached because
4194 creating the stub name takes a bit of time. */
4195
4196 static struct elf32_arm_stub_hash_entry *
4197 elf32_arm_get_stub_entry (const asection *input_section,
4198 const asection *sym_sec,
4199 struct elf_link_hash_entry *hash,
4200 const Elf_Internal_Rela *rel,
4201 struct elf32_arm_link_hash_table *htab,
4202 enum elf32_arm_stub_type stub_type)
4203 {
4204 struct elf32_arm_stub_hash_entry *stub_entry;
4205 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4206 const asection *id_sec;
4207
4208 if ((input_section->flags & SEC_CODE) == 0)
4209 return NULL;
4210
4211 /* If this input section is part of a group of sections sharing one
4212 stub section, then use the id of the first section in the group.
4213 Stub names need to include a section id, as there may well be
4214 more than one stub used to reach say, printf, and we need to
4215 distinguish between them. */
4216 id_sec = htab->stub_group[input_section->id].link_sec;
4217
4218 if (h != NULL && h->stub_cache != NULL
4219 && h->stub_cache->h == h
4220 && h->stub_cache->id_sec == id_sec
4221 && h->stub_cache->stub_type == stub_type)
4222 {
4223 stub_entry = h->stub_cache;
4224 }
4225 else
4226 {
4227 char *stub_name;
4228
4229 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4230 if (stub_name == NULL)
4231 return NULL;
4232
4233 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4234 stub_name, FALSE, FALSE);
4235 if (h != NULL)
4236 h->stub_cache = stub_entry;
4237
4238 free (stub_name);
4239 }
4240
4241 return stub_entry;
4242 }
4243
4244 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4245 section. */
4246
4247 static bfd_boolean
4248 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4249 {
4250 if (stub_type >= max_stub_type)
4251 abort (); /* Should be unreachable. */
4252
4253 return FALSE;
4254 }
4255
4256 /* Required alignment (as a power of 2) for the dedicated section holding
4257 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4258 with input sections. */
4259
4260 static int
4261 arm_dedicated_stub_output_section_required_alignment
4262 (enum elf32_arm_stub_type stub_type)
4263 {
4264 if (stub_type >= max_stub_type)
4265 abort (); /* Should be unreachable. */
4266
4267 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4268 return 0;
4269 }
4270
4271 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4272 NULL if veneers of this type are interspersed with input sections. */
4273
4274 static const char *
4275 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4276 {
4277 if (stub_type >= max_stub_type)
4278 abort (); /* Should be unreachable. */
4279
4280 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4281 return NULL;
4282 }
4283
4284 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4285 returns the address of the hash table field in HTAB holding a pointer to the
4286 corresponding input section. Otherwise, returns NULL. */
4287
4288 static asection **
4289 arm_dedicated_stub_input_section_ptr
4290 (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
4291 enum elf32_arm_stub_type stub_type)
4292 {
4293 if (stub_type >= max_stub_type)
4294 abort (); /* Should be unreachable. */
4295
4296 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4297 return NULL;
4298 }
4299
4300 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4301 is the section that branch into veneer and can be NULL if stub should go in
4302 a dedicated output section. Returns a pointer to the stub section, and the
4303 section to which the stub section will be attached (in *LINK_SEC_P).
4304 LINK_SEC_P may be NULL. */
4305
static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated-output-section case: the stub section hangs off the
	 hash table rather than off a stub group, and the output
	 section must already exist (typically placed by a linker
	 script).  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  (*_bfd_error_handler) (_("No address assigned to the veneers output "
				   "section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Normal case: the stub section is shared by the stub group
	 SECTION belongs to.  Fall back to the group leader's stub
	 section when this section has none of its own.  */
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      /* NaCl requires wider alignment (16-byte bundles) than the
	 default 8 bytes.  */
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      /* First stub for this group/type: create the stub input section,
	 named "<prefix><STUB_SUFFIX>".  */
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the stub section on SECTION's own group entry too, in case
     we fell back to the group leader's slot above.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4378
4379 /* Add a new stub entry to the stub hash. Not all fields of the new
4380 stub entry are initialised. */
4381
4382 static struct elf32_arm_stub_hash_entry *
4383 elf32_arm_add_stub (const char *stub_name, asection *section,
4384 struct elf32_arm_link_hash_table *htab,
4385 enum elf32_arm_stub_type stub_type)
4386 {
4387 asection *link_sec;
4388 asection *stub_sec;
4389 struct elf32_arm_stub_hash_entry *stub_entry;
4390
4391 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4392 stub_type);
4393 if (stub_sec == NULL)
4394 return NULL;
4395
4396 /* Enter this entry into the linker stub hash table. */
4397 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4398 TRUE, FALSE);
4399 if (stub_entry == NULL)
4400 {
4401 if (section == NULL)
4402 section = stub_sec;
4403 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4404 section->owner,
4405 stub_name);
4406 return NULL;
4407 }
4408
4409 stub_entry->stub_sec = stub_sec;
4410 stub_entry->stub_offset = 0;
4411 stub_entry->id_sec = link_sec;
4412
4413 return stub_entry;
4414 }
4415
4416 /* Store an Arm insn into an output section not processed by
4417 elf32_arm_write_section. */
4418
4419 static void
4420 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4421 bfd * output_bfd, bfd_vma val, void * ptr)
4422 {
4423 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4424 bfd_putl32 (val, ptr);
4425 else
4426 bfd_putb32 (val, ptr);
4427 }
4428
4429 /* Store a 16-bit Thumb insn into an output section not processed by
4430 elf32_arm_write_section. */
4431
4432 static void
4433 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4434 bfd * output_bfd, bfd_vma val, void * ptr)
4435 {
4436 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4437 bfd_putl16 (val, ptr);
4438 else
4439 bfd_putb16 (val, ptr);
4440 }
4441
4442 /* Store a Thumb2 insn into an output section not processed by
4443 elf32_arm_write_section. */
4444
4445 static void
4446 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4447 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4448 {
4449 /* T2 instructions are 16-bit streamed. */
4450 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4451 {
4452 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4453 bfd_putl16 ((val & 0xffff), ptr + 2);
4454 }
4455 else
4456 {
4457 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4458 bfd_putb16 ((val & 0xffff), ptr + 2);
4459 }
4460 }
4461
4462 /* If it's possible to change R_TYPE to a more efficient access
4463 model, return the new reloc type. */
4464
4465 static unsigned
4466 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4467 struct elf_link_hash_entry *h)
4468 {
4469 int is_local = (h == NULL);
4470
4471 if (bfd_link_pic (info)
4472 || (h && h->root.type == bfd_link_hash_undefweak))
4473 return r_type;
4474
4475 /* We do not support relaxations for Old TLS models. */
4476 switch (r_type)
4477 {
4478 case R_ARM_TLS_GOTDESC:
4479 case R_ARM_TLS_CALL:
4480 case R_ARM_THM_TLS_CALL:
4481 case R_ARM_TLS_DESCSEQ:
4482 case R_ARM_THM_TLS_DESCSEQ:
4483 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4484 }
4485
4486 return r_type;
4487 }
4488
4489 static bfd_reloc_status_type elf32_arm_final_link_relocate
4490 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4491 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4492 const char *, unsigned char, enum arm_st_branch_type,
4493 struct elf_link_hash_entry *, bfd_boolean *, char **);
4494
4495 static unsigned int
4496 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4497 {
4498 switch (stub_type)
4499 {
4500 case arm_stub_a8_veneer_b_cond:
4501 case arm_stub_a8_veneer_b:
4502 case arm_stub_a8_veneer_bl:
4503 return 2;
4504
4505 case arm_stub_long_branch_any_any:
4506 case arm_stub_long_branch_v4t_arm_thumb:
4507 case arm_stub_long_branch_thumb_only:
4508 case arm_stub_long_branch_thumb2_only:
4509 case arm_stub_long_branch_thumb2_only_pure:
4510 case arm_stub_long_branch_v4t_thumb_thumb:
4511 case arm_stub_long_branch_v4t_thumb_arm:
4512 case arm_stub_short_branch_v4t_thumb_arm:
4513 case arm_stub_long_branch_any_arm_pic:
4514 case arm_stub_long_branch_any_thumb_pic:
4515 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4516 case arm_stub_long_branch_v4t_arm_thumb_pic:
4517 case arm_stub_long_branch_v4t_thumb_arm_pic:
4518 case arm_stub_long_branch_thumb_only_pic:
4519 case arm_stub_long_branch_any_tls_pic:
4520 case arm_stub_long_branch_v4t_thumb_tls_pic:
4521 case arm_stub_a8_veneer_blx:
4522 return 4;
4523
4524 case arm_stub_long_branch_arm_nacl:
4525 case arm_stub_long_branch_arm_nacl_pic:
4526 return 16;
4527
4528 default:
4529 abort (); /* Should be unreachable. */
4530 }
4531 }
4532
4533 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4534 veneering (TRUE) or have their own symbol (FALSE). */
4535
4536 static bfd_boolean
4537 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4538 {
4539 if (stub_type >= max_stub_type)
4540 abort (); /* Should be unreachable. */
4541
4542 return FALSE;
4543 }
4544
4545 /* Returns the padding needed for the dedicated section used stubs of type
4546 STUB_TYPE. */
4547
4548 static int
4549 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4550 {
4551 if (stub_type >= max_stub_type)
4552 abort (); /* Should be unreachable. */
4553
4554 return 0;
4555 }
4556
4557 static bfd_boolean
4558 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4559 void * in_arg)
4560 {
4561 #define MAXRELOCS 3
4562 struct elf32_arm_stub_hash_entry *stub_entry;
4563 struct elf32_arm_link_hash_table *globals;
4564 struct bfd_link_info *info;
4565 asection *stub_sec;
4566 bfd *stub_bfd;
4567 bfd_byte *loc;
4568 bfd_vma sym_value;
4569 int template_size;
4570 int size;
4571 const insn_sequence *template_sequence;
4572 int i;
4573 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4574 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4575 int nrelocs = 0;
4576
4577 /* Massage our args to the form they really have. */
4578 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4579 info = (struct bfd_link_info *) in_arg;
4580
4581 globals = elf32_arm_hash_table (info);
4582 if (globals == NULL)
4583 return FALSE;
4584
4585 stub_sec = stub_entry->stub_sec;
4586
4587 if ((globals->fix_cortex_a8 < 0)
4588 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4589 /* We have to do less-strictly-aligned fixes last. */
4590 return TRUE;
4591
4592 /* Make a note of the offset within the stubs for this entry. */
4593 stub_entry->stub_offset = stub_sec->size;
4594 loc = stub_sec->contents + stub_entry->stub_offset;
4595
4596 stub_bfd = stub_sec->owner;
4597
4598 /* This is the address of the stub destination. */
4599 sym_value = (stub_entry->target_value
4600 + stub_entry->target_section->output_offset
4601 + stub_entry->target_section->output_section->vma);
4602
4603 template_sequence = stub_entry->stub_template;
4604 template_size = stub_entry->stub_template_size;
4605
4606 size = 0;
4607 for (i = 0; i < template_size; i++)
4608 {
4609 switch (template_sequence[i].type)
4610 {
4611 case THUMB16_TYPE:
4612 {
4613 bfd_vma data = (bfd_vma) template_sequence[i].data;
4614 if (template_sequence[i].reloc_addend != 0)
4615 {
4616 /* We've borrowed the reloc_addend field to mean we should
4617 insert a condition code into this (Thumb-1 branch)
4618 instruction. See THUMB16_BCOND_INSN. */
4619 BFD_ASSERT ((data & 0xff00) == 0xd000);
4620 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4621 }
4622 bfd_put_16 (stub_bfd, data, loc + size);
4623 size += 2;
4624 }
4625 break;
4626
4627 case THUMB32_TYPE:
4628 bfd_put_16 (stub_bfd,
4629 (template_sequence[i].data >> 16) & 0xffff,
4630 loc + size);
4631 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4632 loc + size + 2);
4633 if (template_sequence[i].r_type != R_ARM_NONE)
4634 {
4635 stub_reloc_idx[nrelocs] = i;
4636 stub_reloc_offset[nrelocs++] = size;
4637 }
4638 size += 4;
4639 break;
4640
4641 case ARM_TYPE:
4642 bfd_put_32 (stub_bfd, template_sequence[i].data,
4643 loc + size);
4644 /* Handle cases where the target is encoded within the
4645 instruction. */
4646 if (template_sequence[i].r_type == R_ARM_JUMP24)
4647 {
4648 stub_reloc_idx[nrelocs] = i;
4649 stub_reloc_offset[nrelocs++] = size;
4650 }
4651 size += 4;
4652 break;
4653
4654 case DATA_TYPE:
4655 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4656 stub_reloc_idx[nrelocs] = i;
4657 stub_reloc_offset[nrelocs++] = size;
4658 size += 4;
4659 break;
4660
4661 default:
4662 BFD_FAIL ();
4663 return FALSE;
4664 }
4665 }
4666
4667 stub_sec->size += size;
4668
4669 /* Stub size has already been computed in arm_size_one_stub. Check
4670 consistency. */
4671 BFD_ASSERT (size == stub_entry->stub_size);
4672
4673 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4674 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4675 sym_value |= 1;
4676
4677 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4678 in each stub. */
4679 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4680
4681 for (i = 0; i < nrelocs; i++)
4682 {
4683 Elf_Internal_Rela rel;
4684 bfd_boolean unresolved_reloc;
4685 char *error_message;
4686 bfd_vma points_to =
4687 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
4688
4689 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4690 rel.r_info = ELF32_R_INFO (0,
4691 template_sequence[stub_reloc_idx[i]].r_type);
4692 rel.r_addend = 0;
4693
4694 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4695 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4696 template should refer back to the instruction after the original
4697 branch. We use target_section as Cortex-A8 erratum workaround stubs
4698 are only generated when both source and target are in the same
4699 section. */
4700 points_to = stub_entry->target_section->output_section->vma
4701 + stub_entry->target_section->output_offset
4702 + stub_entry->source_value;
4703
4704 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4705 (template_sequence[stub_reloc_idx[i]].r_type),
4706 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4707 points_to, info, stub_entry->target_section, "", STT_FUNC,
4708 stub_entry->branch_type,
4709 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4710 &error_message);
4711 }
4712
4713 return TRUE;
4714 #undef MAXRELOCS
4715 }
4716
4717 /* Calculate the template, template size and instruction size for a stub.
4718 Return value is the instruction size. */
4719
4720 static unsigned int
4721 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4722 const insn_sequence **stub_template,
4723 int *stub_template_size)
4724 {
4725 const insn_sequence *template_sequence = NULL;
4726 int template_size = 0, i;
4727 unsigned int size;
4728
4729 template_sequence = stub_definitions[stub_type].template_sequence;
4730 if (stub_template)
4731 *stub_template = template_sequence;
4732
4733 template_size = stub_definitions[stub_type].template_size;
4734 if (stub_template_size)
4735 *stub_template_size = template_size;
4736
4737 size = 0;
4738 for (i = 0; i < template_size; i++)
4739 {
4740 switch (template_sequence[i].type)
4741 {
4742 case THUMB16_TYPE:
4743 size += 2;
4744 break;
4745
4746 case ARM_TYPE:
4747 case THUMB32_TYPE:
4748 case DATA_TYPE:
4749 size += 4;
4750 break;
4751
4752 default:
4753 BFD_FAIL ();
4754 return 0;
4755 }
4756 }
4757
4758 return size;
4759 }
4760
4761 /* As above, but don't actually build the stub. Just bump offset so
4762 we know stub section sizes. */
4763
4764 static bfd_boolean
4765 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4766 void *in_arg ATTRIBUTE_UNUSED)
4767 {
4768 struct elf32_arm_stub_hash_entry *stub_entry;
4769 const insn_sequence *template_sequence;
4770 int template_size, size;
4771
4772 /* Massage our args to the form they really have. */
4773 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4774
4775 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4776 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4777
4778 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4779 &template_size);
4780
4781 stub_entry->stub_size = size;
4782 stub_entry->stub_template = template_sequence;
4783 stub_entry->stub_template_size = template_size;
4784
4785 size = (size + 7) & ~7;
4786 stub_entry->stub_sec->size += size;
4787
4788 return TRUE;
4789 }
4790
4791 /* External entry points for sizing and building linker stubs. */
4792
4793 /* Set up various things so that we can make a list of input sections
4794 for each output section included in the link. Returns -1 on error,
4795 0 when no stubs will be needed, and 1 on success. */
4796
4797 int
4798 elf32_arm_setup_section_lists (bfd *output_bfd,
4799 struct bfd_link_info *info)
4800 {
4801 bfd *input_bfd;
4802 unsigned int bfd_count;
4803 unsigned int top_id, top_index;
4804 asection *section;
4805 asection **input_list, **list;
4806 bfd_size_type amt;
4807 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4808
4809 if (htab == NULL)
4810 return 0;
4811 if (! is_elf_hash_table (htab))
4812 return 0;
4813
4814 /* Count the number of input BFDs and find the top input section id. */
4815 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4816 input_bfd != NULL;
4817 input_bfd = input_bfd->link.next)
4818 {
4819 bfd_count += 1;
4820 for (section = input_bfd->sections;
4821 section != NULL;
4822 section = section->next)
4823 {
4824 if (top_id < section->id)
4825 top_id = section->id;
4826 }
4827 }
4828 htab->bfd_count = bfd_count;
4829
4830 amt = sizeof (struct map_stub) * (top_id + 1);
4831 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4832 if (htab->stub_group == NULL)
4833 return -1;
4834 htab->top_id = top_id;
4835
4836 /* We can't use output_bfd->section_count here to find the top output
4837 section index as some sections may have been removed, and
4838 _bfd_strip_section_from_output doesn't renumber the indices. */
4839 for (section = output_bfd->sections, top_index = 0;
4840 section != NULL;
4841 section = section->next)
4842 {
4843 if (top_index < section->index)
4844 top_index = section->index;
4845 }
4846
4847 htab->top_index = top_index;
4848 amt = sizeof (asection *) * (top_index + 1);
4849 input_list = (asection **) bfd_malloc (amt);
4850 htab->input_list = input_list;
4851 if (input_list == NULL)
4852 return -1;
4853
4854 /* For sections we aren't interested in, mark their entries with a
4855 value we can check later. */
4856 list = input_list + top_index;
4857 do
4858 *list = bfd_abs_section_ptr;
4859 while (list-- != input_list);
4860
4861 for (section = output_bfd->sections;
4862 section != NULL;
4863 section = section->next)
4864 {
4865 if ((section->flags & SEC_CODE) != 0)
4866 input_list[section->index] = NULL;
4867 }
4868
4869 return 1;
4870 }
4871
4872 /* The linker repeatedly calls this function for each input section,
4873 in the order that input sections are linked into output sections.
4874 Build lists of input sections to determine groupings between which
4875 we may insert linker stubs. */
4876
4877 void
4878 elf32_arm_next_input_section (struct bfd_link_info *info,
4879 asection *isec)
4880 {
4881 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4882
4883 if (htab == NULL)
4884 return;
4885
4886 if (isec->output_section->index <= htab->top_index)
4887 {
4888 asection **list = htab->input_list + isec->output_section->index;
4889
4890 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4891 {
4892 /* Steal the link_sec pointer for our list. */
4893 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4894 /* This happens to make the list in reverse order,
4895 which we reverse later. */
4896 PREV_SEC (isec) = *list;
4897 *list = isec;
4898 }
4899 }
4900 }
4901
4902 /* See whether we can group stub sections together. Grouping stub
4903 sections may result in fewer stubs. More importantly, we need to
4904 put all .init* and .fini* stubs at the end of the .init or
4905 .fini output sections respectively, because glibc splits the
4906 _init and _fini functions into multiple parts. Putting a stub in
4907 the middle of a function is not a good idea. */
4908
static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  /* One list per output section, built by elf32_arm_next_input_section;
     non-code output sections were marked with bfd_abs_section_ptr.  */
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Skip output sections we are not interested in.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      /* Walk the (now forward-ordered) list, carving it into groups no
	 larger than stub_group_size.  */
      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* NOTE(review): htab->input_list is freed but not set to NULL here —
     assumes no later code reads it; confirm against other users.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5002
5003 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5004 erratum fix. */
5005
5006 static int
5007 a8_reloc_compare (const void *a, const void *b)
5008 {
5009 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5010 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5011
5012 if (ra->from < rb->from)
5013 return -1;
5014 else if (ra->from > rb->from)
5015 return 1;
5016 else
5017 return 0;
5018 }
5019
5020 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5021 const char *, char **);
5022
5023 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5024 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5025 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5026 otherwise. */
5027
5028 static bfd_boolean
5029 cortex_a8_erratum_scan (bfd *input_bfd,
5030 struct bfd_link_info *info,
5031 struct a8_erratum_fix **a8_fixes_p,
5032 unsigned int *num_a8_fixes_p,
5033 unsigned int *a8_fix_table_size_p,
5034 struct a8_erratum_reloc *a8_relocs,
5035 unsigned int num_a8_relocs,
5036 unsigned prev_num_a8_fixes,
5037 bfd_boolean *stub_changed_p)
5038 {
5039 asection *section;
5040 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5041 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5042 unsigned int num_a8_fixes = *num_a8_fixes_p;
5043 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5044
5045 if (htab == NULL)
5046 return FALSE;
5047
5048 for (section = input_bfd->sections;
5049 section != NULL;
5050 section = section->next)
5051 {
5052 bfd_byte *contents = NULL;
5053 struct _arm_elf_section_data *sec_data;
5054 unsigned int span;
5055 bfd_vma base_vma;
5056
5057 if (elf_section_type (section) != SHT_PROGBITS
5058 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5059 || (section->flags & SEC_EXCLUDE) != 0
5060 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5061 || (section->output_section == bfd_abs_section_ptr))
5062 continue;
5063
5064 base_vma = section->output_section->vma + section->output_offset;
5065
5066 if (elf_section_data (section)->this_hdr.contents != NULL)
5067 contents = elf_section_data (section)->this_hdr.contents;
5068 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5069 return TRUE;
5070
5071 sec_data = elf32_arm_section_data (section);
5072
5073 for (span = 0; span < sec_data->mapcount; span++)
5074 {
5075 unsigned int span_start = sec_data->map[span].vma;
5076 unsigned int span_end = (span == sec_data->mapcount - 1)
5077 ? section->size : sec_data->map[span + 1].vma;
5078 unsigned int i;
5079 char span_type = sec_data->map[span].type;
5080 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5081
5082 if (span_type != 't')
5083 continue;
5084
5085 /* Span is entirely within a single 4KB region: skip scanning. */
5086 if (((base_vma + span_start) & ~0xfff)
5087 == ((base_vma + span_end) & ~0xfff))
5088 continue;
5089
5090 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5091
5092 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5093 * The branch target is in the same 4KB region as the
5094 first half of the branch.
5095 * The instruction before the branch is a 32-bit
5096 length non-branch instruction. */
5097 for (i = span_start; i < span_end;)
5098 {
5099 unsigned int insn = bfd_getl16 (&contents[i]);
5100 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5101 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5102
5103 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5104 insn_32bit = TRUE;
5105
5106 if (insn_32bit)
5107 {
5108 /* Load the rest of the insn (in manual-friendly order). */
5109 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5110
5111 /* Encoding T4: B<c>.W. */
5112 is_b = (insn & 0xf800d000) == 0xf0009000;
5113 /* Encoding T1: BL<c>.W. */
5114 is_bl = (insn & 0xf800d000) == 0xf000d000;
5115 /* Encoding T2: BLX<c>.W. */
5116 is_blx = (insn & 0xf800d000) == 0xf000c000;
5117 /* Encoding T3: B<c>.W (not permitted in IT block). */
5118 is_bcc = (insn & 0xf800d000) == 0xf0008000
5119 && (insn & 0x07f00000) != 0x03800000;
5120 }
5121
5122 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5123
5124 if (((base_vma + i) & 0xfff) == 0xffe
5125 && insn_32bit
5126 && is_32bit_branch
5127 && last_was_32bit
5128 && ! last_was_branch)
5129 {
5130 bfd_signed_vma offset = 0;
5131 bfd_boolean force_target_arm = FALSE;
5132 bfd_boolean force_target_thumb = FALSE;
5133 bfd_vma target;
5134 enum elf32_arm_stub_type stub_type = arm_stub_none;
5135 struct a8_erratum_reloc key, *found;
5136 bfd_boolean use_plt = FALSE;
5137
5138 key.from = base_vma + i;
5139 found = (struct a8_erratum_reloc *)
5140 bsearch (&key, a8_relocs, num_a8_relocs,
5141 sizeof (struct a8_erratum_reloc),
5142 &a8_reloc_compare);
5143
5144 if (found)
5145 {
5146 char *error_message = NULL;
5147 struct elf_link_hash_entry *entry;
5148
5149 /* We don't care about the error returned from this
5150 function, only if there is glue or not. */
5151 entry = find_thumb_glue (info, found->sym_name,
5152 &error_message);
5153
5154 if (entry)
5155 found->non_a8_stub = TRUE;
5156
5157 /* Keep a simpler condition, for the sake of clarity. */
5158 if (htab->root.splt != NULL && found->hash != NULL
5159 && found->hash->root.plt.offset != (bfd_vma) -1)
5160 use_plt = TRUE;
5161
5162 if (found->r_type == R_ARM_THM_CALL)
5163 {
5164 if (found->branch_type == ST_BRANCH_TO_ARM
5165 || use_plt)
5166 force_target_arm = TRUE;
5167 else
5168 force_target_thumb = TRUE;
5169 }
5170 }
5171
5172 /* Check if we have an offending branch instruction. */
5173
5174 if (found && found->non_a8_stub)
5175 /* We've already made a stub for this instruction, e.g.
5176 it's a long branch or a Thumb->ARM stub. Assume that
5177 stub will suffice to work around the A8 erratum (see
5178 setting of always_after_branch above). */
5179 ;
5180 else if (is_bcc)
5181 {
5182 offset = (insn & 0x7ff) << 1;
5183 offset |= (insn & 0x3f0000) >> 4;
5184 offset |= (insn & 0x2000) ? 0x40000 : 0;
5185 offset |= (insn & 0x800) ? 0x80000 : 0;
5186 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5187 if (offset & 0x100000)
5188 offset |= ~ ((bfd_signed_vma) 0xfffff);
5189 stub_type = arm_stub_a8_veneer_b_cond;
5190 }
5191 else if (is_b || is_bl || is_blx)
5192 {
5193 int s = (insn & 0x4000000) != 0;
5194 int j1 = (insn & 0x2000) != 0;
5195 int j2 = (insn & 0x800) != 0;
5196 int i1 = !(j1 ^ s);
5197 int i2 = !(j2 ^ s);
5198
5199 offset = (insn & 0x7ff) << 1;
5200 offset |= (insn & 0x3ff0000) >> 4;
5201 offset |= i2 << 22;
5202 offset |= i1 << 23;
5203 offset |= s << 24;
5204 if (offset & 0x1000000)
5205 offset |= ~ ((bfd_signed_vma) 0xffffff);
5206
5207 if (is_blx)
5208 offset &= ~ ((bfd_signed_vma) 3);
5209
5210 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5211 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5212 }
5213
5214 if (stub_type != arm_stub_none)
5215 {
5216 bfd_vma pc_for_insn = base_vma + i + 4;
5217
5218 /* The original instruction is a BL, but the target is
5219 an ARM instruction. If we were not making a stub,
5220 the BL would have been converted to a BLX. Use the
5221 BLX stub instead in that case. */
5222 if (htab->use_blx && force_target_arm
5223 && stub_type == arm_stub_a8_veneer_bl)
5224 {
5225 stub_type = arm_stub_a8_veneer_blx;
5226 is_blx = TRUE;
5227 is_bl = FALSE;
5228 }
5229 /* Conversely, if the original instruction was
5230 BLX but the target is Thumb mode, use the BL
5231 stub. */
5232 else if (force_target_thumb
5233 && stub_type == arm_stub_a8_veneer_blx)
5234 {
5235 stub_type = arm_stub_a8_veneer_bl;
5236 is_blx = FALSE;
5237 is_bl = TRUE;
5238 }
5239
5240 if (is_blx)
5241 pc_for_insn &= ~ ((bfd_vma) 3);
5242
5243 /* If we found a relocation, use the proper destination,
5244 not the offset in the (unrelocated) instruction.
5245 Note this is always done if we switched the stub type
5246 above. */
5247 if (found)
5248 offset =
5249 (bfd_signed_vma) (found->destination - pc_for_insn);
5250
5251 /* If the stub will use a Thumb-mode branch to a
5252 PLT target, redirect it to the preceding Thumb
5253 entry point. */
5254 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5255 offset -= PLT_THUMB_STUB_SIZE;
5256
5257 target = pc_for_insn + offset;
5258
5259 /* The BLX stub is ARM-mode code. Adjust the offset to
5260 take the different PC value (+8 instead of +4) into
5261 account. */
5262 if (stub_type == arm_stub_a8_veneer_blx)
5263 offset += 4;
5264
5265 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5266 {
5267 char *stub_name = NULL;
5268
5269 if (num_a8_fixes == a8_fix_table_size)
5270 {
5271 a8_fix_table_size *= 2;
5272 a8_fixes = (struct a8_erratum_fix *)
5273 bfd_realloc (a8_fixes,
5274 sizeof (struct a8_erratum_fix)
5275 * a8_fix_table_size);
5276 }
5277
5278 if (num_a8_fixes < prev_num_a8_fixes)
5279 {
5280 /* If we're doing a subsequent scan,
5281 check if we've found the same fix as
5282 before, and try and reuse the stub
5283 name. */
5284 stub_name = a8_fixes[num_a8_fixes].stub_name;
5285 if ((a8_fixes[num_a8_fixes].section != section)
5286 || (a8_fixes[num_a8_fixes].offset != i))
5287 {
5288 free (stub_name);
5289 stub_name = NULL;
5290 *stub_changed_p = TRUE;
5291 }
5292 }
5293
5294 if (!stub_name)
5295 {
5296 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5297 if (stub_name != NULL)
5298 sprintf (stub_name, "%x:%x", section->id, i);
5299 }
5300
5301 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5302 a8_fixes[num_a8_fixes].section = section;
5303 a8_fixes[num_a8_fixes].offset = i;
5304 a8_fixes[num_a8_fixes].target_offset =
5305 target - base_vma;
5306 a8_fixes[num_a8_fixes].orig_insn = insn;
5307 a8_fixes[num_a8_fixes].stub_name = stub_name;
5308 a8_fixes[num_a8_fixes].stub_type = stub_type;
5309 a8_fixes[num_a8_fixes].branch_type =
5310 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5311
5312 num_a8_fixes++;
5313 }
5314 }
5315 }
5316
5317 i += insn_32bit ? 4 : 2;
5318 last_was_32bit = insn_32bit;
5319 last_was_branch = is_32bit_branch;
5320 }
5321 }
5322
5323 if (elf_section_data (section)->this_hdr.contents == NULL)
5324 free (contents);
5325 }
5326
5327 *a8_fixes_p = a8_fixes;
5328 *num_a8_fixes_p = num_a8_fixes;
5329 *a8_fix_table_size_p = a8_fix_table_size;
5330
5331 return FALSE;
5332 }
5333
5334 /* Create or update a stub entry depending on whether the stub can already be
5335 found in HTAB. The stub is identified by:
5336 - its type STUB_TYPE
5337 - its source branch (note that several can share the same stub) whose
5338 section and relocation (if any) are given by SECTION and IRELA
5339 respectively
5340 - its target symbol whose input section, hash, name, value and branch type
5341 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5342 respectively
5343
5344 If found, the value of the stub's target symbol is updated from SYM_VALUE
5345 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5346 TRUE and the stub entry is initialized.
5347
5348 Returns whether the stub could be successfully created or updated, or FALSE
   if an error occurred.  */
5350
5351 static bfd_boolean
5352 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5353 enum elf32_arm_stub_type stub_type, asection *section,
5354 Elf_Internal_Rela *irela, asection *sym_sec,
5355 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5356 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5357 bfd_boolean *new_stub)
5358 {
5359 const asection *id_sec;
5360 char *stub_name;
5361 struct elf32_arm_stub_hash_entry *stub_entry;
5362 unsigned int r_type;
5363 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5364
5365 BFD_ASSERT (stub_type != arm_stub_none);
5366 *new_stub = FALSE;
5367
5368 if (sym_claimed)
5369 stub_name = sym_name;
5370 else
5371 {
5372 BFD_ASSERT (irela);
5373 BFD_ASSERT (section);
5374
5375 /* Support for grouping stub sections. */
5376 id_sec = htab->stub_group[section->id].link_sec;
5377
5378 /* Get the name of this stub. */
5379 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5380 stub_type);
5381 if (!stub_name)
5382 return FALSE;
5383 }
5384
5385 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5386 FALSE);
5387 /* The proper stub has already been created, just update its value. */
5388 if (stub_entry != NULL)
5389 {
5390 if (!sym_claimed)
5391 free (stub_name);
5392 stub_entry->target_value = sym_value;
5393 return TRUE;
5394 }
5395
5396 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5397 if (stub_entry == NULL)
5398 {
5399 if (!sym_claimed)
5400 free (stub_name);
5401 return FALSE;
5402 }
5403
5404 stub_entry->target_value = sym_value;
5405 stub_entry->target_section = sym_sec;
5406 stub_entry->stub_type = stub_type;
5407 stub_entry->h = hash;
5408 stub_entry->branch_type = branch_type;
5409
5410 if (sym_claimed)
5411 stub_entry->output_name = sym_name;
5412 else
5413 {
5414 if (sym_name == NULL)
5415 sym_name = "unnamed";
5416 stub_entry->output_name = (char *)
5417 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5418 + strlen (sym_name));
5419 if (stub_entry->output_name == NULL)
5420 {
5421 free (stub_name);
5422 return FALSE;
5423 }
5424
5425 /* For historical reasons, use the existing names for ARM-to-Thumb and
5426 Thumb-to-ARM stubs. */
5427 r_type = ELF32_R_TYPE (irela->r_info);
5428 if ((r_type == (unsigned int) R_ARM_THM_CALL
5429 || r_type == (unsigned int) R_ARM_THM_JUMP24
5430 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5431 && branch_type == ST_BRANCH_TO_ARM)
5432 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5433 else if ((r_type == (unsigned int) R_ARM_CALL
5434 || r_type == (unsigned int) R_ARM_JUMP24)
5435 && branch_type == ST_BRANCH_TO_THUMB)
5436 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5437 else
5438 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5439 }
5440
5441 *new_stub = TRUE;
5442 return TRUE;
5443 }
5444
5445 /* Determine and set the size of the stub section for a final link.
5446
5447 The basic idea here is to examine all the relocations looking for
5448 PC-relative calls to a target that is unreachable with a "bl"
5449 instruction. */
5450
5451 bfd_boolean
5452 elf32_arm_size_stubs (bfd *output_bfd,
5453 bfd *stub_bfd,
5454 struct bfd_link_info *info,
5455 bfd_signed_vma group_size,
5456 asection * (*add_stub_section) (const char *, asection *,
5457 asection *,
5458 unsigned int),
5459 void (*layout_sections_again) (void))
5460 {
5461 bfd_size_type stub_group_size;
5462 bfd_boolean stubs_always_after_branch;
5463 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5464 struct a8_erratum_fix *a8_fixes = NULL;
5465 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5466 struct a8_erratum_reloc *a8_relocs = NULL;
5467 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5468
5469 if (htab == NULL)
5470 return FALSE;
5471
5472 if (htab->fix_cortex_a8)
5473 {
5474 a8_fixes = (struct a8_erratum_fix *)
5475 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5476 a8_relocs = (struct a8_erratum_reloc *)
5477 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5478 }
5479
5480 /* Propagate mach to stub bfd, because it may not have been
5481 finalized when we created stub_bfd. */
5482 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5483 bfd_get_mach (output_bfd));
5484
5485 /* Stash our params away. */
5486 htab->stub_bfd = stub_bfd;
5487 htab->add_stub_section = add_stub_section;
5488 htab->layout_sections_again = layout_sections_again;
5489 stubs_always_after_branch = group_size < 0;
5490
5491 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5492 as the first half of a 32-bit branch straddling two 4K pages. This is a
5493 crude way of enforcing that. */
5494 if (htab->fix_cortex_a8)
5495 stubs_always_after_branch = 1;
5496
5497 if (group_size < 0)
5498 stub_group_size = -group_size;
5499 else
5500 stub_group_size = group_size;
5501
5502 if (stub_group_size == 1)
5503 {
5504 /* Default values. */
5505 /* Thumb branch range is +-4MB has to be used as the default
5506 maximum size (a given section can contain both ARM and Thumb
5507 code, so the worst case has to be taken into account).
5508
5509 This value is 24K less than that, which allows for 2025
5510 12-byte stubs. If we exceed that, then we will fail to link.
5511 The user will have to relink with an explicit group size
5512 option. */
5513 stub_group_size = 4170000;
5514 }
5515
5516 group_sections (htab, stub_group_size, stubs_always_after_branch);
5517
5518 /* If we're applying the cortex A8 fix, we need to determine the
5519 program header size now, because we cannot change it later --
5520 that could alter section placements. Notice the A8 erratum fix
5521 ends up requiring the section addresses to remain unchanged
5522 modulo the page size. That's something we cannot represent
5523 inside BFD, and we don't want to force the section alignment to
5524 be the page size. */
5525 if (htab->fix_cortex_a8)
5526 (*htab->layout_sections_again) ();
5527
5528 while (1)
5529 {
5530 bfd *input_bfd;
5531 unsigned int bfd_indx;
5532 asection *stub_sec;
5533 enum elf32_arm_stub_type stub_type;
5534 bfd_boolean stub_changed = FALSE;
5535 unsigned prev_num_a8_fixes = num_a8_fixes;
5536
5537 num_a8_fixes = 0;
5538 for (input_bfd = info->input_bfds, bfd_indx = 0;
5539 input_bfd != NULL;
5540 input_bfd = input_bfd->link.next, bfd_indx++)
5541 {
5542 Elf_Internal_Shdr *symtab_hdr;
5543 asection *section;
5544 Elf_Internal_Sym *local_syms = NULL;
5545
5546 if (!is_arm_elf (input_bfd))
5547 continue;
5548
5549 num_a8_relocs = 0;
5550
5551 /* We'll need the symbol table in a second. */
5552 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5553 if (symtab_hdr->sh_info == 0)
5554 continue;
5555
5556 /* Walk over each section attached to the input bfd. */
5557 for (section = input_bfd->sections;
5558 section != NULL;
5559 section = section->next)
5560 {
5561 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5562
5563 /* If there aren't any relocs, then there's nothing more
5564 to do. */
5565 if ((section->flags & SEC_RELOC) == 0
5566 || section->reloc_count == 0
5567 || (section->flags & SEC_CODE) == 0)
5568 continue;
5569
5570 /* If this section is a link-once section that will be
5571 discarded, then don't create any stubs. */
5572 if (section->output_section == NULL
5573 || section->output_section->owner != output_bfd)
5574 continue;
5575
5576 /* Get the relocs. */
5577 internal_relocs
5578 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5579 NULL, info->keep_memory);
5580 if (internal_relocs == NULL)
5581 goto error_ret_free_local;
5582
5583 /* Now examine each relocation. */
5584 irela = internal_relocs;
5585 irelaend = irela + section->reloc_count;
5586 for (; irela < irelaend; irela++)
5587 {
5588 unsigned int r_type, r_indx;
5589 asection *sym_sec;
5590 bfd_vma sym_value;
5591 bfd_vma destination;
5592 struct elf32_arm_link_hash_entry *hash;
5593 const char *sym_name;
5594 unsigned char st_type;
5595 enum arm_st_branch_type branch_type;
5596 bfd_boolean created_stub = FALSE;
5597
5598 r_type = ELF32_R_TYPE (irela->r_info);
5599 r_indx = ELF32_R_SYM (irela->r_info);
5600
5601 if (r_type >= (unsigned int) R_ARM_max)
5602 {
5603 bfd_set_error (bfd_error_bad_value);
5604 error_ret_free_internal:
5605 if (elf_section_data (section)->relocs == NULL)
5606 free (internal_relocs);
5607 /* Fall through. */
5608 error_ret_free_local:
5609 if (local_syms != NULL
5610 && (symtab_hdr->contents
5611 != (unsigned char *) local_syms))
5612 free (local_syms);
5613 return FALSE;
5614 }
5615
5616 hash = NULL;
5617 if (r_indx >= symtab_hdr->sh_info)
5618 hash = elf32_arm_hash_entry
5619 (elf_sym_hashes (input_bfd)
5620 [r_indx - symtab_hdr->sh_info]);
5621
5622 /* Only look for stubs on branch instructions, or
5623 non-relaxed TLSCALL */
5624 if ((r_type != (unsigned int) R_ARM_CALL)
5625 && (r_type != (unsigned int) R_ARM_THM_CALL)
5626 && (r_type != (unsigned int) R_ARM_JUMP24)
5627 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5628 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5629 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5630 && (r_type != (unsigned int) R_ARM_PLT32)
5631 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5632 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5633 && r_type == elf32_arm_tls_transition
5634 (info, r_type, &hash->root)
5635 && ((hash ? hash->tls_type
5636 : (elf32_arm_local_got_tls_type
5637 (input_bfd)[r_indx]))
5638 & GOT_TLS_GDESC) != 0))
5639 continue;
5640
5641 /* Now determine the call target, its name, value,
5642 section. */
5643 sym_sec = NULL;
5644 sym_value = 0;
5645 destination = 0;
5646 sym_name = NULL;
5647
5648 if (r_type == (unsigned int) R_ARM_TLS_CALL
5649 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5650 {
5651 /* A non-relaxed TLS call. The target is the
5652 plt-resident trampoline and nothing to do
5653 with the symbol. */
5654 BFD_ASSERT (htab->tls_trampoline > 0);
5655 sym_sec = htab->root.splt;
5656 sym_value = htab->tls_trampoline;
5657 hash = 0;
5658 st_type = STT_FUNC;
5659 branch_type = ST_BRANCH_TO_ARM;
5660 }
5661 else if (!hash)
5662 {
5663 /* It's a local symbol. */
5664 Elf_Internal_Sym *sym;
5665
5666 if (local_syms == NULL)
5667 {
5668 local_syms
5669 = (Elf_Internal_Sym *) symtab_hdr->contents;
5670 if (local_syms == NULL)
5671 local_syms
5672 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5673 symtab_hdr->sh_info, 0,
5674 NULL, NULL, NULL);
5675 if (local_syms == NULL)
5676 goto error_ret_free_internal;
5677 }
5678
5679 sym = local_syms + r_indx;
5680 if (sym->st_shndx == SHN_UNDEF)
5681 sym_sec = bfd_und_section_ptr;
5682 else if (sym->st_shndx == SHN_ABS)
5683 sym_sec = bfd_abs_section_ptr;
5684 else if (sym->st_shndx == SHN_COMMON)
5685 sym_sec = bfd_com_section_ptr;
5686 else
5687 sym_sec =
5688 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5689
5690 if (!sym_sec)
5691 /* This is an undefined symbol. It can never
5692 be resolved. */
5693 continue;
5694
5695 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5696 sym_value = sym->st_value;
5697 destination = (sym_value + irela->r_addend
5698 + sym_sec->output_offset
5699 + sym_sec->output_section->vma);
5700 st_type = ELF_ST_TYPE (sym->st_info);
5701 branch_type =
5702 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5703 sym_name
5704 = bfd_elf_string_from_elf_section (input_bfd,
5705 symtab_hdr->sh_link,
5706 sym->st_name);
5707 }
5708 else
5709 {
5710 /* It's an external symbol. */
5711 while (hash->root.root.type == bfd_link_hash_indirect
5712 || hash->root.root.type == bfd_link_hash_warning)
5713 hash = ((struct elf32_arm_link_hash_entry *)
5714 hash->root.root.u.i.link);
5715
5716 if (hash->root.root.type == bfd_link_hash_defined
5717 || hash->root.root.type == bfd_link_hash_defweak)
5718 {
5719 sym_sec = hash->root.root.u.def.section;
5720 sym_value = hash->root.root.u.def.value;
5721
5722 struct elf32_arm_link_hash_table *globals =
5723 elf32_arm_hash_table (info);
5724
5725 /* For a destination in a shared library,
5726 use the PLT stub as target address to
5727 decide whether a branch stub is
5728 needed. */
5729 if (globals != NULL
5730 && globals->root.splt != NULL
5731 && hash != NULL
5732 && hash->root.plt.offset != (bfd_vma) -1)
5733 {
5734 sym_sec = globals->root.splt;
5735 sym_value = hash->root.plt.offset;
5736 if (sym_sec->output_section != NULL)
5737 destination = (sym_value
5738 + sym_sec->output_offset
5739 + sym_sec->output_section->vma);
5740 }
5741 else if (sym_sec->output_section != NULL)
5742 destination = (sym_value + irela->r_addend
5743 + sym_sec->output_offset
5744 + sym_sec->output_section->vma);
5745 }
5746 else if ((hash->root.root.type == bfd_link_hash_undefined)
5747 || (hash->root.root.type == bfd_link_hash_undefweak))
5748 {
5749 /* For a shared library, use the PLT stub as
5750 target address to decide whether a long
5751 branch stub is needed.
5752 For absolute code, they cannot be handled. */
5753 struct elf32_arm_link_hash_table *globals =
5754 elf32_arm_hash_table (info);
5755
5756 if (globals != NULL
5757 && globals->root.splt != NULL
5758 && hash != NULL
5759 && hash->root.plt.offset != (bfd_vma) -1)
5760 {
5761 sym_sec = globals->root.splt;
5762 sym_value = hash->root.plt.offset;
5763 if (sym_sec->output_section != NULL)
5764 destination = (sym_value
5765 + sym_sec->output_offset
5766 + sym_sec->output_section->vma);
5767 }
5768 else
5769 continue;
5770 }
5771 else
5772 {
5773 bfd_set_error (bfd_error_bad_value);
5774 goto error_ret_free_internal;
5775 }
5776 st_type = hash->root.type;
5777 branch_type =
5778 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5779 sym_name = hash->root.root.root.string;
5780 }
5781
5782 do
5783 {
5784 bfd_boolean new_stub;
5785
5786 /* Determine what (if any) linker stub is needed. */
5787 stub_type = arm_type_of_stub (info, section, irela,
5788 st_type, &branch_type,
5789 hash, destination, sym_sec,
5790 input_bfd, sym_name);
5791 if (stub_type == arm_stub_none)
5792 break;
5793
5794 /* We've either created a stub for this reloc already,
5795 or we are about to. */
5796 created_stub =
5797 elf32_arm_create_stub (htab, stub_type, section, irela,
5798 sym_sec, hash,
5799 (char *) sym_name, sym_value,
5800 branch_type, &new_stub);
5801
5802 if (!created_stub)
5803 goto error_ret_free_internal;
5804 else if (!new_stub)
5805 break;
5806 else
5807 stub_changed = TRUE;
5808 }
5809 while (0);
5810
5811 /* Look for relocations which might trigger Cortex-A8
5812 erratum. */
5813 if (htab->fix_cortex_a8
5814 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5815 || r_type == (unsigned int) R_ARM_THM_JUMP19
5816 || r_type == (unsigned int) R_ARM_THM_CALL
5817 || r_type == (unsigned int) R_ARM_THM_XPC22))
5818 {
5819 bfd_vma from = section->output_section->vma
5820 + section->output_offset
5821 + irela->r_offset;
5822
5823 if ((from & 0xfff) == 0xffe)
5824 {
5825 /* Found a candidate. Note we haven't checked the
5826 destination is within 4K here: if we do so (and
5827 don't create an entry in a8_relocs) we can't tell
5828 that a branch should have been relocated when
5829 scanning later. */
5830 if (num_a8_relocs == a8_reloc_table_size)
5831 {
5832 a8_reloc_table_size *= 2;
5833 a8_relocs = (struct a8_erratum_reloc *)
5834 bfd_realloc (a8_relocs,
5835 sizeof (struct a8_erratum_reloc)
5836 * a8_reloc_table_size);
5837 }
5838
5839 a8_relocs[num_a8_relocs].from = from;
5840 a8_relocs[num_a8_relocs].destination = destination;
5841 a8_relocs[num_a8_relocs].r_type = r_type;
5842 a8_relocs[num_a8_relocs].branch_type = branch_type;
5843 a8_relocs[num_a8_relocs].sym_name = sym_name;
5844 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5845 a8_relocs[num_a8_relocs].hash = hash;
5846
5847 num_a8_relocs++;
5848 }
5849 }
5850 }
5851
5852 /* We're done with the internal relocs, free them. */
5853 if (elf_section_data (section)->relocs == NULL)
5854 free (internal_relocs);
5855 }
5856
5857 if (htab->fix_cortex_a8)
5858 {
5859 /* Sort relocs which might apply to Cortex-A8 erratum. */
5860 qsort (a8_relocs, num_a8_relocs,
5861 sizeof (struct a8_erratum_reloc),
5862 &a8_reloc_compare);
5863
5864 /* Scan for branches which might trigger Cortex-A8 erratum. */
5865 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5866 &num_a8_fixes, &a8_fix_table_size,
5867 a8_relocs, num_a8_relocs,
5868 prev_num_a8_fixes, &stub_changed)
5869 != 0)
5870 goto error_ret_free_local;
5871 }
5872
5873 if (local_syms != NULL
5874 && symtab_hdr->contents != (unsigned char *) local_syms)
5875 {
5876 if (!info->keep_memory)
5877 free (local_syms);
5878 else
5879 symtab_hdr->contents = (unsigned char *) local_syms;
5880 }
5881 }
5882
5883 if (prev_num_a8_fixes != num_a8_fixes)
5884 stub_changed = TRUE;
5885
5886 if (!stub_changed)
5887 break;
5888
5889 /* OK, we've added some stubs. Find out the new size of the
5890 stub sections. */
5891 for (stub_sec = htab->stub_bfd->sections;
5892 stub_sec != NULL;
5893 stub_sec = stub_sec->next)
5894 {
5895 /* Ignore non-stub sections. */
5896 if (!strstr (stub_sec->name, STUB_SUFFIX))
5897 continue;
5898
5899 stub_sec->size = 0;
5900 }
5901
5902 /* Compute stub section size, considering padding. */
5903 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5904 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
5905 stub_type++)
5906 {
5907 int size, padding;
5908 asection **stub_sec_p;
5909
5910 padding = arm_dedicated_stub_section_padding (stub_type);
5911 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
5912 /* Skip if no stub input section or no stub section padding
5913 required. */
5914 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
5915 continue;
5916 /* Stub section padding required but no dedicated section. */
5917 BFD_ASSERT (stub_sec_p);
5918
5919 size = (*stub_sec_p)->size;
5920 size = (size + padding - 1) & ~(padding - 1);
5921 (*stub_sec_p)->size = size;
5922 }
5923
5924 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5925 if (htab->fix_cortex_a8)
5926 for (i = 0; i < num_a8_fixes; i++)
5927 {
5928 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5929 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
5930
5931 if (stub_sec == NULL)
5932 return FALSE;
5933
5934 stub_sec->size
5935 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5936 NULL);
5937 }
5938
5939
5940 /* Ask the linker to do its stuff. */
5941 (*htab->layout_sections_again) ();
5942 }
5943
5944 /* Add stubs for Cortex-A8 erratum fixes now. */
5945 if (htab->fix_cortex_a8)
5946 {
5947 for (i = 0; i < num_a8_fixes; i++)
5948 {
5949 struct elf32_arm_stub_hash_entry *stub_entry;
5950 char *stub_name = a8_fixes[i].stub_name;
5951 asection *section = a8_fixes[i].section;
5952 unsigned int section_id = a8_fixes[i].section->id;
5953 asection *link_sec = htab->stub_group[section_id].link_sec;
5954 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5955 const insn_sequence *template_sequence;
5956 int template_size, size = 0;
5957
5958 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5959 TRUE, FALSE);
5960 if (stub_entry == NULL)
5961 {
5962 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5963 section->owner,
5964 stub_name);
5965 return FALSE;
5966 }
5967
5968 stub_entry->stub_sec = stub_sec;
5969 stub_entry->stub_offset = 0;
5970 stub_entry->id_sec = link_sec;
5971 stub_entry->stub_type = a8_fixes[i].stub_type;
5972 stub_entry->source_value = a8_fixes[i].offset;
5973 stub_entry->target_section = a8_fixes[i].section;
5974 stub_entry->target_value = a8_fixes[i].target_offset;
5975 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5976 stub_entry->branch_type = a8_fixes[i].branch_type;
5977
5978 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5979 &template_sequence,
5980 &template_size);
5981
5982 stub_entry->stub_size = size;
5983 stub_entry->stub_template = template_sequence;
5984 stub_entry->stub_template_size = template_size;
5985 }
5986
5987 /* Stash the Cortex-A8 erratum fix array for use later in
5988 elf32_arm_write_section(). */
5989 htab->a8_erratum_fixes = a8_fixes;
5990 htab->num_a8_erratum_fixes = num_a8_fixes;
5991 }
5992 else
5993 {
5994 htab->a8_erratum_fixes = NULL;
5995 htab->num_a8_erratum_fixes = 0;
5996 }
5997 return TRUE;
5998 }
5999
6000 /* Build all the stubs associated with the current output file. The
6001 stubs are kept in a hash table attached to the main linker hash
6002 table. We also set up the .plt entries for statically linked PIC
6003 functions here. This function is called via arm_elf_finish in the
6004 linker. */
6005
6006 bfd_boolean
6007 elf32_arm_build_stubs (struct bfd_link_info *info)
6008 {
6009 asection *stub_sec;
6010 struct bfd_hash_table *table;
6011 struct elf32_arm_link_hash_table *htab;
6012
6013 htab = elf32_arm_hash_table (info);
6014 if (htab == NULL)
6015 return FALSE;
6016
6017 for (stub_sec = htab->stub_bfd->sections;
6018 stub_sec != NULL;
6019 stub_sec = stub_sec->next)
6020 {
6021 bfd_size_type size;
6022
6023 /* Ignore non-stub sections. */
6024 if (!strstr (stub_sec->name, STUB_SUFFIX))
6025 continue;
6026
6027 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6028 must at least be done for stub section requiring padding. */
6029 size = stub_sec->size;
6030 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6031 if (stub_sec->contents == NULL && size != 0)
6032 return FALSE;
6033 stub_sec->size = 0;
6034 }
6035
6036 /* Build the stubs as directed by the stub hash table. */
6037 table = &htab->stub_hash_table;
6038 bfd_hash_traverse (table, arm_build_one_stub, info);
6039 if (htab->fix_cortex_a8)
6040 {
6041 /* Place the cortex a8 stubs last. */
6042 htab->fix_cortex_a8 = -1;
6043 bfd_hash_traverse (table, arm_build_one_stub, info);
6044 }
6045
6046 return TRUE;
6047 }
6048
6049 /* Locate the Thumb encoded calling stub for NAME. */
6050
6051 static struct elf_link_hash_entry *
6052 find_thumb_glue (struct bfd_link_info *link_info,
6053 const char *name,
6054 char **error_message)
6055 {
6056 char *tmp_name;
6057 struct elf_link_hash_entry *hash;
6058 struct elf32_arm_link_hash_table *hash_table;
6059
6060 /* We need a pointer to the armelf specific hash table. */
6061 hash_table = elf32_arm_hash_table (link_info);
6062 if (hash_table == NULL)
6063 return NULL;
6064
6065 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6066 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
6067
6068 BFD_ASSERT (tmp_name);
6069
6070 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
6071
6072 hash = elf_link_hash_lookup
6073 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6074
6075 if (hash == NULL
6076 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
6077 tmp_name, name) == -1)
6078 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6079
6080 free (tmp_name);
6081
6082 return hash;
6083 }
6084
6085 /* Locate the ARM encoded calling stub for NAME. */
6086
6087 static struct elf_link_hash_entry *
6088 find_arm_glue (struct bfd_link_info *link_info,
6089 const char *name,
6090 char **error_message)
6091 {
6092 char *tmp_name;
6093 struct elf_link_hash_entry *myh;
6094 struct elf32_arm_link_hash_table *hash_table;
6095
6096 /* We need a pointer to the elfarm specific hash table. */
6097 hash_table = elf32_arm_hash_table (link_info);
6098 if (hash_table == NULL)
6099 return NULL;
6100
6101 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6102 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6103
6104 BFD_ASSERT (tmp_name);
6105
6106 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6107
6108 myh = elf_link_hash_lookup
6109 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6110
6111 if (myh == NULL
6112 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
6113 tmp_name, name) == -1)
6114 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6115
6116 free (tmp_name);
6117
6118 return myh;
6119 }
6120
6121 /* ARM->Thumb glue (static images):
6122
6123 .arm
6124 __func_from_arm:
6125 ldr r12, __func_addr
6126 bx r12
6127 __func_addr:
6128 .word func @ behave as if you saw a ARM_32 reloc.
6129
6130 (v5t static images)
6131 .arm
6132 __func_from_arm:
6133 ldr pc, __func_addr
6134 __func_addr:
6135 .word func @ behave as if you saw a ARM_32 reloc.
6136
6137 (relocatable images)
6138 .arm
6139 __func_from_arm:
6140 ldr r12, __func_offset
6141 add r12, r12, pc
6142 bx r12
6143 __func_offset:
6144 .word func - . */
6145
/* Encodings for the ARM->Thumb glue sequences shown in the comment above.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, __func_addr  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12  */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* .word func placeholder (behaves as an ARM_32 reloc)  */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, __func_addr  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* .word func placeholder  */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, __func_offset  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12  */
6159
6160 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
6161
6162 .thumb .thumb
6163 .align 2 .align 2
6164 __func_from_thumb: __func_from_thumb:
6165 bx pc push {r6, lr}
6166 nop ldr r6, __func_addr
6167 .arm mov lr, pc
6168 b func bx r6
6169 .arm
6170 ;; back_to_thumb
6171 ldmia r13! {r6, lr}
6172 bx lr
6173 __func_addr:
6174 .word func */
6175
/* Encodings for the Thumb->ARM glue sequence shown in the comment above.  */
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc  */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop  */
static const insn32 t2a3_b_insn = 0xea000000;		/* b func  */

/* Sizes of the erratum veneers emitted in elf32_arm_write_section.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer (see record_arm_bx_glue).  NOTE(review): the register
   fields below are presumably patched with the veneer's register when the
   veneer is emitted — confirm in the section-writing code.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN  */
6189
6190 #ifndef ELFARM_NABI_C_INCLUDED
6191 static void
6192 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6193 {
6194 asection * s;
6195 bfd_byte * contents;
6196
6197 if (size == 0)
6198 {
6199 /* Do not include empty glue sections in the output. */
6200 if (abfd != NULL)
6201 {
6202 s = bfd_get_linker_section (abfd, name);
6203 if (s != NULL)
6204 s->flags |= SEC_EXCLUDE;
6205 }
6206 return;
6207 }
6208
6209 BFD_ASSERT (abfd != NULL);
6210
6211 s = bfd_get_linker_section (abfd, name);
6212 BFD_ASSERT (s != NULL);
6213
6214 contents = (bfd_byte *) bfd_alloc (abfd, size);
6215
6216 BFD_ASSERT (s->size == size);
6217 s->contents = contents;
6218 }
6219
6220 bfd_boolean
6221 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6222 {
6223 struct elf32_arm_link_hash_table * globals;
6224
6225 globals = elf32_arm_hash_table (info);
6226 BFD_ASSERT (globals != NULL);
6227
6228 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6229 globals->arm_glue_size,
6230 ARM2THUMB_GLUE_SECTION_NAME);
6231
6232 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6233 globals->thumb_glue_size,
6234 THUMB2ARM_GLUE_SECTION_NAME);
6235
6236 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6237 globals->vfp11_erratum_glue_size,
6238 VFP11_ERRATUM_VENEER_SECTION_NAME);
6239
6240 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6241 globals->stm32l4xx_erratum_glue_size,
6242 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6243
6244 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6245 globals->bx_glue_size,
6246 ARM_BX_GLUE_SECTION_NAME);
6247
6248 return TRUE;
6249 }
6250
6251 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6252 returns the symbol identifying the stub. */
6253
static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the glue symbol name (ARM2THUMB_GLUE_ENTRY_NAME is a format
     string with a single %s for NAME).  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* The symbol is added with global binding but immediately forced local
     so it never escapes into the dynamic symbol table.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Reserve space for whichever glue sequence this link needs:
     PIC glue for shared/relocatable output, the short BLX-capable
     sequence when BLX is available, otherwise the full static one.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
6323
6324 /* Allocate space for ARMv4 BX veneers. */
6325
static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  The %d in ARM_BX_GLUE_ENTRY_NAME leaves
     room for the two-digit register number (REG is at most 14 here).  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Record the veneer's offset for REG.  NOTE(review): the low bits of
     bx_glue_offset appear to carry an emission-state flag (the | 2) —
     confirm against the code that emits the veneers.  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
6380
6381
6382 /* Add an entry to the code/data map for section SEC. */
6383
6384 static void
6385 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6386 {
6387 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6388 unsigned int newidx;
6389
6390 if (sec_data->map == NULL)
6391 {
6392 sec_data->map = (elf32_arm_section_map *)
6393 bfd_malloc (sizeof (elf32_arm_section_map));
6394 sec_data->mapcount = 0;
6395 sec_data->mapsize = 1;
6396 }
6397
6398 newidx = sec_data->mapcount++;
6399
6400 if (sec_data->mapcount > sec_data->mapsize)
6401 {
6402 sec_data->mapsize *= 2;
6403 sec_data->map = (elf32_arm_section_map *)
6404 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6405 * sizeof (elf32_arm_section_map));
6406 }
6407
6408 if (sec_data->map)
6409 {
6410 sec_data->map[newidx].vma = vma;
6411 sec_data->map[newidx].type = type;
6412 }
6413 }
6414
6415
6416 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6417 veneers are handled for now. */
6418
6419 static bfd_vma
6420 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6421 elf32_vfp11_erratum_list *branch,
6422 bfd *branch_bfd,
6423 asection *branch_sec,
6424 unsigned int offset)
6425 {
6426 asection *s;
6427 struct elf32_arm_link_hash_table *hash_table;
6428 char *tmp_name;
6429 struct elf_link_hash_entry *myh;
6430 struct bfd_link_hash_entry *bh;
6431 bfd_vma val;
6432 struct _arm_elf_section_data *sec_data;
6433 elf32_vfp11_erratum_list *newerr;
6434
6435 hash_table = elf32_arm_hash_table (link_info);
6436 BFD_ASSERT (hash_table != NULL);
6437 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6438
6439 s = bfd_get_linker_section
6440 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6441
6442 sec_data = elf32_arm_section_data (s);
6443
6444 BFD_ASSERT (s != NULL);
6445
6446 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6447 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6448
6449 BFD_ASSERT (tmp_name);
6450
6451 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6452 hash_table->num_vfp11_fixes);
6453
6454 myh = elf_link_hash_lookup
6455 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6456
6457 BFD_ASSERT (myh == NULL);
6458
6459 bh = NULL;
6460 val = hash_table->vfp11_erratum_glue_size;
6461 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6462 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6463 NULL, TRUE, FALSE, &bh);
6464
6465 myh = (struct elf_link_hash_entry *) bh;
6466 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6467 myh->forced_local = 1;
6468
6469 /* Link veneer back to calling location. */
6470 sec_data->erratumcount += 1;
6471 newerr = (elf32_vfp11_erratum_list *)
6472 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6473
6474 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6475 newerr->vma = -1;
6476 newerr->u.v.branch = branch;
6477 newerr->u.v.id = hash_table->num_vfp11_fixes;
6478 branch->u.b.veneer = newerr;
6479
6480 newerr->next = sec_data->erratumlist;
6481 sec_data->erratumlist = newerr;
6482
6483 /* A symbol for the return from the veneer. */
6484 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6485 hash_table->num_vfp11_fixes);
6486
6487 myh = elf_link_hash_lookup
6488 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6489
6490 if (myh != NULL)
6491 abort ();
6492
6493 bh = NULL;
6494 val = offset + 4;
6495 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6496 branch_sec, val, NULL, TRUE, FALSE, &bh);
6497
6498 myh = (struct elf_link_hash_entry *) bh;
6499 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6500 myh->forced_local = 1;
6501
6502 free (tmp_name);
6503
6504 /* Generate a mapping symbol for the veneer section, and explicitly add an
6505 entry for that symbol to the code/data map for the section. */
6506 if (hash_table->vfp11_erratum_glue_size == 0)
6507 {
6508 bh = NULL;
6509 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6510 ever requires this erratum fix. */
6511 _bfd_generic_link_add_one_symbol (link_info,
6512 hash_table->bfd_of_glue_owner, "$a",
6513 BSF_LOCAL, s, 0, NULL,
6514 TRUE, FALSE, &bh);
6515
6516 myh = (struct elf_link_hash_entry *) bh;
6517 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6518 myh->forced_local = 1;
6519
6520 /* The elf32_arm_init_maps function only cares about symbols from input
6521 BFDs. We must make a note of this generated mapping symbol
6522 ourselves so that code byteswapping works properly in
6523 elf32_arm_write_section. */
6524 elf32_arm_section_map_add (s, 'a', 0);
6525 }
6526
6527 s->size += VFP11_ERRATUM_VENEER_SIZE;
6528 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6529 hash_table->num_vfp11_fixes++;
6530
6531 /* The offset of the veneer. */
6532 return val;
6533 }
6534
6535 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
6536 veneers need to be handled because used only in Cortex-M. */
6537
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* +10 leaves room for the decimal veneer id appended by the format.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer entry symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return symbol lives just past the erratum insn in the branch
     section.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): by this point VAL has been
     reassigned to OFFSET + 4 (the return address in the branch section),
     so that is what is returned — confirm callers expect this value.  */
  return val;
}
6653
/* Section flags for the fake glue sections: allocated, loaded, read-only
   code created by the linker itself.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
6657
6658 /* Create a fake section for use by the ARM backend of the linker. */
6659
6660 static bfd_boolean
6661 arm_make_glue_section (bfd * abfd, const char * name)
6662 {
6663 asection * sec;
6664
6665 sec = bfd_get_linker_section (abfd, name);
6666 if (sec != NULL)
6667 /* Already made. */
6668 return TRUE;
6669
6670 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6671
6672 if (sec == NULL
6673 || !bfd_set_section_alignment (abfd, sec, 2))
6674 return FALSE;
6675
6676 /* Set the gc mark to prevent the section from being removed by garbage
6677 collection, despite the fact that no relocs refer to this section. */
6678 sec->gc_mark = 1;
6679
6680 return TRUE;
6681 }
6682
6683 /* Set size of .plt entries. This function is called from the
6684 linker scripts in ld/emultempl/{armelf}.em. */
6685
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Flag that PLT entries should use the long entry format; consumed
     wherever PLT entries are sized and emitted in this file.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
6691
6692 /* Add the glue sections to ABFD. This function is called from the
6693 linker scripts in ld/emultempl/{armelf}.em. */
6694
6695 bfd_boolean
6696 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6697 struct bfd_link_info *info)
6698 {
6699 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6700 bfd_boolean dostm32l4xx = globals
6701 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6702 bfd_boolean addglue;
6703
6704 /* If we are only performing a partial
6705 link do not bother adding the glue. */
6706 if (bfd_link_relocatable (info))
6707 return TRUE;
6708
6709 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6710 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6711 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6712 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6713
6714 if (!dostm32l4xx)
6715 return addglue;
6716
6717 return addglue
6718 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6719 }
6720
6721 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
6722 ensures they are not marked for deletion by
6723 strip_excluded_output_sections () when veneers are going to be created
6724 later. Not doing so would trigger assert on empty section size in
6725 lang_size_sections_1 (). */
6726
6727 void
6728 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
6729 {
6730 enum elf32_arm_stub_type stub_type;
6731
6732 /* If we are only performing a partial
6733 link do not bother adding the glue. */
6734 if (bfd_link_relocatable (info))
6735 return;
6736
6737 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6738 {
6739 asection *out_sec;
6740 const char *out_sec_name;
6741
6742 if (!arm_dedicated_stub_output_section_required (stub_type))
6743 continue;
6744
6745 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
6746 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
6747 if (out_sec != NULL)
6748 out_sec->flags |= SEC_KEEP;
6749 }
6750 }
6751
6752 /* Select a BFD to be used to hold the sections used by the glue code.
6753 This function is called from the linker scripts in ld/emultempl/
6754 {armelf/pe}.em. */
6755
6756 bfd_boolean
6757 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6758 {
6759 struct elf32_arm_link_hash_table *globals;
6760
6761 /* If we are only performing a partial link
6762 do not bother getting a bfd to hold the glue. */
6763 if (bfd_link_relocatable (info))
6764 return TRUE;
6765
6766 /* Make sure we don't attach the glue sections to a dynamic object. */
6767 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6768
6769 globals = elf32_arm_hash_table (info);
6770 BFD_ASSERT (globals != NULL);
6771
6772 if (globals->bfd_of_glue_owner != NULL)
6773 return TRUE;
6774
6775 /* Save the bfd for later use. */
6776 globals->bfd_of_glue_owner = abfd;
6777
6778 return TRUE;
6779 }
6780
6781 static void
6782 check_use_blx (struct elf32_arm_link_hash_table *globals)
6783 {
6784 int cpu_arch;
6785
6786 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6787 Tag_CPU_arch);
6788
6789 if (globals->fix_arm1176)
6790 {
6791 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6792 globals->use_blx = 1;
6793 }
6794 else
6795 {
6796 if (cpu_arch > TAG_CPU_ARCH_V4T)
6797 globals->use_blx = 1;
6798 }
6799 }
6800
/* Scan the relocations of every section of ABFD before section sizes are
   fixed, recording which ARM<->Thumb interworking glue and ARMv4 BX
   veneers the link will need.  Returns FALSE on error.  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX is only interesting when veneers (fix_v4bx == 2)
	     were requested rather than in-place rewriting.  */
	  if (   r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX instruction's register operand (bits 0-3) selects
		 which veneer is needed.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Free per-section buffers unless they belong to the bfd's cache.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6961 #endif
6962
6963
6964 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6965
6966 void
6967 bfd_elf32_arm_init_maps (bfd *abfd)
6968 {
6969 Elf_Internal_Sym *isymbuf;
6970 Elf_Internal_Shdr *hdr;
6971 unsigned int i, localsyms;
6972
6973 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6974 if (! is_arm_elf (abfd))
6975 return;
6976
6977 if ((abfd->flags & DYNAMIC) != 0)
6978 return;
6979
6980 hdr = & elf_symtab_hdr (abfd);
6981 localsyms = hdr->sh_info;
6982
6983 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6984 should contain the number of local symbols, which should come before any
6985 global symbols. Mapping symbols are always local. */
6986 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6987 NULL);
6988
6989 /* No internal symbols read? Skip this BFD. */
6990 if (isymbuf == NULL)
6991 return;
6992
6993 for (i = 0; i < localsyms; i++)
6994 {
6995 Elf_Internal_Sym *isym = &isymbuf[i];
6996 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6997 const char *name;
6998
6999 if (sec != NULL
7000 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7001 {
7002 name = bfd_elf_string_from_elf_section (abfd,
7003 hdr->sh_link, isym->st_name);
7004
7005 if (bfd_is_arm_special_symbol_name (name,
7006 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7007 elf32_arm_section_map_add (sec, name[1], isym->st_value);
7008 }
7009 }
7010 }
7011
7012
7013 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7014 say what they wanted. */
7015
7016 void
7017 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7018 {
7019 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7020 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7021
7022 if (globals == NULL)
7023 return;
7024
7025 if (globals->fix_cortex_a8 == -1)
7026 {
7027 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7028 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7029 && (out_attr[Tag_CPU_arch_profile].i == 'A'
7030 || out_attr[Tag_CPU_arch_profile].i == 0))
7031 globals->fix_cortex_a8 = 1;
7032 else
7033 globals->fix_cortex_a8 = 0;
7034 }
7035 }
7036
7037
7038 void
7039 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
7040 {
7041 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7042 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7043
7044 if (globals == NULL)
7045 return;
7046 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
7047 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
7048 {
7049 switch (globals->vfp11_fix)
7050 {
7051 case BFD_ARM_VFP11_FIX_DEFAULT:
7052 case BFD_ARM_VFP11_FIX_NONE:
7053 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7054 break;
7055
7056 default:
7057 /* Give a warning, but do as the user requests anyway. */
7058 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
7059 "workaround is not necessary for target architecture"), obfd);
7060 }
7061 }
7062 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
7063 /* For earlier architectures, we might need the workaround, but do not
7064 enable it by default. If users is running with broken hardware, they
7065 must enable the erratum fix explicitly. */
7066 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7067 }
7068
7069 void
7070 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
7071 {
7072 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7073 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7074
7075 if (globals == NULL)
7076 return;
7077
7078 /* We assume only Cortex-M4 may require the fix. */
7079 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
7080 || out_attr[Tag_CPU_arch_profile].i != 'M')
7081 {
7082 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
7083 /* Give a warning, but do as the user requests anyway. */
7084 (*_bfd_error_handler)
7085 (_("%B: warning: selected STM32L4XX erratum "
7086 "workaround is not necessary for target architecture"), obfd);
7087 }
7088 }
7089
/* Classification of the VFP11 pipeline that executes a given instruction,
   as assigned by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
7097
7098 /* Return a VFP register number. This is encoded as RX:X for single-precision
7099 registers, or X:RX for double-precision registers, where RX is the group of
7100 four bits in the instruction encoding and X is the single extension bit.
7101 RX and X fields are specified using their lowest (starting) bit. The return
7102 value is:
7103
7104 0...31: single-precision registers s0...s31
7105 32...63: double-precision registers d0...d31.
7106
7107 Although X should be zero for VFP11 (encoding d0...d15 only), we might
7108 encounter VFP3 instructions, so we allow the full range for DP registers. */
7109
7110 static unsigned int
7111 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
7112 unsigned int x)
7113 {
7114 if (is_double)
7115 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
7116 else
7117 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
7118 }
7119
7120 /* Set bits in *WMASK according to a register number REG as encoded by
7121 bfd_arm_vfp11_regno(). Ignore d16-d31. */
7122
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Single-precision registers (0..31) occupy one
   bit each; double-precision registers d0..d15 (32..47) set the two bits
   of the SP register pair they alias.  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Use an unsigned literal: 1 << 31 would overflow a signed int.  */
    *wmask |= 1u << reg;
  else if (reg < 48)
    /* Likewise 3 << 30 for reg == 47.  */
    *wmask |= 3u << ((reg - 32) * 2);
}
7131
7132 /* Return TRUE if WMASK overwrites anything in REGS. */
7133
7134 static bfd_boolean
7135 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
7136 {
7137 int i;
7138
7139 for (i = 0; i < numregs; i++)
7140 {
7141 unsigned int reg = regs[i];
7142
7143 if (reg < 32 && (wmask & (1 << reg)) != 0)
7144 return TRUE;
7145
7146 reg -= 32;
7147
7148 if (reg >= 16)
7149 continue;
7150
7151 if ((wmask & (3 << (reg * 2))) != 0)
7152 return TRUE;
7153 }
7154
7155 return FALSE;
7156 }
7157
7158 /* In this function, we're interested in two things: finding input registers
7159 for VFP data-processing instructions, and finding the set of registers which
7160 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
7161 hold the written set, so FLDM etc. are easy to deal with (we're only
7162 interested in 32 SP registers or 16 dp registers, due to the VFP version
7163 implemented by the chip in question). DP registers are marked by setting
   both SP registers in the write mask.  */
7165
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  /* Classify INSN's execution pipeline; fill *DESTMASK with the registers
     it writes (via bfd_arm_vfp11_write_mask) and REGS[0..*NUMREGS-1] with
     the input registers of data-processing instructions.  */
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0xb select the double-precision encodings.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p (bit 23), q:r (bits 21:20) and s (bit 6) opcode
	 bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	     | ((insn & 0x00300000) >> 19)
	     | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  /* The accumulate forms also read their destination.  */
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Two-input operations: Fn and Fm read, Fd written.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* Extension opcode lives in bits 19:16 plus bit 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
				| ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means a transfer TO the VFP registers.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* Two consecutive SP registers are written.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits: W (21) and P:U (24:23).  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* The immediate gives the register count (halved for DP).  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
7349
7350
7351 static int elf32_arm_compare_mapping (const void * a, const void * b);
7352
7353
7354 /* Look for potentially-troublesome code sequences which might trigger the
7355 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7356 (available from ARM) for details of the erratum. A short version is
7357 described in ld.texinfo. */
7358
7359 bfd_boolean
7360 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7361 {
7362 asection *sec;
7363 bfd_byte *contents = NULL;
7364 int state = 0;
7365 int regs[3], numregs = 0;
7366 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7367 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7368
7369 if (globals == NULL)
7370 return FALSE;
7371
7372 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7373 The states transition as follows:
7374
7375 0 -> 1 (vector) or 0 -> 2 (scalar)
7376 A VFP FMAC-pipeline instruction has been seen. Fill
7377 regs[0]..regs[numregs-1] with its input operands. Remember this
7378 instruction in 'first_fmac'.
7379
7380 1 -> 2
7381 Any instruction, except for a VFP instruction which overwrites
7382 regs[*].
7383
7384 1 -> 3 [ -> 0 ] or
7385 2 -> 3 [ -> 0 ]
7386 A VFP instruction has been seen which overwrites any of regs[*].
7387 We must make a veneer! Reset state to 0 before examining next
7388 instruction.
7389
7390 2 -> 0
7391 If we fail to match anything in state 2, reset to state 0 and reset
7392 the instruction pointer to the instruction after 'first_fmac'.
7393
7394 If the VFP11 vector mode is in use, there must be at least two unrelated
7395 instructions between anti-dependent VFP11 instructions to properly avoid
7396 triggering the erratum, hence the use of the extra state 1. */
7397
7398 /* If we are only performing a partial link do not bother
7399 to construct any glue. */
7400 if (bfd_link_relocatable (link_info))
7401 return TRUE;
7402
7403 /* Skip if this bfd does not correspond to an ELF image. */
7404 if (! is_arm_elf (abfd))
7405 return TRUE;
7406
7407 /* We should have chosen a fix type by the time we get here. */
7408 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7409
7410 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7411 return TRUE;
7412
7413 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7414 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7415 return TRUE;
7416
7417 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7418 {
7419 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7420 struct _arm_elf_section_data *sec_data;
7421
7422 /* If we don't have executable progbits, we're not interested in this
7423 section. Also skip if section is to be excluded. */
7424 if (elf_section_type (sec) != SHT_PROGBITS
7425 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7426 || (sec->flags & SEC_EXCLUDE) != 0
7427 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7428 || sec->output_section == bfd_abs_section_ptr
7429 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7430 continue;
7431
7432 sec_data = elf32_arm_section_data (sec);
7433
7434 if (sec_data->mapcount == 0)
7435 continue;
7436
7437 if (elf_section_data (sec)->this_hdr.contents != NULL)
7438 contents = elf_section_data (sec)->this_hdr.contents;
7439 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7440 goto error_return;
7441
7442 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7443 elf32_arm_compare_mapping);
7444
7445 for (span = 0; span < sec_data->mapcount; span++)
7446 {
7447 unsigned int span_start = sec_data->map[span].vma;
7448 unsigned int span_end = (span == sec_data->mapcount - 1)
7449 ? sec->size : sec_data->map[span + 1].vma;
7450 char span_type = sec_data->map[span].type;
7451
7452 /* FIXME: Only ARM mode is supported at present. We may need to
7453 support Thumb-2 mode also at some point. */
7454 if (span_type != 'a')
7455 continue;
7456
7457 for (i = span_start; i < span_end;)
7458 {
7459 unsigned int next_i = i + 4;
7460 unsigned int insn = bfd_big_endian (abfd)
7461 ? (contents[i] << 24)
7462 | (contents[i + 1] << 16)
7463 | (contents[i + 2] << 8)
7464 | contents[i + 3]
7465 : (contents[i + 3] << 24)
7466 | (contents[i + 2] << 16)
7467 | (contents[i + 1] << 8)
7468 | contents[i];
7469 unsigned int writemask = 0;
7470 enum bfd_arm_vfp11_pipe vpipe;
7471
7472 switch (state)
7473 {
7474 case 0:
7475 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7476 &numregs);
7477 /* I'm assuming the VFP11 erratum can trigger with denorm
7478 operands on either the FMAC or the DS pipeline. This might
7479 lead to slightly overenthusiastic veneer insertion. */
7480 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7481 {
7482 state = use_vector ? 1 : 2;
7483 first_fmac = i;
7484 veneer_of_insn = insn;
7485 }
7486 break;
7487
7488 case 1:
7489 {
7490 int other_regs[3], other_numregs;
7491 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7492 other_regs,
7493 &other_numregs);
7494 if (vpipe != VFP11_BAD
7495 && bfd_arm_vfp11_antidependency (writemask, regs,
7496 numregs))
7497 state = 3;
7498 else
7499 state = 2;
7500 }
7501 break;
7502
7503 case 2:
7504 {
7505 int other_regs[3], other_numregs;
7506 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7507 other_regs,
7508 &other_numregs);
7509 if (vpipe != VFP11_BAD
7510 && bfd_arm_vfp11_antidependency (writemask, regs,
7511 numregs))
7512 state = 3;
7513 else
7514 {
7515 state = 0;
7516 next_i = first_fmac + 4;
7517 }
7518 }
7519 break;
7520
7521 case 3:
7522 abort (); /* Should be unreachable. */
7523 }
7524
7525 if (state == 3)
7526 {
7527 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7528 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7529
7530 elf32_arm_section_data (sec)->erratumcount += 1;
7531
7532 newerr->u.b.vfp_insn = veneer_of_insn;
7533
7534 switch (span_type)
7535 {
7536 case 'a':
7537 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7538 break;
7539
7540 default:
7541 abort ();
7542 }
7543
7544 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7545 first_fmac);
7546
7547 newerr->vma = -1;
7548
7549 newerr->next = sec_data->erratumlist;
7550 sec_data->erratumlist = newerr;
7551
7552 state = 0;
7553 }
7554
7555 i = next_i;
7556 }
7557 }
7558
7559 if (contents != NULL
7560 && elf_section_data (sec)->this_hdr.contents != contents)
7561 free (contents);
7562 contents = NULL;
7563 }
7564
7565 return TRUE;
7566
7567 error_return:
7568 if (contents != NULL
7569 && elf_section_data (sec)->this_hdr.contents != contents)
7570 free (contents);
7571
7572 return FALSE;
7573 }
7574
7575 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7576 after sections have been laid out, using specially-named symbols. */
7577
7578 void
7579 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7580 struct bfd_link_info *link_info)
7581 {
7582 asection *sec;
7583 struct elf32_arm_link_hash_table *globals;
7584 char *tmp_name;
7585
7586 if (bfd_link_relocatable (link_info))
7587 return;
7588
7589 /* Skip if this bfd does not correspond to an ELF image. */
7590 if (! is_arm_elf (abfd))
7591 return;
7592
7593 globals = elf32_arm_hash_table (link_info);
7594 if (globals == NULL)
7595 return;
7596
7597 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7598 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7599
7600 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7601 {
7602 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7603 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7604
7605 for (; errnode != NULL; errnode = errnode->next)
7606 {
7607 struct elf_link_hash_entry *myh;
7608 bfd_vma vma;
7609
7610 switch (errnode->type)
7611 {
7612 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7613 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7614 /* Find veneer symbol. */
7615 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7616 errnode->u.b.veneer->u.v.id);
7617
7618 myh = elf_link_hash_lookup
7619 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7620
7621 if (myh == NULL)
7622 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7623 "`%s'"), abfd, tmp_name);
7624
7625 vma = myh->root.u.def.section->output_section->vma
7626 + myh->root.u.def.section->output_offset
7627 + myh->root.u.def.value;
7628
7629 errnode->u.b.veneer->vma = vma;
7630 break;
7631
7632 case VFP11_ERRATUM_ARM_VENEER:
7633 case VFP11_ERRATUM_THUMB_VENEER:
7634 /* Find return location. */
7635 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7636 errnode->u.v.id);
7637
7638 myh = elf_link_hash_lookup
7639 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7640
7641 if (myh == NULL)
7642 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7643 "`%s'"), abfd, tmp_name);
7644
7645 vma = myh->root.u.def.section->output_section->vma
7646 + myh->root.u.def.section->output_offset
7647 + myh->root.u.def.value;
7648
7649 errnode->u.v.branch->vma = vma;
7650 break;
7651
7652 default:
7653 abort ();
7654 }
7655 }
7656 }
7657
7658 free (tmp_name);
7659 }
7660
7661 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7662 return locations after sections have been laid out, using
7663 specially-named symbols. */
7664
7665 void
7666 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7667 struct bfd_link_info *link_info)
7668 {
7669 asection *sec;
7670 struct elf32_arm_link_hash_table *globals;
7671 char *tmp_name;
7672
7673 if (bfd_link_relocatable (link_info))
7674 return;
7675
7676 /* Skip if this bfd does not correspond to an ELF image. */
7677 if (! is_arm_elf (abfd))
7678 return;
7679
7680 globals = elf32_arm_hash_table (link_info);
7681 if (globals == NULL)
7682 return;
7683
7684 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7685 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7686
7687 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7688 {
7689 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7690 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7691
7692 for (; errnode != NULL; errnode = errnode->next)
7693 {
7694 struct elf_link_hash_entry *myh;
7695 bfd_vma vma;
7696
7697 switch (errnode->type)
7698 {
7699 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7700 /* Find veneer symbol. */
7701 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7702 errnode->u.b.veneer->u.v.id);
7703
7704 myh = elf_link_hash_lookup
7705 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7706
7707 if (myh == NULL)
7708 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7709 "`%s'"), abfd, tmp_name);
7710
7711 vma = myh->root.u.def.section->output_section->vma
7712 + myh->root.u.def.section->output_offset
7713 + myh->root.u.def.value;
7714
7715 errnode->u.b.veneer->vma = vma;
7716 break;
7717
7718 case STM32L4XX_ERRATUM_VENEER:
7719 /* Find return location. */
7720 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7721 errnode->u.v.id);
7722
7723 myh = elf_link_hash_lookup
7724 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7725
7726 if (myh == NULL)
7727 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7728 "`%s'"), abfd, tmp_name);
7729
7730 vma = myh->root.u.def.section->output_section->vma
7731 + myh->root.u.def.section->output_offset
7732 + myh->root.u.def.value;
7733
7734 errnode->u.v.branch->vma = vma;
7735 break;
7736
7737 default:
7738 abort ();
7739 }
7740 }
7741 }
7742
7743 free (tmp_name);
7744 }
7745
7746 static inline bfd_boolean
7747 is_thumb2_ldmia (const insn32 insn)
7748 {
7749 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7750 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7751 return (insn & 0xffd02000) == 0xe8900000;
7752 }
7753
7754 static inline bfd_boolean
7755 is_thumb2_ldmdb (const insn32 insn)
7756 {
7757 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7758 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7759 return (insn & 0xffd02000) == 0xe9100000;
7760 }
7761
7762 static inline bfd_boolean
7763 is_thumb2_vldm (const insn32 insn)
7764 {
7765 /* A6.5 Extension register load or store instruction
7766 A7.7.229
7767 We look for SP 32-bit and DP 64-bit registers.
7768 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7769 <list> is consecutive 64-bit registers
7770 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7771 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7772 <list> is consecutive 32-bit registers
7773 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7774 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7775 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7776 return
7777 (((insn & 0xfe100f00) == 0xec100b00) ||
7778 ((insn & 0xfe100f00) == 0xec100a00))
7779 && /* (IA without !). */
7780 (((((insn << 7) >> 28) & 0xd) == 0x4)
7781 /* (IA with !), includes VPOP (when reg number is SP). */
7782 || ((((insn << 7) >> 28) & 0xd) == 0x5)
7783 /* (DB with !). */
7784 || ((((insn << 7) >> 28) & 0xd) == 0x9));
7785 }
7786
/* STM32L4XX erratum: this function assumes that it receives an LDM or
   VLDM opcode and:
   - computes the number and the mode of memory accesses
   - decides if the replacement should be done:
     . replaces only if > 8-word accesses
     . or (testing purposes only) replaces all accesses.  */
7793
7794 static bfd_boolean
7795 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7796 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7797 {
7798 int nb_words = 0;
7799
7800 /* The field encoding the register list is the same for both LDMIA
7801 and LDMDB encodings. */
7802 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7803 nb_words = popcount (insn & 0x0000ffff);
7804 else if (is_thumb2_vldm (insn))
7805 nb_words = (insn & 0xff);
7806
7807 /* DEFAULT mode accounts for the real bug condition situation,
7808 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7809 return
7810 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7811 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7812 }
7813
7814 /* Look for potentially-troublesome code sequences which might trigger
7815 the STM STM32L4XX erratum. */
7816
bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  /* Scan every executable section for LDM/VLDM instructions that need a
     replacing veneer.  */
  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to examine.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm) &&
		      stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  (*_bfd_error_handler)
			    /* Note - overlong line used here to allow for translation.  */
			    (_("\
%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
			       "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
			     abfd, sec, (long)i);
			}
		      else
			{
			  /* Queue a veneer for this instruction; its VMA is
			     resolved later by
			     bfd_elf32_arm_stm32l4xx_fix_veneer_locations.  */
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
		    ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      /* Advance past a 16- or 32-bit Thumb instruction.  */
	      i += insn_32bit ? 4 : 2;
	    }
	}

      /* Only free CONTENTS if we allocated it; it may be the cached
	 section contents.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
8007
8008 /* Set target relocation values needed during linking. */
8009
8010 void
8011 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
8012 struct bfd_link_info *link_info,
8013 int target1_is_rel,
8014 char * target2_type,
8015 int fix_v4bx,
8016 int use_blx,
8017 bfd_arm_vfp11_fix vfp11_fix,
8018 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
8019 int no_enum_warn, int no_wchar_warn,
8020 int pic_veneer, int fix_cortex_a8,
8021 int fix_arm1176)
8022 {
8023 struct elf32_arm_link_hash_table *globals;
8024
8025 globals = elf32_arm_hash_table (link_info);
8026 if (globals == NULL)
8027 return;
8028
8029 globals->target1_is_rel = target1_is_rel;
8030 if (strcmp (target2_type, "rel") == 0)
8031 globals->target2_reloc = R_ARM_REL32;
8032 else if (strcmp (target2_type, "abs") == 0)
8033 globals->target2_reloc = R_ARM_ABS32;
8034 else if (strcmp (target2_type, "got-rel") == 0)
8035 globals->target2_reloc = R_ARM_GOT_PREL;
8036 else
8037 {
8038 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
8039 target2_type);
8040 }
8041 globals->fix_v4bx = fix_v4bx;
8042 globals->use_blx |= use_blx;
8043 globals->vfp11_fix = vfp11_fix;
8044 globals->stm32l4xx_fix = stm32l4xx_fix;
8045 globals->pic_veneer = pic_veneer;
8046 globals->fix_cortex_a8 = fix_cortex_a8;
8047 globals->fix_arm1176 = fix_arm1176;
8048
8049 BFD_ASSERT (is_arm_elf (output_bfd));
8050 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
8051 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
8052 }
8053
8054 /* Replace the target offset of a Thumb bl or b.w instruction. */
8055
static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Thumb branch targets are halfword aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  /* The instruction is encoded as two little-endian halfwords.  */
  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: keep the opcode bits, insert the sign bit S
     (bit 10) and imm10 (bits 9:0, taken from offset bits 21:12).  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: insert J1 (bit 13), J2 (bit 11) and imm11
     (bits 10:0, from offset bits 11:1).  J1/J2 are the inverted
     offset bits XORed with S, per the T2 branch encoding.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
8078
8079 /* Thumb code calling an ARM function. */
8080
static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created earlier for NAME; failure means
     no glue was reserved and *ERROR_MESSAGE is set.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* The low bit of the glue symbol's value flags a stub that has not
     been written yet; clear it and emit the glue sequence.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       " first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Emit the stub: bx pc / nop, then an ARM branch to VAL.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
8179
8180 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
8181
static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created earlier for NAME; failure means
     no glue was reserved and *ERROR_MESSAGE is set.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* The low bit of the glue symbol's value flags a stub that has not
     been written yet; clear it and emit the glue sequence.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn but still emit the stub (unlike the Thumb->ARM case,
	     which fails here).  */
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       " first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
8274
8275 /* Arm code calling a Thumb function. */
8276
static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Emit (or reuse) the glue sequence that reaches VAL.  */
  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch;
     the 24-bit immediate is rebuilt below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
8331
8332 /* Populate Arm stub for an exported Thumb function. */
8333
static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  /* Hash-table traversal callback: INF is the bfd_link_info.  */
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Output-image address of the glue symbol.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
8375
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
8377
static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* bx_glue_offset encoding (as used here): bit 1 means a veneer slot
     was reserved for REG, bit 0 means it has already been populated,
     and the remaining bits hold the slot's offset in the section.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Populate the veneer (tst/moveq/bx sequence) the first time it is
     requested for this register.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  /* Return the veneer's output-image address.  */
  return glue_addr + s->output_section->vma + s->output_offset;
}
8411
8412 /* Generate Arm stubs for exported Thumb symbols. */
8413 static void
8414 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8415 struct bfd_link_info *link_info)
8416 {
8417 struct elf32_arm_link_hash_table * globals;
8418
8419 if (link_info == NULL)
8420 /* Ignore this if we are not called by the ELF backend linker. */
8421 return;
8422
8423 globals = elf32_arm_hash_table (link_info);
8424 if (globals == NULL)
8425 return;
8426
8427 /* If blx is available then exported Thumb symbols are OK and there is
8428 nothing to do. */
8429 if (globals->use_blx)
8430 return;
8431
8432 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8433 link_info);
8434 }
8435
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
8438
8439 static void
8440 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8441 bfd_size_type count)
8442 {
8443 struct elf32_arm_link_hash_table *htab;
8444
8445 htab = elf32_arm_hash_table (info);
8446 BFD_ASSERT (htab->root.dynamic_sections_created);
8447 if (sreloc == NULL)
8448 abort ();
8449 sreloc->size += RELOC_SIZE (htab) * count;
8450 }
8451
8452 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8453 dynamic, the relocations should go in SRELOC, otherwise they should
8454 go in the special .rel.iplt section. */
8455
8456 static void
8457 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8458 bfd_size_type count)
8459 {
8460 struct elf32_arm_link_hash_table *htab;
8461
8462 htab = elf32_arm_hash_table (info);
8463 if (!htab->root.dynamic_sections_created)
8464 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8465 else
8466 {
8467 BFD_ASSERT (sreloc != NULL);
8468 sreloc->size += RELOC_SIZE (htab) * count;
8469 }
8470 }
8471
8472 /* Add relocation REL to the end of relocation section SRELOC. */
8473
8474 static void
8475 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8476 asection *sreloc, Elf_Internal_Rela *rel)
8477 {
8478 bfd_byte *loc;
8479 struct elf32_arm_link_hash_table *htab;
8480
8481 htab = elf32_arm_hash_table (info);
8482 if (!htab->root.dynamic_sections_created
8483 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8484 sreloc = htab->root.irelplt;
8485 if (sreloc == NULL)
8486 abort ();
8487 loc = sreloc->contents;
8488 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8489 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8490 abort ();
8491 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8492 }
8493
8494 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8495 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8496 to .plt. */
8497
static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* Record where this symbol's PLT entry starts within .plt/.iplt.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	/* NOTE(review): the 8 * num_tls_desc term presumably discounts
	   two-word TLS descriptor slots counted in sgotplt->size —
	   confirm against the TLS_DESC allocation code.  */
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
8555
8556 static bfd_vma
8557 arm_movw_immediate (bfd_vma value)
8558 {
8559 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8560 }
8561
8562 static bfd_vma
8563 arm_movt_immediate (bfd_vma value)
8564 {
8565 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8566 }
8567
8568 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8569 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8570 Otherwise, DYNINDX is the index of the symbol in the dynamic
8571 symbol table and SYM_VALUE is undefined.
8572
8573 ROOT_PLT points to the offset of the PLT entry from the start of its
8574 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8575 bookkeeping information.
8576
8577 Returns FALSE if there was a problem. */
8578
static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      /* An ifunc entry: it lives in .iplt/.igot.plt and gets an
	 R_ARM_IRELATIVE relocation in .rel.iplt.  */
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  The low bit of got_offset is
	 used as a flag elsewhere, so mask it off here.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared-library PLT: patch the GOT offset into
	     word 2 and the .rel.plt offset into word 5 of the
	     template.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT: words 2/5 are data, word 4 is a
	     PC-relative branch back to the PLT header.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The branch immediate is 24 bits; the displacement must fit.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt build the GOT displacement; the last word gets
	     the branch to the common tail.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The shifts below scatter the
	     displacement into the Thumb-2 movw/movt immediate fields.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      /* The Thumb entry stub was allocated immediately before
		 the PLT entry (see elf32_arm_allocate_plt_entry).  */
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* Short entries can only encode a 28-bit displacement.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      /* Ordinary PLT relocations are written at their fixed index.  */
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
8899
8900 /* Some relocations map to different relocations depending on the
8901 target. Return the real relocation. */
8902
8903 static int
8904 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8905 int r_type)
8906 {
8907 switch (r_type)
8908 {
8909 case R_ARM_TARGET1:
8910 if (globals->target1_is_rel)
8911 return R_ARM_REL32;
8912 else
8913 return R_ARM_ABS32;
8914
8915 case R_ARM_TARGET2:
8916 return globals->target2_reloc;
8917
8918 default:
8919 return r_type;
8920 }
8921 }
8922
8923 /* Return the base VMA address which should be subtracted from real addresses
8924 when resolving @dtpoff relocation.
8925 This is PT_TLS segment p_vaddr. */
8926
8927 static bfd_vma
8928 dtpoff_base (struct bfd_link_info *info)
8929 {
8930 /* If tls_sec is NULL, we should have signalled an error already. */
8931 if (elf_hash_table (info)->tls_sec == NULL)
8932 return 0;
8933 return elf_hash_table (info)->tls_sec->vma;
8934 }
8935
8936 /* Return the relocation value for @tpoff relocation
8937 if STT_TLS virtual address is ADDRESS. */
8938
8939 static bfd_vma
8940 tpoff (struct bfd_link_info *info, bfd_vma address)
8941 {
8942 struct elf_link_hash_table *htab = elf_hash_table (info);
8943 bfd_vma base;
8944
8945 /* If tls_sec is NULL, we should have signalled an error already. */
8946 if (htab->tls_sec == NULL)
8947 return 0;
8948 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8949 return address - htab->tls_sec->vma + base;
8950 }
8951
8952 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8953 VALUE is the relocation value. */
8954
8955 static bfd_reloc_status_type
8956 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8957 {
8958 if (value > 0xfff)
8959 return bfd_reloc_overflow;
8960
8961 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8962 bfd_put_32 (abfd, value, data);
8963 return bfd_reloc_ok;
8964 }
8965
8966 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8967 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8968 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8969
8970 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8971 is to then call final_link_relocate. Return other values in the
8972 case of error.
8973
   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization. */
8977
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  /* NOTE(review): the low bit appears to be the Thumb bit of
	     the recorded address, selecting which PC bias to remove —
	     confirm against the GOTDESC sequence generation.  */
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Caller still needs to apply final_link_relocate.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Each instruction of the descriptor call sequence
	 is rewritten in place according to IS_LOCAL.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	/* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)	/* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)	/* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same rewrites as above, for the ARM encoding.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  The replacement occupies two halfwords.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
9110
9111 /* For a given value of n, calculate the value of G_n as required to
9112 deal with group relocations. We return it in the form of an
9113 encoded constant-and-rotation, together with the final residual. If n is
9114 specified as less than zero, then final_residual is filled with the
9115 input value and no further action is performed. */
9116
9117 static bfd_vma
9118 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
9119 {
9120 int current_n;
9121 bfd_vma g_n;
9122 bfd_vma encoded_g_n = 0;
9123 bfd_vma residual = value; /* Also known as Y_n. */
9124
9125 for (current_n = 0; current_n <= n; current_n++)
9126 {
9127 int shift;
9128
9129 /* Calculate which part of the value to mask. */
9130 if (residual == 0)
9131 shift = 0;
9132 else
9133 {
9134 int msb;
9135
9136 /* Determine the most significant bit in the residual and
9137 align the resulting value to a 2-bit boundary. */
9138 for (msb = 30; msb >= 0; msb -= 2)
9139 if (residual & (3 << msb))
9140 break;
9141
9142 /* The desired shift is now (msb - 6), or zero, whichever
9143 is the greater. */
9144 shift = msb - 6;
9145 if (shift < 0)
9146 shift = 0;
9147 }
9148
9149 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
9150 g_n = residual & (0xff << shift);
9151 encoded_g_n = (g_n >> shift)
9152 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
9153
9154 /* Calculate the residual for the next time around. */
9155 residual &= ~g_n;
9156 }
9157
9158 *final_residual = residual;
9159
9160 return encoded_g_n;
9161 }
9162
9163 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9164 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9165
9166 static int
9167 identify_add_or_sub (bfd_vma insn)
9168 {
9169 int opcode = insn & 0x1e00000;
9170
9171 if (opcode == 1 << 23) /* ADD */
9172 return 1;
9173
9174 if (opcode == 1 << 22) /* SUB */
9175 return -1;
9176
9177 return 0;
9178 }
9179
9180 /* Perform a relocation as part of a final link. */
9181
9182 static bfd_reloc_status_type
9183 elf32_arm_final_link_relocate (reloc_howto_type * howto,
9184 bfd * input_bfd,
9185 bfd * output_bfd,
9186 asection * input_section,
9187 bfd_byte * contents,
9188 Elf_Internal_Rela * rel,
9189 bfd_vma value,
9190 struct bfd_link_info * info,
9191 asection * sym_sec,
9192 const char * sym_name,
9193 unsigned char st_type,
9194 enum arm_st_branch_type branch_type,
9195 struct elf_link_hash_entry * h,
9196 bfd_boolean * unresolved_reloc_p,
9197 char ** error_message)
9198 {
9199 unsigned long r_type = howto->type;
9200 unsigned long r_symndx;
9201 bfd_byte * hit_data = contents + rel->r_offset;
9202 bfd_vma * local_got_offsets;
9203 bfd_vma * local_tlsdesc_gotents;
9204 asection * sgot;
9205 asection * splt;
9206 asection * sreloc = NULL;
9207 asection * srelgot;
9208 bfd_vma addend;
9209 bfd_signed_vma signed_addend;
9210 unsigned char dynreloc_st_type;
9211 bfd_vma dynreloc_value;
9212 struct elf32_arm_link_hash_table * globals;
9213 struct elf32_arm_link_hash_entry *eh;
9214 union gotplt_union *root_plt;
9215 struct arm_plt_info *arm_plt;
9216 bfd_vma plt_offset;
9217 bfd_vma gotplt_offset;
9218 bfd_boolean has_iplt_entry;
9219
9220 globals = elf32_arm_hash_table (info);
9221 if (globals == NULL)
9222 return bfd_reloc_notsupported;
9223
9224 BFD_ASSERT (is_arm_elf (input_bfd));
9225
9226 /* Some relocation types map to different relocations depending on the
9227 target. We pick the right one here. */
9228 r_type = arm_real_reloc_type (globals, r_type);
9229
9230 /* It is possible to have linker relaxations on some TLS access
9231 models. Update our information here. */
9232 r_type = elf32_arm_tls_transition (info, r_type, h);
9233
9234 if (r_type != howto->type)
9235 howto = elf32_arm_howto_from_type (r_type);
9236
9237 eh = (struct elf32_arm_link_hash_entry *) h;
9238 sgot = globals->root.sgot;
9239 local_got_offsets = elf_local_got_offsets (input_bfd);
9240 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9241
9242 if (globals->root.dynamic_sections_created)
9243 srelgot = globals->root.srelgot;
9244 else
9245 srelgot = NULL;
9246
9247 r_symndx = ELF32_R_SYM (rel->r_info);
9248
9249 if (globals->use_rel)
9250 {
9251 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9252
9253 if (addend & ((howto->src_mask + 1) >> 1))
9254 {
9255 signed_addend = -1;
9256 signed_addend &= ~ howto->src_mask;
9257 signed_addend |= addend;
9258 }
9259 else
9260 signed_addend = addend;
9261 }
9262 else
9263 addend = signed_addend = rel->r_addend;
9264
9265 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9266 are resolving a function call relocation. */
9267 if (using_thumb_only (globals)
9268 && (r_type == R_ARM_THM_CALL
9269 || r_type == R_ARM_THM_JUMP24)
9270 && branch_type == ST_BRANCH_TO_ARM)
9271 branch_type = ST_BRANCH_TO_THUMB;
9272
9273 /* Record the symbol information that should be used in dynamic
9274 relocations. */
9275 dynreloc_st_type = st_type;
9276 dynreloc_value = value;
9277 if (branch_type == ST_BRANCH_TO_THUMB)
9278 dynreloc_value |= 1;
9279
9280 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9281 VALUE appropriately for relocations that we resolve at link time. */
9282 has_iplt_entry = FALSE;
9283 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9284 && root_plt->offset != (bfd_vma) -1)
9285 {
9286 plt_offset = root_plt->offset;
9287 gotplt_offset = arm_plt->got_offset;
9288
9289 if (h == NULL || eh->is_iplt)
9290 {
9291 has_iplt_entry = TRUE;
9292 splt = globals->root.iplt;
9293
9294 /* Populate .iplt entries here, because not all of them will
9295 be seen by finish_dynamic_symbol. The lower bit is set if
9296 we have already populated the entry. */
9297 if (plt_offset & 1)
9298 plt_offset--;
9299 else
9300 {
9301 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9302 -1, dynreloc_value))
9303 root_plt->offset |= 1;
9304 else
9305 return bfd_reloc_notsupported;
9306 }
9307
9308 /* Static relocations always resolve to the .iplt entry. */
9309 st_type = STT_FUNC;
9310 value = (splt->output_section->vma
9311 + splt->output_offset
9312 + plt_offset);
9313 branch_type = ST_BRANCH_TO_ARM;
9314
9315 /* If there are non-call relocations that resolve to the .iplt
9316 entry, then all dynamic ones must too. */
9317 if (arm_plt->noncall_refcount != 0)
9318 {
9319 dynreloc_st_type = st_type;
9320 dynreloc_value = value;
9321 }
9322 }
9323 else
9324 /* We populate the .plt entry in finish_dynamic_symbol. */
9325 splt = globals->root.splt;
9326 }
9327 else
9328 {
9329 splt = NULL;
9330 plt_offset = (bfd_vma) -1;
9331 gotplt_offset = (bfd_vma) -1;
9332 }
9333
9334 switch (r_type)
9335 {
9336 case R_ARM_NONE:
9337 /* We don't need to find a value for this symbol. It's just a
9338 marker. */
9339 *unresolved_reloc_p = FALSE;
9340 return bfd_reloc_ok;
9341
9342 case R_ARM_ABS12:
9343 if (!globals->vxworks_p)
9344 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9345
9346 case R_ARM_PC24:
9347 case R_ARM_ABS32:
9348 case R_ARM_ABS32_NOI:
9349 case R_ARM_REL32:
9350 case R_ARM_REL32_NOI:
9351 case R_ARM_CALL:
9352 case R_ARM_JUMP24:
9353 case R_ARM_XPC25:
9354 case R_ARM_PREL31:
9355 case R_ARM_PLT32:
9356 /* Handle relocations which should use the PLT entry. ABS32/REL32
9357 will use the symbol's value, which may point to a PLT entry, but we
9358 don't need to handle that here. If we created a PLT entry, all
9359 branches in this object should go to it, except if the PLT is too
9360 far away, in which case a long branch stub should be inserted. */
9361 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9362 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9363 && r_type != R_ARM_CALL
9364 && r_type != R_ARM_JUMP24
9365 && r_type != R_ARM_PLT32)
9366 && plt_offset != (bfd_vma) -1)
9367 {
9368 /* If we've created a .plt section, and assigned a PLT entry
9369 to this function, it must either be a STT_GNU_IFUNC reference
9370 or not be known to bind locally. In other cases, we should
9371 have cleared the PLT entry by now. */
9372 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9373
9374 value = (splt->output_section->vma
9375 + splt->output_offset
9376 + plt_offset);
9377 *unresolved_reloc_p = FALSE;
9378 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9379 contents, rel->r_offset, value,
9380 rel->r_addend);
9381 }
9382
9383 /* When generating a shared object or relocatable executable, these
9384 relocations are copied into the output file to be resolved at
9385 run time. */
9386 if ((bfd_link_pic (info)
9387 || globals->root.is_relocatable_executable)
9388 && (input_section->flags & SEC_ALLOC)
9389 && !(globals->vxworks_p
9390 && strcmp (input_section->output_section->name,
9391 ".tls_vars") == 0)
9392 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9393 || !SYMBOL_CALLS_LOCAL (info, h))
9394 && !(input_bfd == globals->stub_bfd
9395 && strstr (input_section->name, STUB_SUFFIX))
9396 && (h == NULL
9397 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9398 || h->root.type != bfd_link_hash_undefweak)
9399 && r_type != R_ARM_PC24
9400 && r_type != R_ARM_CALL
9401 && r_type != R_ARM_JUMP24
9402 && r_type != R_ARM_PREL31
9403 && r_type != R_ARM_PLT32)
9404 {
9405 Elf_Internal_Rela outrel;
9406 bfd_boolean skip, relocate;
9407
9408 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9409 && !h->def_regular)
9410 {
9411 char *v = _("shared object");
9412
9413 if (bfd_link_executable (info))
9414 v = _("PIE executable");
9415
9416 (*_bfd_error_handler)
9417 (_("%B: relocation %s against external or undefined symbol `%s'"
9418 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9419 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9420 return bfd_reloc_notsupported;
9421 }
9422
9423 *unresolved_reloc_p = FALSE;
9424
9425 if (sreloc == NULL && globals->root.dynamic_sections_created)
9426 {
9427 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9428 ! globals->use_rel);
9429
9430 if (sreloc == NULL)
9431 return bfd_reloc_notsupported;
9432 }
9433
9434 skip = FALSE;
9435 relocate = FALSE;
9436
9437 outrel.r_addend = addend;
9438 outrel.r_offset =
9439 _bfd_elf_section_offset (output_bfd, info, input_section,
9440 rel->r_offset);
9441 if (outrel.r_offset == (bfd_vma) -1)
9442 skip = TRUE;
9443 else if (outrel.r_offset == (bfd_vma) -2)
9444 skip = TRUE, relocate = TRUE;
9445 outrel.r_offset += (input_section->output_section->vma
9446 + input_section->output_offset);
9447
9448 if (skip)
9449 memset (&outrel, 0, sizeof outrel);
9450 else if (h != NULL
9451 && h->dynindx != -1
9452 && (!bfd_link_pic (info)
9453 || !SYMBOLIC_BIND (info, h)
9454 || !h->def_regular))
9455 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9456 else
9457 {
9458 int symbol;
9459
9460 /* This symbol is local, or marked to become local. */
9461 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9462 if (globals->symbian_p)
9463 {
9464 asection *osec;
9465
9466 /* On Symbian OS, the data segment and text segment
9467 can be relocated independently. Therefore, we
9468 must indicate the segment to which this
9469 relocation is relative. The BPABI allows us to
9470 use any symbol in the right segment; we just use
9471 the section symbol as it is convenient. (We
9472 cannot use the symbol given by "h" directly as it
9473 will not appear in the dynamic symbol table.)
9474
9475 Note that the dynamic linker ignores the section
9476 symbol value, so we don't subtract osec->vma
9477 from the emitted reloc addend. */
9478 if (sym_sec)
9479 osec = sym_sec->output_section;
9480 else
9481 osec = input_section->output_section;
9482 symbol = elf_section_data (osec)->dynindx;
9483 if (symbol == 0)
9484 {
9485 struct elf_link_hash_table *htab = elf_hash_table (info);
9486
9487 if ((osec->flags & SEC_READONLY) == 0
9488 && htab->data_index_section != NULL)
9489 osec = htab->data_index_section;
9490 else
9491 osec = htab->text_index_section;
9492 symbol = elf_section_data (osec)->dynindx;
9493 }
9494 BFD_ASSERT (symbol != 0);
9495 }
9496 else
9497 /* On SVR4-ish systems, the dynamic loader cannot
9498 relocate the text and data segments independently,
9499 so the symbol does not matter. */
9500 symbol = 0;
9501 if (dynreloc_st_type == STT_GNU_IFUNC)
9502 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9503 to the .iplt entry. Instead, every non-call reference
9504 must use an R_ARM_IRELATIVE relocation to obtain the
9505 correct run-time address. */
9506 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9507 else
9508 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9509 if (globals->use_rel)
9510 relocate = TRUE;
9511 else
9512 outrel.r_addend += dynreloc_value;
9513 }
9514
9515 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9516
9517 /* If this reloc is against an external symbol, we do not want to
9518 fiddle with the addend. Otherwise, we need to include the symbol
9519 value so that it becomes an addend for the dynamic reloc. */
9520 if (! relocate)
9521 return bfd_reloc_ok;
9522
9523 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9524 contents, rel->r_offset,
9525 dynreloc_value, (bfd_vma) 0);
9526 }
9527 else switch (r_type)
9528 {
9529 case R_ARM_ABS12:
9530 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9531
9532 case R_ARM_XPC25: /* Arm BLX instruction. */
9533 case R_ARM_CALL:
9534 case R_ARM_JUMP24:
9535 case R_ARM_PC24: /* Arm B/BL instruction. */
9536 case R_ARM_PLT32:
9537 {
9538 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9539
9540 if (r_type == R_ARM_XPC25)
9541 {
9542 /* Check for Arm calling Arm function. */
9543 /* FIXME: Should we translate the instruction into a BL
9544 instruction instead ? */
9545 if (branch_type != ST_BRANCH_TO_THUMB)
9546 (*_bfd_error_handler)
9547 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9548 input_bfd,
9549 h ? h->root.root.string : "(local)");
9550 }
9551 else if (r_type == R_ARM_PC24)
9552 {
9553 /* Check for Arm calling Thumb function. */
9554 if (branch_type == ST_BRANCH_TO_THUMB)
9555 {
9556 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9557 output_bfd, input_section,
9558 hit_data, sym_sec, rel->r_offset,
9559 signed_addend, value,
9560 error_message))
9561 return bfd_reloc_ok;
9562 else
9563 return bfd_reloc_dangerous;
9564 }
9565 }
9566
9567 /* Check if a stub has to be inserted because the
9568 destination is too far or we are changing mode. */
9569 if ( r_type == R_ARM_CALL
9570 || r_type == R_ARM_JUMP24
9571 || r_type == R_ARM_PLT32)
9572 {
9573 enum elf32_arm_stub_type stub_type = arm_stub_none;
9574 struct elf32_arm_link_hash_entry *hash;
9575
9576 hash = (struct elf32_arm_link_hash_entry *) h;
9577 stub_type = arm_type_of_stub (info, input_section, rel,
9578 st_type, &branch_type,
9579 hash, value, sym_sec,
9580 input_bfd, sym_name);
9581
9582 if (stub_type != arm_stub_none)
9583 {
9584 /* The target is out of reach, so redirect the
9585 branch to the local stub for this function. */
9586 stub_entry = elf32_arm_get_stub_entry (input_section,
9587 sym_sec, h,
9588 rel, globals,
9589 stub_type);
9590 {
9591 if (stub_entry != NULL)
9592 value = (stub_entry->stub_offset
9593 + stub_entry->stub_sec->output_offset
9594 + stub_entry->stub_sec->output_section->vma);
9595
9596 if (plt_offset != (bfd_vma) -1)
9597 *unresolved_reloc_p = FALSE;
9598 }
9599 }
9600 else
9601 {
9602 /* If the call goes through a PLT entry, make sure to
9603 check distance to the right destination address. */
9604 if (plt_offset != (bfd_vma) -1)
9605 {
9606 value = (splt->output_section->vma
9607 + splt->output_offset
9608 + plt_offset);
9609 *unresolved_reloc_p = FALSE;
9610 /* The PLT entry is in ARM mode, regardless of the
9611 target function. */
9612 branch_type = ST_BRANCH_TO_ARM;
9613 }
9614 }
9615 }
9616
9617 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9618 where:
9619 S is the address of the symbol in the relocation.
9620 P is address of the instruction being relocated.
9621 A is the addend (extracted from the instruction) in bytes.
9622
9623 S is held in 'value'.
9624 P is the base address of the section containing the
9625 instruction plus the offset of the reloc into that
9626 section, ie:
9627 (input_section->output_section->vma +
9628 input_section->output_offset +
9629 rel->r_offset).
9630 A is the addend, converted into bytes, ie:
9631 (signed_addend * 4)
9632
9633 Note: None of these operations have knowledge of the pipeline
9634 size of the processor, thus it is up to the assembler to
9635 encode this information into the addend. */
9636 value -= (input_section->output_section->vma
9637 + input_section->output_offset);
9638 value -= rel->r_offset;
9639 if (globals->use_rel)
9640 value += (signed_addend << howto->size);
9641 else
9642 /* RELA addends do not have to be adjusted by howto->size. */
9643 value += signed_addend;
9644
9645 signed_addend = value;
9646 signed_addend >>= howto->rightshift;
9647
9648 /* A branch to an undefined weak symbol is turned into a jump to
9649 the next instruction unless a PLT entry will be created.
9650 Do the same for local undefined symbols (but not for STN_UNDEF).
9651 The jump to the next instruction is optimized as a NOP depending
9652 on the architecture. */
9653 if (h ? (h->root.type == bfd_link_hash_undefweak
9654 && plt_offset == (bfd_vma) -1)
9655 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9656 {
9657 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9658
9659 if (arch_has_arm_nop (globals))
9660 value |= 0x0320f000;
9661 else
9662 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9663 }
9664 else
9665 {
9666 /* Perform a signed range check. */
9667 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9668 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9669 return bfd_reloc_overflow;
9670
9671 addend = (value & 2);
9672
9673 value = (signed_addend & howto->dst_mask)
9674 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9675
9676 if (r_type == R_ARM_CALL)
9677 {
9678 /* Set the H bit in the BLX instruction. */
9679 if (branch_type == ST_BRANCH_TO_THUMB)
9680 {
9681 if (addend)
9682 value |= (1 << 24);
9683 else
9684 value &= ~(bfd_vma)(1 << 24);
9685 }
9686
9687 /* Select the correct instruction (BL or BLX). */
9688 /* Only if we are not handling a BL to a stub. In this
9689 case, mode switching is performed by the stub. */
9690 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9691 value |= (1 << 28);
9692 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9693 {
9694 value &= ~(bfd_vma)(1 << 28);
9695 value |= (1 << 24);
9696 }
9697 }
9698 }
9699 }
9700 break;
9701
9702 case R_ARM_ABS32:
9703 value += addend;
9704 if (branch_type == ST_BRANCH_TO_THUMB)
9705 value |= 1;
9706 break;
9707
9708 case R_ARM_ABS32_NOI:
9709 value += addend;
9710 break;
9711
9712 case R_ARM_REL32:
9713 value += addend;
9714 if (branch_type == ST_BRANCH_TO_THUMB)
9715 value |= 1;
9716 value -= (input_section->output_section->vma
9717 + input_section->output_offset + rel->r_offset);
9718 break;
9719
9720 case R_ARM_REL32_NOI:
9721 value += addend;
9722 value -= (input_section->output_section->vma
9723 + input_section->output_offset + rel->r_offset);
9724 break;
9725
9726 case R_ARM_PREL31:
9727 value -= (input_section->output_section->vma
9728 + input_section->output_offset + rel->r_offset);
9729 value += signed_addend;
9730 if (! h || h->root.type != bfd_link_hash_undefweak)
9731 {
9732 /* Check for overflow. */
9733 if ((value ^ (value >> 1)) & (1 << 30))
9734 return bfd_reloc_overflow;
9735 }
9736 value &= 0x7fffffff;
9737 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9738 if (branch_type == ST_BRANCH_TO_THUMB)
9739 value |= 1;
9740 break;
9741 }
9742
9743 bfd_put_32 (input_bfd, value, hit_data);
9744 return bfd_reloc_ok;
9745
9746 case R_ARM_ABS8:
9747 /* PR 16202: Refetch the addend using the correct size. */
9748 if (globals->use_rel)
9749 addend = bfd_get_8 (input_bfd, hit_data);
9750 value += addend;
9751
9752 /* There is no way to tell whether the user intended to use a signed or
9753 unsigned addend. When checking for overflow we accept either,
9754 as specified by the AAELF. */
9755 if ((long) value > 0xff || (long) value < -0x80)
9756 return bfd_reloc_overflow;
9757
9758 bfd_put_8 (input_bfd, value, hit_data);
9759 return bfd_reloc_ok;
9760
9761 case R_ARM_ABS16:
9762 /* PR 16202: Refetch the addend using the correct size. */
9763 if (globals->use_rel)
9764 addend = bfd_get_16 (input_bfd, hit_data);
9765 value += addend;
9766
9767 /* See comment for R_ARM_ABS8. */
9768 if ((long) value > 0xffff || (long) value < -0x8000)
9769 return bfd_reloc_overflow;
9770
9771 bfd_put_16 (input_bfd, value, hit_data);
9772 return bfd_reloc_ok;
9773
9774 case R_ARM_THM_ABS5:
9775 /* Support ldr and str instructions for the thumb. */
9776 if (globals->use_rel)
9777 {
9778 /* Need to refetch addend. */
9779 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9780 /* ??? Need to determine shift amount from operand size. */
9781 addend >>= howto->rightshift;
9782 }
9783 value += addend;
9784
9785 /* ??? Isn't value unsigned? */
9786 if ((long) value > 0x1f || (long) value < -0x10)
9787 return bfd_reloc_overflow;
9788
9789 /* ??? Value needs to be properly shifted into place first. */
9790 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9791 bfd_put_16 (input_bfd, value, hit_data);
9792 return bfd_reloc_ok;
9793
9794 case R_ARM_THM_ALU_PREL_11_0:
9795 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9796 {
9797 bfd_vma insn;
9798 bfd_signed_vma relocation;
9799
9800 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9801 | bfd_get_16 (input_bfd, hit_data + 2);
9802
9803 if (globals->use_rel)
9804 {
9805 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9806 | ((insn & (1 << 26)) >> 15);
9807 if (insn & 0xf00000)
9808 signed_addend = -signed_addend;
9809 }
9810
9811 relocation = value + signed_addend;
9812 relocation -= Pa (input_section->output_section->vma
9813 + input_section->output_offset
9814 + rel->r_offset);
9815
9816 value = relocation;
9817
9818 if (value >= 0x1000)
9819 return bfd_reloc_overflow;
9820
9821 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9822 | ((value & 0x700) << 4)
9823 | ((value & 0x800) << 15);
9824 if (relocation < 0)
9825 insn |= 0xa00000;
9826
9827 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9828 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9829
9830 return bfd_reloc_ok;
9831 }
9832
9833 case R_ARM_THM_PC8:
9834 /* PR 10073: This reloc is not generated by the GNU toolchain,
9835 but it is supported for compatibility with third party libraries
9836 generated by other compilers, specifically the ARM/IAR. */
9837 {
9838 bfd_vma insn;
9839 bfd_signed_vma relocation;
9840
9841 insn = bfd_get_16 (input_bfd, hit_data);
9842
9843 if (globals->use_rel)
9844 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9845
9846 relocation = value + addend;
9847 relocation -= Pa (input_section->output_section->vma
9848 + input_section->output_offset
9849 + rel->r_offset);
9850
9851 value = relocation;
9852
9853 /* We do not check for overflow of this reloc. Although strictly
9854 speaking this is incorrect, it appears to be necessary in order
9855 to work with IAR generated relocs. Since GCC and GAS do not
9856 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9857 a problem for them. */
9858 value &= 0x3fc;
9859
9860 insn = (insn & 0xff00) | (value >> 2);
9861
9862 bfd_put_16 (input_bfd, insn, hit_data);
9863
9864 return bfd_reloc_ok;
9865 }
9866
9867 case R_ARM_THM_PC12:
9868 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9869 {
9870 bfd_vma insn;
9871 bfd_signed_vma relocation;
9872
9873 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9874 | bfd_get_16 (input_bfd, hit_data + 2);
9875
9876 if (globals->use_rel)
9877 {
9878 signed_addend = insn & 0xfff;
9879 if (!(insn & (1 << 23)))
9880 signed_addend = -signed_addend;
9881 }
9882
9883 relocation = value + signed_addend;
9884 relocation -= Pa (input_section->output_section->vma
9885 + input_section->output_offset
9886 + rel->r_offset);
9887
9888 value = relocation;
9889
9890 if (value >= 0x1000)
9891 return bfd_reloc_overflow;
9892
9893 insn = (insn & 0xff7ff000) | value;
9894 if (relocation >= 0)
9895 insn |= (1 << 23);
9896
9897 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9898 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9899
9900 return bfd_reloc_ok;
9901 }
9902
9903 case R_ARM_THM_XPC22:
9904 case R_ARM_THM_CALL:
9905 case R_ARM_THM_JUMP24:
9906 /* Thumb BL (branch long instruction). */
9907 {
9908 bfd_vma relocation;
9909 bfd_vma reloc_sign;
9910 bfd_boolean overflow = FALSE;
9911 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9912 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9913 bfd_signed_vma reloc_signed_max;
9914 bfd_signed_vma reloc_signed_min;
9915 bfd_vma check;
9916 bfd_signed_vma signed_check;
9917 int bitsize;
9918 const int thumb2 = using_thumb2 (globals);
9919 const int thumb2_bl = using_thumb2_bl (globals);
9920
9921 /* A branch to an undefined weak symbol is turned into a jump to
9922 the next instruction unless a PLT entry will be created.
9923 The jump to the next instruction is optimized as a NOP.W for
9924 Thumb-2 enabled architectures. */
9925 if (h && h->root.type == bfd_link_hash_undefweak
9926 && plt_offset == (bfd_vma) -1)
9927 {
9928 if (thumb2)
9929 {
9930 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9931 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9932 }
9933 else
9934 {
9935 bfd_put_16 (input_bfd, 0xe000, hit_data);
9936 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9937 }
9938 return bfd_reloc_ok;
9939 }
9940
9941 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9942 with Thumb-1) involving the J1 and J2 bits. */
9943 if (globals->use_rel)
9944 {
9945 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9946 bfd_vma upper = upper_insn & 0x3ff;
9947 bfd_vma lower = lower_insn & 0x7ff;
9948 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9949 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9950 bfd_vma i1 = j1 ^ s ? 0 : 1;
9951 bfd_vma i2 = j2 ^ s ? 0 : 1;
9952
9953 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9954 /* Sign extend. */
9955 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9956
9957 signed_addend = addend;
9958 }
9959
9960 if (r_type == R_ARM_THM_XPC22)
9961 {
9962 /* Check for Thumb to Thumb call. */
9963 /* FIXME: Should we translate the instruction into a BL
9964 instruction instead ? */
9965 if (branch_type == ST_BRANCH_TO_THUMB)
9966 (*_bfd_error_handler)
9967 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9968 input_bfd,
9969 h ? h->root.root.string : "(local)");
9970 }
9971 else
9972 {
9973 /* If it is not a call to Thumb, assume call to Arm.
9974 If it is a call relative to a section name, then it is not a
9975 function call at all, but rather a long jump. Calls through
9976 the PLT do not require stubs. */
9977 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9978 {
9979 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9980 {
9981 /* Convert BL to BLX. */
9982 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9983 }
9984 else if (( r_type != R_ARM_THM_CALL)
9985 && (r_type != R_ARM_THM_JUMP24))
9986 {
9987 if (elf32_thumb_to_arm_stub
9988 (info, sym_name, input_bfd, output_bfd, input_section,
9989 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9990 error_message))
9991 return bfd_reloc_ok;
9992 else
9993 return bfd_reloc_dangerous;
9994 }
9995 }
9996 else if (branch_type == ST_BRANCH_TO_THUMB
9997 && globals->use_blx
9998 && r_type == R_ARM_THM_CALL)
9999 {
10000 /* Make sure this is a BL. */
10001 lower_insn |= 0x1800;
10002 }
10003 }
10004
10005 enum elf32_arm_stub_type stub_type = arm_stub_none;
10006 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
10007 {
10008 /* Check if a stub has to be inserted because the destination
10009 is too far. */
10010 struct elf32_arm_stub_hash_entry *stub_entry;
10011 struct elf32_arm_link_hash_entry *hash;
10012
10013 hash = (struct elf32_arm_link_hash_entry *) h;
10014
10015 stub_type = arm_type_of_stub (info, input_section, rel,
10016 st_type, &branch_type,
10017 hash, value, sym_sec,
10018 input_bfd, sym_name);
10019
10020 if (stub_type != arm_stub_none)
10021 {
		/* The target is out of reach or we are changing modes, so
		   redirect the branch to the local stub for this
		   function.  */
		stub_entry = elf32_arm_get_stub_entry (input_section,
						       sym_sec, h,
						       rel, globals,
						       stub_type);
		if (stub_entry != NULL)
		  {
		    /* Branch to the stub rather than the real target.  */
		    value = (stub_entry->stub_offset
			     + stub_entry->stub_sec->output_offset
			     + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }

		/* If this call becomes a call to Arm, force BLX.  */
		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		  {
		    if ((stub_entry
			 && !arm_stub_is_thumb (stub_entry->stub_type))
			|| branch_type != ST_BRANCH_TO_THUMB)
		      /* Clear bit 12 and set bit 11: BL -> BLX.  */
		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
	      }
	  }

	/* Handle calls via the PLT.  */
	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);

	    if (globals->use_blx
		&& r_type == R_ARM_THM_CALL
		&& ! using_thumb_only (globals))
	      {
		/* If the Thumb BLX instruction is available, convert
		   the BL to a BLX instruction to call the ARM-mode
		   PLT entry.  */
		lower_insn = (lower_insn & ~0x1000) | 0x0800;
		branch_type = ST_BRANCH_TO_ARM;
	      }
	    else
	      {
		if (! using_thumb_only (globals))
		  /* Target the Thumb stub before the ARM PLT entry.  */
		  value -= PLT_THUMB_STUB_SIZE;
		branch_type = ST_BRANCH_TO_THUMB;
	      }
	    *unresolved_reloc_p = FALSE;
	  }

	/* Branch displacement: target minus the place being relocated.  */
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	check = relocation >> howto->rightshift;

	/* If this is a signed value, the rightshift just dropped
	   leading 1 bits (assuming twos complement).  */
	if ((bfd_signed_vma) relocation >= 0)
	  signed_check = check;
	else
	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	/* Calculate the permissable maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  Thumb-1 BL has a 2-bit smaller range.  */
	bitsize = howto->bitsize;
	if (!thumb2_bl)
	  bitsize -= 2;
	reloc_signed_max = (1 << (bitsize - 1)) - 1;
	reloc_signed_min = ~reloc_signed_max;

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	if ((lower_insn & 0x5000) == 0x4000)
	  /* For a BLX instruction, make sure that the relocation is rounded up
	     to a word boundary.  This follows the semantics of the instruction
	     which specifies that bit 1 of the target address will come from bit
	     1 of the base address.  */
	  relocation = (relocation + 2) & ~ 3;

	/* Put RELOCATION back into the insn.  Assumes two's complement.
	   We use the Thumb-2 encoding, which is safe even if dealing with
	   a Thumb-1 instruction by virtue of our overflow check above.  */
	reloc_sign = (signed_check < 0) ? 1 : 0;
	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		     | ((relocation >> 12) & 0x3ff)
		     | (reloc_sign << 10);
	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		     | ((relocation >> 1) & 0x7ff);

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
      break;
10131
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	/* 21-bit signed displacement range: +0xffffe .. -0x100000.  */
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    /* S, J1, J2 carry the sign and upper bits of the offset.  */
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    /* Out of range or mode change: branch to the stub instead.  */
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
10223
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		/* Top bit of the field is set: sign-extend the addend.  */
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  /* CZB splits the offset between bits 9..3 of the insn.  */
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
10278
10279 case R_ARM_ALU_PCREL7_0:
10280 case R_ARM_ALU_PCREL15_8:
10281 case R_ARM_ALU_PCREL23_15:
10282 {
10283 bfd_vma insn;
10284 bfd_vma relocation;
10285
10286 insn = bfd_get_32 (input_bfd, hit_data);
10287 if (globals->use_rel)
10288 {
10289 /* Extract the addend. */
10290 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10291 signed_addend = addend;
10292 }
10293 relocation = value + signed_addend;
10294
10295 relocation -= (input_section->output_section->vma
10296 + input_section->output_offset
10297 + rel->r_offset);
10298 insn = (insn & ~0xfff)
10299 | ((howto->bitpos << 7) & 0xf00)
10300 | ((relocation >> howto->bitpos) & 0xff);
10301 bfd_put_32 (input_bfd, value, hit_data);
10302 }
10303 return bfd_reloc_ok;
10304
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These are only used by the garbage-collection machinery;
	 nothing to patch in the section contents.  */
      return bfd_reloc_ok;
10308
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10332
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* The GOT address is known at final link time, so this is
	 resolved here regardless of the symbol.  */
      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10345
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: initialize its GOT slot (once) and emit the
	     dynamic relocation for it if one is needed.  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;

	      if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info) &&
			   (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			    || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (outrel.r_info == 0 || globals->use_rel))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      /* Mark the slot as processed (low bit of the offset).  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: use the per-bfd local GOT offset table.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL &&
		      local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      if (globals->use_rel)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
		{
		  Elf_Internal_Rela outrel;

		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* R_ARM_GOT_PREL wants the absolute GOT-entry address; R_ARM_GOT32
	 wants it relative to the GOT base.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10478
    case R_ARM_TLS_LDO32:
      /* Offset of the symbol within the TLS block (local-dynamic).  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10485
    case R_ARM_TLS_LDM32:
      /* Local-dynamic: resolve to the (shared) module-ID GOT slot.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	/* Low bit records whether the slot has been initialized.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static link: the executable is always module 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	/* PC-relative distance from the place to the GOT slot.  */
	value = sgot->output_section->vma + sgot->output_offset + off
	  - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10532
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic, initial-exec and TLS-descriptor accesses:
	 initialize the GOT (and .got.plt for descriptors) slots for the
	 symbol, emit any dynamic relocations needed, then resolve the
	 instruction to the appropriate slot or trampoline.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happen from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	/* Low bit of OFF records whether the slots are initialized.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_pic (info) || indx != 0)
		&& (h == NULL
		    || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_pic (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		if (need_relocs)
		  {
		    /* Module-ID word, then DTP-relative offset word.  */
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* Skip the GD slots when the reloc wants the IE slot.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* ARM BL/BLX to the trampoline; PC bias is 8.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations need special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    data = bfd_get_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1u;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10880
10881 case R_ARM_TLS_LE32:
10882 if (bfd_link_dll (info))
10883 {
10884 (*_bfd_error_handler)
10885 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10886 input_bfd, input_section,
10887 (long) rel->r_offset, howto->name);
10888 return bfd_reloc_notsupported;
10889 }
10890 else
10891 value = tpoff (info, value);
10892
10893 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10894 contents, rel->r_offset, value,
10895 rel->r_addend);
10896
    case R_ARM_V4BX:
      /* Rewrite ARMv4 "BX Rm" either into "MOV PC, Rm" or into a branch
	 to an interworking veneer, depending on --fix-v4bx mode.  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* PC-relative offset with the usual 8-byte ARM PC bias.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
10926
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit immediate from imm4 (bits 19:16)
	       and imm12 (bits 11:0), then sign-extend from 16 bits.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high half of the value.  */
	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
10969
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      {
	bfd_vma insn;

	/* The Thumb-2 instruction is stored as two halfwords.  */
	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit immediate from the imm4, i, imm3
	       and imm8 fields, then sign-extend from 16 bits.  */
	    addend = ((insn >> 4)  & 0xf000)
		     | ((insn >> 15) & 0x0800)
		     | ((insn >> 4)  & 0x0700)
		     | (insn         & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high half of the value.  */
	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
11023
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      /* ALU group relocations: split a PC- or SB-relative offset across a
	 chain of ADD/SUB instructions, one rotated-immediate "group" per
	 relocation.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		(*_bfd_error_handler)
		  (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
		   input_bfd, input_section,
		   (long) rel->r_offset, howto->name);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  Only the non-_NC variants
	   require the whole value to be consumed by this group.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11162
11163 case R_ARM_LDR_PC_G0:
11164 case R_ARM_LDR_PC_G1:
11165 case R_ARM_LDR_PC_G2:
11166 case R_ARM_LDR_SB_G0:
11167 case R_ARM_LDR_SB_G1:
11168 case R_ARM_LDR_SB_G2:
11169 {
11170 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11171 bfd_vma pc = input_section->output_section->vma
11172 + input_section->output_offset + rel->r_offset;
11173 /* sb is the origin of the *segment* containing the symbol. */
11174 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11175 bfd_vma residual;
11176 bfd_signed_vma signed_value;
11177 int group = 0;
11178
11179 /* Determine which groups of bits to calculate. */
11180 switch (r_type)
11181 {
11182 case R_ARM_LDR_PC_G0:
11183 case R_ARM_LDR_SB_G0:
11184 group = 0;
11185 break;
11186
11187 case R_ARM_LDR_PC_G1:
11188 case R_ARM_LDR_SB_G1:
11189 group = 1;
11190 break;
11191
11192 case R_ARM_LDR_PC_G2:
11193 case R_ARM_LDR_SB_G2:
11194 group = 2;
11195 break;
11196
11197 default:
11198 abort ();
11199 }
11200
11201 /* If REL, extract the addend from the insn. If RELA, it will
11202 have already been fetched for us. */
11203 if (globals->use_rel)
11204 {
11205 int negative = (insn & (1 << 23)) ? 1 : -1;
11206 signed_addend = negative * (insn & 0xfff);
11207 }
11208
11209 /* Compute the value (X) to go in the place. */
11210 if (r_type == R_ARM_LDR_PC_G0
11211 || r_type == R_ARM_LDR_PC_G1
11212 || r_type == R_ARM_LDR_PC_G2)
11213 /* PC relative. */
11214 signed_value = value - pc + signed_addend;
11215 else
11216 /* Section base relative. */
11217 signed_value = value - sb + signed_addend;
11218
11219 /* Calculate the value of the relevant G_{n-1} to obtain
11220 the residual at that stage. */
11221 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11222 group - 1, &residual);
11223
11224 /* Check for overflow. */
11225 if (residual >= 0x1000)
11226 {
11227 (*_bfd_error_handler)
11228 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11229 input_bfd, input_section,
11230 (long) rel->r_offset, labs (signed_value), howto->name);
11231 return bfd_reloc_overflow;
11232 }
11233
11234 /* Mask out the value and U bit. */
11235 insn &= 0xff7ff000;
11236
11237 /* Set the U bit if the value to go in the place is non-negative. */
11238 if (signed_value >= 0)
11239 insn |= 1 << 23;
11240
11241 /* Encode the offset. */
11242 insn |= residual;
11243
11244 bfd_put_32 (input_bfd, insn, hit_data);
11245 }
11246 return bfd_reloc_ok;
11247
11248 case R_ARM_LDRS_PC_G0:
11249 case R_ARM_LDRS_PC_G1:
11250 case R_ARM_LDRS_PC_G2:
11251 case R_ARM_LDRS_SB_G0:
11252 case R_ARM_LDRS_SB_G1:
11253 case R_ARM_LDRS_SB_G2:
11254 {
11255 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11256 bfd_vma pc = input_section->output_section->vma
11257 + input_section->output_offset + rel->r_offset;
11258 /* sb is the origin of the *segment* containing the symbol. */
11259 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11260 bfd_vma residual;
11261 bfd_signed_vma signed_value;
11262 int group = 0;
11263
11264 /* Determine which groups of bits to calculate. */
11265 switch (r_type)
11266 {
11267 case R_ARM_LDRS_PC_G0:
11268 case R_ARM_LDRS_SB_G0:
11269 group = 0;
11270 break;
11271
11272 case R_ARM_LDRS_PC_G1:
11273 case R_ARM_LDRS_SB_G1:
11274 group = 1;
11275 break;
11276
11277 case R_ARM_LDRS_PC_G2:
11278 case R_ARM_LDRS_SB_G2:
11279 group = 2;
11280 break;
11281
11282 default:
11283 abort ();
11284 }
11285
11286 /* If REL, extract the addend from the insn. If RELA, it will
11287 have already been fetched for us. */
11288 if (globals->use_rel)
11289 {
11290 int negative = (insn & (1 << 23)) ? 1 : -1;
11291 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
11292 }
11293
11294 /* Compute the value (X) to go in the place. */
11295 if (r_type == R_ARM_LDRS_PC_G0
11296 || r_type == R_ARM_LDRS_PC_G1
11297 || r_type == R_ARM_LDRS_PC_G2)
11298 /* PC relative. */
11299 signed_value = value - pc + signed_addend;
11300 else
11301 /* Section base relative. */
11302 signed_value = value - sb + signed_addend;
11303
11304 /* Calculate the value of the relevant G_{n-1} to obtain
11305 the residual at that stage. */
11306 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11307 group - 1, &residual);
11308
11309 /* Check for overflow. */
11310 if (residual >= 0x100)
11311 {
11312 (*_bfd_error_handler)
11313 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11314 input_bfd, input_section,
11315 (long) rel->r_offset, labs (signed_value), howto->name);
11316 return bfd_reloc_overflow;
11317 }
11318
11319 /* Mask out the value and U bit. */
11320 insn &= 0xff7ff0f0;
11321
11322 /* Set the U bit if the value to go in the place is non-negative. */
11323 if (signed_value >= 0)
11324 insn |= 1 << 23;
11325
11326 /* Encode the offset. */
11327 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11328
11329 bfd_put_32 (input_bfd, insn, hit_data);
11330 }
11331 return bfd_reloc_ok;
11332
11333 case R_ARM_LDC_PC_G0:
11334 case R_ARM_LDC_PC_G1:
11335 case R_ARM_LDC_PC_G2:
11336 case R_ARM_LDC_SB_G0:
11337 case R_ARM_LDC_SB_G1:
11338 case R_ARM_LDC_SB_G2:
11339 {
11340 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11341 bfd_vma pc = input_section->output_section->vma
11342 + input_section->output_offset + rel->r_offset;
11343 /* sb is the origin of the *segment* containing the symbol. */
11344 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11345 bfd_vma residual;
11346 bfd_signed_vma signed_value;
11347 int group = 0;
11348
11349 /* Determine which groups of bits to calculate. */
11350 switch (r_type)
11351 {
11352 case R_ARM_LDC_PC_G0:
11353 case R_ARM_LDC_SB_G0:
11354 group = 0;
11355 break;
11356
11357 case R_ARM_LDC_PC_G1:
11358 case R_ARM_LDC_SB_G1:
11359 group = 1;
11360 break;
11361
11362 case R_ARM_LDC_PC_G2:
11363 case R_ARM_LDC_SB_G2:
11364 group = 2;
11365 break;
11366
11367 default:
11368 abort ();
11369 }
11370
11371 /* If REL, extract the addend from the insn. If RELA, it will
11372 have already been fetched for us. */
11373 if (globals->use_rel)
11374 {
11375 int negative = (insn & (1 << 23)) ? 1 : -1;
11376 signed_addend = negative * ((insn & 0xff) << 2);
11377 }
11378
11379 /* Compute the value (X) to go in the place. */
11380 if (r_type == R_ARM_LDC_PC_G0
11381 || r_type == R_ARM_LDC_PC_G1
11382 || r_type == R_ARM_LDC_PC_G2)
11383 /* PC relative. */
11384 signed_value = value - pc + signed_addend;
11385 else
11386 /* Section base relative. */
11387 signed_value = value - sb + signed_addend;
11388
11389 /* Calculate the value of the relevant G_{n-1} to obtain
11390 the residual at that stage. */
11391 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11392 group - 1, &residual);
11393
11394 /* Check for overflow. (The absolute value to go in the place must be
11395 divisible by four and, after having been divided by four, must
11396 fit in eight bits.) */
11397 if ((residual & 0x3) != 0 || residual >= 0x400)
11398 {
11399 (*_bfd_error_handler)
11400 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11401 input_bfd, input_section,
11402 (long) rel->r_offset, labs (signed_value), howto->name);
11403 return bfd_reloc_overflow;
11404 }
11405
11406 /* Mask out the value and U bit. */
11407 insn &= 0xff7fff00;
11408
11409 /* Set the U bit if the value to go in the place is non-negative. */
11410 if (signed_value >= 0)
11411 insn |= 1 << 23;
11412
11413 /* Encode the offset. */
11414 insn |= residual >> 2;
11415
11416 bfd_put_32 (input_bfd, insn, hit_data);
11417 }
11418 return bfd_reloc_ok;
11419
11420 case R_ARM_THM_ALU_ABS_G0_NC:
11421 case R_ARM_THM_ALU_ABS_G1_NC:
11422 case R_ARM_THM_ALU_ABS_G2_NC:
11423 case R_ARM_THM_ALU_ABS_G3_NC:
11424 {
11425 const int shift_array[4] = {0, 8, 16, 24};
11426 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11427 bfd_vma addr = value;
11428 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11429
11430 /* Compute address. */
11431 if (globals->use_rel)
11432 signed_addend = insn & 0xff;
11433 addr += signed_addend;
11434 if (branch_type == ST_BRANCH_TO_THUMB)
11435 addr |= 1;
11436 /* Clean imm8 insn. */
11437 insn &= 0xff00;
11438 /* And update with correct part of address. */
11439 insn |= (addr >> shift) & 0xff;
11440 /* Update insn. */
11441 bfd_put_16 (input_bfd, insn, hit_data);
11442 }
11443
11444 *unresolved_reloc_p = FALSE;
11445 return bfd_reloc_ok;
11446
11447 default:
11448 return bfd_reloc_notsupported;
11449 }
11450 }
11451
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  Used on
   REL-format targets during a relocatable link, where the addend is
   stored in the section contents and must be rewritten in place.  */
static void
arm_add_to_rel (bfd * abfd,
		bfd_byte * address,
		reloc_howto_type * howto,
		bfd_signed_vma increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      /* The addend of a Thumb BL/B.W is split across the two 16-bit
	 halves of the instruction: 11 bits in each half, with the
	 lower half holding halfword-aligned bits (hence the << 1).  */
      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reassemble the addend, adjust it, then drop the halfword
	 alignment bit again before re-encoding.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      /* Sign-extend: if the top bit of the field is set, OR in ones
	 above the field.  */
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* Branch offsets are stored scaled; convert to bytes before
	     adding the (byte) increment, then rescale.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
11525
/* Non-zero if R_TYPE is any ARM TLS relocation, including the
   descriptor-based GNU dialect (see IS_ARM_TLS_GNU_RELOC).  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11544
/* Relocate an ARM ELF section.

   Applies the relocations in RELOCS to the data in CONTENTS for
   INPUT_SECTION of INPUT_BFD, resolving symbols via LOCAL_SYMS /
   LOCAL_SECTIONS (locals) or the hash table (globals).  Returns TRUE
   on success, FALSE after reporting a hard error.  */

static bfd_boolean
elf32_arm_relocate_section (bfd * output_bfd,
			    struct bfd_link_info * info,
			    bfd * input_bfd,
			    asection * input_section,
			    bfd_byte * contents,
			    Elf_Internal_Rela * relocs,
			    Elf_Internal_Sym * local_syms,
			    asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection markers only; they
	 produce no output.  */
      if ( r_type == R_ARM_GNU_VTENTRY
	   || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Relocation against a local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      /* For REL links against SEC_MERGE sections the addend is
		 in the instruction; extract it, let the merge code map
		 it, and write the adjusted value back.  */
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend the 16-bit immediate.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple contiguous-mask, unshifted fields can
			 be handled generically.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Relocation against a global symbol: resolve through the
	     linker hash table.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Work out a printable symbol name for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose a TLS reloc used with a non-TLS symbol (or vice
	 versa) for defined symbols.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
11882
11883 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11884 adds the edit to the start of the list. (The list must be built in order of
11885 ascending TINDEX: the function's callers are primarily responsible for
11886 maintaining that condition). */
11887
11888 static void
11889 add_unwind_table_edit (arm_unwind_table_edit **head,
11890 arm_unwind_table_edit **tail,
11891 arm_unwind_edit_type type,
11892 asection *linked_section,
11893 unsigned int tindex)
11894 {
11895 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11896 xmalloc (sizeof (arm_unwind_table_edit));
11897
11898 new_edit->type = type;
11899 new_edit->linked_section = linked_section;
11900 new_edit->index = tindex;
11901
11902 if (tindex > 0)
11903 {
11904 new_edit->next = NULL;
11905
11906 if (*tail)
11907 (*tail)->next = new_edit;
11908
11909 (*tail) = new_edit;
11910
11911 if (!*head)
11912 (*head) = new_edit;
11913 }
11914 else
11915 {
11916 new_edit->next = *head;
11917
11918 if (!*tail)
11919 *tail = new_edit;
11920
11921 *head = new_edit;
11922 }
11923 }
11924
11925 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11926
/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
static void
adjust_exidx_size(asection *exidx_sec, int adjust)
{
  asection *out_sec;

  /* Record the pre-edit size the first time the section is resized.  */
  if (!exidx_sec->rawsize)
    exidx_sec->rawsize = exidx_sec->size;

  bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
  out_sec = exidx_sec->output_section;
  /* Adjust size of output section.  */
  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
}
11941
/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  TEXT_SEC is
   the text section the marker covers; EXIDX_SEC is its .ARM.exidx table.  */
static void
insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
{
  struct _arm_elf_section_data *exidx_arm_data;

  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
  /* A tindex of UINT_MAX (non-zero) appends the edit at the list tail
     (see add_unwind_table_edit).  */
  add_unwind_table_edit (
    &exidx_arm_data->u.exidx.unwind_edit_list,
    &exidx_arm_data->u.exidx.unwind_edit_tail,
    INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);

  exidx_arm_data->additional_reloc_count++;

  /* Each EXIDX table entry occupies 8 bytes (two 32-bit words).  */
  adjust_exidx_size(exidx_sec, 8);
}
11958
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* -1: no entry seen yet; 0: CANTUNWIND; 1: inlined data; 2: table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table entries (8 bytes each); the second word of an
	 entry determines its kind.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
12143
12144 static bfd_boolean
12145 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12146 bfd *ibfd, const char *name)
12147 {
12148 asection *sec, *osec;
12149
12150 sec = bfd_get_linker_section (ibfd, name);
12151 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12152 return TRUE;
12153
12154 osec = sec->output_section;
12155 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12156 return TRUE;
12157
12158 if (! bfd_set_section_contents (obfd, osec, sec->contents,
12159 sec->output_offset, sec->size))
12160 return FALSE;
12161
12162 return TRUE;
12163 }
12164
12165 static bfd_boolean
12166 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12167 {
12168 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12169 asection *sec, *osec;
12170
12171 if (globals == NULL)
12172 return FALSE;
12173
12174 /* Invoke the regular ELF backend linker to do all the work. */
12175 if (!bfd_elf_final_link (abfd, info))
12176 return FALSE;
12177
12178 /* Process stub sections (eg BE8 encoding, ...). */
12179 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12180 unsigned int i;
12181 for (i=0; i<htab->top_id; i++)
12182 {
12183 sec = htab->stub_group[i].stub_sec;
12184 /* Only process it once, in its link_sec slot. */
12185 if (sec && i == htab->stub_group[i].link_sec->id)
12186 {
12187 osec = sec->output_section;
12188 elf32_arm_write_section (abfd, info, sec, sec->contents);
12189 if (! bfd_set_section_contents (abfd, osec, sec->contents,
12190 sec->output_offset, sec->size))
12191 return FALSE;
12192 }
12193 }
12194
12195 /* Write out any glue sections now that we have created all the
12196 stubs. */
12197 if (globals->bfd_of_glue_owner != NULL)
12198 {
12199 if (! elf32_arm_output_glue_section (info, abfd,
12200 globals->bfd_of_glue_owner,
12201 ARM2THUMB_GLUE_SECTION_NAME))
12202 return FALSE;
12203
12204 if (! elf32_arm_output_glue_section (info, abfd,
12205 globals->bfd_of_glue_owner,
12206 THUMB2ARM_GLUE_SECTION_NAME))
12207 return FALSE;
12208
12209 if (! elf32_arm_output_glue_section (info, abfd,
12210 globals->bfd_of_glue_owner,
12211 VFP11_ERRATUM_VENEER_SECTION_NAME))
12212 return FALSE;
12213
12214 if (! elf32_arm_output_glue_section (info, abfd,
12215 globals->bfd_of_glue_owner,
12216 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12217 return FALSE;
12218
12219 if (! elf32_arm_output_glue_section (info, abfd,
12220 globals->bfd_of_glue_owner,
12221 ARM_BX_GLUE_SECTION_NAME))
12222 return FALSE;
12223 }
12224
12225 return TRUE;
12226 }
12227
12228 /* Return a best guess for the machine number based on the attributes. */
12229
12230 static unsigned int
12231 bfd_arm_get_mach_from_attributes (bfd * abfd)
12232 {
12233 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12234
12235 switch (arch)
12236 {
12237 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12238 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12239 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12240
12241 case TAG_CPU_ARCH_V5TE:
12242 {
12243 char * name;
12244
12245 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12246 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12247
12248 if (name)
12249 {
12250 if (strcmp (name, "IWMMXT2") == 0)
12251 return bfd_mach_arm_iWMMXt2;
12252
12253 if (strcmp (name, "IWMMXT") == 0)
12254 return bfd_mach_arm_iWMMXt;
12255
12256 if (strcmp (name, "XSCALE") == 0)
12257 {
12258 int wmmx;
12259
12260 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12261 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12262 switch (wmmx)
12263 {
12264 case 1: return bfd_mach_arm_iWMMXt;
12265 case 2: return bfd_mach_arm_iWMMXt2;
12266 default: return bfd_mach_arm_XScale;
12267 }
12268 }
12269 }
12270
12271 return bfd_mach_arm_5TE;
12272 }
12273
12274 default:
12275 return bfd_mach_arm_unknown;
12276 }
12277 }
12278
12279 /* Set the right machine number. */
12280
12281 static bfd_boolean
12282 elf32_arm_object_p (bfd *abfd)
12283 {
12284 unsigned int mach;
12285
12286 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12287
12288 if (mach == bfd_mach_arm_unknown)
12289 {
12290 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12291 mach = bfd_mach_arm_ep9312;
12292 else
12293 mach = bfd_arm_get_mach_from_attributes (abfd);
12294 }
12295
12296 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12297 return TRUE;
12298 }
12299
12300 /* Function to keep ARM specific flags in the ELF header. */
12301
12302 static bfd_boolean
12303 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12304 {
12305 if (elf_flags_init (abfd)
12306 && elf_elfheader (abfd)->e_flags != flags)
12307 {
12308 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12309 {
12310 if (flags & EF_ARM_INTERWORK)
12311 (*_bfd_error_handler)
12312 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12313 abfd);
12314 else
12315 _bfd_error_handler
12316 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12317 abfd);
12318 }
12319 }
12320 else
12321 {
12322 elf_elfheader (abfd)->e_flags = flags;
12323 elf_flags_init (abfd) = TRUE;
12324 }
12325
12326 return TRUE;
12327 }
12328
12329 /* Copy backend specific data from one object module to another. */
12330
12331 static bfd_boolean
12332 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12333 {
12334 flagword in_flags;
12335 flagword out_flags;
12336
12337 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12338 return TRUE;
12339
12340 in_flags = elf_elfheader (ibfd)->e_flags;
12341 out_flags = elf_elfheader (obfd)->e_flags;
12342
12343 if (elf_flags_init (obfd)
12344 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12345 && in_flags != out_flags)
12346 {
12347 /* Cannot mix APCS26 and APCS32 code. */
12348 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12349 return FALSE;
12350
12351 /* Cannot mix float APCS and non-float APCS code. */
12352 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12353 return FALSE;
12354
12355 /* If the src and dest have different interworking flags
12356 then turn off the interworking bit. */
12357 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12358 {
12359 if (out_flags & EF_ARM_INTERWORK)
12360 _bfd_error_handler
12361 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12362 obfd, ibfd);
12363
12364 in_flags &= ~EF_ARM_INTERWORK;
12365 }
12366
12367 /* Likewise for PIC, though don't warn for this case. */
12368 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12369 in_flags &= ~EF_ARM_PIC;
12370 }
12371
12372 elf_elfheader (obfd)->e_flags = in_flags;
12373 elf_flags_init (obfd) = TRUE;
12374
12375 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12376 }
12377
/* Values for Tag_ABI_PCS_R9_use.  The encodings follow the values of
   this build attribute as defined by the ARM EABI addenda; AEABI_R9_SB
   marks R9 as the static base (see the Tag_ABI_PCS_RW_data SB-relative
   conflict check in the attribute-merging code below).  */
enum
{
  AEABI_R9_V6,		/* NOTE(review): presumably plain v6-style register use.  */
  AEABI_R9_SB,		/* R9 is the static base.  */
  AEABI_R9_TLS,		/* NOTE(review): presumably R9 holds the TLS pointer.  */
  AEABI_R9_unused	/* R9 not used; compatible with any other choice.  */
};

/* Values for Tag_ABI_PCS_RW_data: how writable data is addressed.
   SBrel addressing requires R9 to be the static base (enforced when
   attributes are merged).  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused	/* No RW data; compatible with anything.  */
};

/* Values for Tag_ABI_enum_size.  "unused" and "forced_wide" are both
   treated as compatible with anything by the merging code.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,	/* Variable-size enums.  */
  AEABI_enum_wide,	/* 32-bit enums.  */
  AEABI_enum_forced_wide
};
12404
12405 /* Determine whether an object attribute tag takes an integer, a
12406 string or both. */
12407
12408 static int
12409 elf32_arm_obj_attrs_arg_type (int tag)
12410 {
12411 if (tag == Tag_compatibility)
12412 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12413 else if (tag == Tag_nodefaults)
12414 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12415 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12416 return ATTR_TYPE_FLAG_STR_VAL;
12417 else if (tag < 32)
12418 return ATTR_TYPE_FLAG_INT_VAL;
12419 else
12420 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12421 }
12422
12423 /* The ABI defines that Tag_conformance should be emitted first, and that
12424 Tag_nodefaults should be second (if either is defined). This sets those
12425 two positions, and bumps up the position of all the remaining tags to
12426 compensate. */
static int
elf32_arm_obj_attrs_order (int num)
{
  /* The first two output slots are reserved for Tag_conformance and
     Tag_nodefaults respectively.  */
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
    return Tag_conformance;
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
    return Tag_nodefaults;
  /* Tags numerically below the two relocated ones are shifted down to
     fill the slots they vacated.  */
  if ((num - 2) < Tag_nodefaults)
    return num - 2;
  if ((num - 1) < Tag_conformance)
    return num - 1;
  /* Everything above both relocated tags keeps its position.  */
  return num;
}
12440
12441 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12442 static bfd_boolean
12443 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12444 {
12445 if ((tag & 127) < 64)
12446 {
12447 _bfd_error_handler
12448 (_("%B: Unknown mandatory EABI object attribute %d"),
12449 abfd, tag);
12450 bfd_set_error (bfd_error_bad_value);
12451 return FALSE;
12452 }
12453 else
12454 {
12455 _bfd_error_handler
12456 (_("Warning: %B: Unknown EABI object attribute %d"),
12457 abfd, tag);
12458 return TRUE;
12459 }
12460 }
12461
12462 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12463 Returns -1 if no architecture could be read. */
12464
12465 static int
12466 get_secondary_compatible_arch (bfd *abfd)
12467 {
12468 obj_attribute *attr =
12469 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12470
12471 /* Note: the tag and its argument below are uleb128 values, though
12472 currently-defined values fit in one byte for each. */
12473 if (attr->s
12474 && attr->s[0] == Tag_CPU_arch
12475 && (attr->s[1] & 128) != 128
12476 && attr->s[2] == 0)
12477 return attr->s[1];
12478
12479 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12480 return -1;
12481 }
12482
12483 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12484 The tag is removed if ARCH is -1. */
12485
12486 static void
12487 set_secondary_compatible_arch (bfd *abfd, int arch)
12488 {
12489 obj_attribute *attr =
12490 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12491
12492 if (arch == -1)
12493 {
12494 attr->s = NULL;
12495 return;
12496 }
12497
12498 /* Note: the tag and its argument below are uleb128 values, though
12499 currently-defined values fit in one byte for each. */
12500 if (!attr->s)
12501 attr->s = (char *) bfd_alloc (abfd, 3);
12502 attr->s[0] = Tag_CPU_arch;
12503 attr->s[1] = arch;
12504 attr->s[2] = '\0';
12505 }
12506
12507 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12508 into account. */
12509
12510 static int
12511 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
12512 int newtag, int secondary_compat)
12513 {
12514 #define T(X) TAG_CPU_ARCH_##X
12515 int tagl, tagh, result;
12516 const int v6t2[] =
12517 {
12518 T(V6T2), /* PRE_V4. */
12519 T(V6T2), /* V4. */
12520 T(V6T2), /* V4T. */
12521 T(V6T2), /* V5T. */
12522 T(V6T2), /* V5TE. */
12523 T(V6T2), /* V5TEJ. */
12524 T(V6T2), /* V6. */
12525 T(V7), /* V6KZ. */
12526 T(V6T2) /* V6T2. */
12527 };
12528 const int v6k[] =
12529 {
12530 T(V6K), /* PRE_V4. */
12531 T(V6K), /* V4. */
12532 T(V6K), /* V4T. */
12533 T(V6K), /* V5T. */
12534 T(V6K), /* V5TE. */
12535 T(V6K), /* V5TEJ. */
12536 T(V6K), /* V6. */
12537 T(V6KZ), /* V6KZ. */
12538 T(V7), /* V6T2. */
12539 T(V6K) /* V6K. */
12540 };
12541 const int v7[] =
12542 {
12543 T(V7), /* PRE_V4. */
12544 T(V7), /* V4. */
12545 T(V7), /* V4T. */
12546 T(V7), /* V5T. */
12547 T(V7), /* V5TE. */
12548 T(V7), /* V5TEJ. */
12549 T(V7), /* V6. */
12550 T(V7), /* V6KZ. */
12551 T(V7), /* V6T2. */
12552 T(V7), /* V6K. */
12553 T(V7) /* V7. */
12554 };
12555 const int v6_m[] =
12556 {
12557 -1, /* PRE_V4. */
12558 -1, /* V4. */
12559 T(V6K), /* V4T. */
12560 T(V6K), /* V5T. */
12561 T(V6K), /* V5TE. */
12562 T(V6K), /* V5TEJ. */
12563 T(V6K), /* V6. */
12564 T(V6KZ), /* V6KZ. */
12565 T(V7), /* V6T2. */
12566 T(V6K), /* V6K. */
12567 T(V7), /* V7. */
12568 T(V6_M) /* V6_M. */
12569 };
12570 const int v6s_m[] =
12571 {
12572 -1, /* PRE_V4. */
12573 -1, /* V4. */
12574 T(V6K), /* V4T. */
12575 T(V6K), /* V5T. */
12576 T(V6K), /* V5TE. */
12577 T(V6K), /* V5TEJ. */
12578 T(V6K), /* V6. */
12579 T(V6KZ), /* V6KZ. */
12580 T(V7), /* V6T2. */
12581 T(V6K), /* V6K. */
12582 T(V7), /* V7. */
12583 T(V6S_M), /* V6_M. */
12584 T(V6S_M) /* V6S_M. */
12585 };
12586 const int v7e_m[] =
12587 {
12588 -1, /* PRE_V4. */
12589 -1, /* V4. */
12590 T(V7E_M), /* V4T. */
12591 T(V7E_M), /* V5T. */
12592 T(V7E_M), /* V5TE. */
12593 T(V7E_M), /* V5TEJ. */
12594 T(V7E_M), /* V6. */
12595 T(V7E_M), /* V6KZ. */
12596 T(V7E_M), /* V6T2. */
12597 T(V7E_M), /* V6K. */
12598 T(V7E_M), /* V7. */
12599 T(V7E_M), /* V6_M. */
12600 T(V7E_M), /* V6S_M. */
12601 T(V7E_M) /* V7E_M. */
12602 };
12603 const int v8[] =
12604 {
12605 T(V8), /* PRE_V4. */
12606 T(V8), /* V4. */
12607 T(V8), /* V4T. */
12608 T(V8), /* V5T. */
12609 T(V8), /* V5TE. */
12610 T(V8), /* V5TEJ. */
12611 T(V8), /* V6. */
12612 T(V8), /* V6KZ. */
12613 T(V8), /* V6T2. */
12614 T(V8), /* V6K. */
12615 T(V8), /* V7. */
12616 T(V8), /* V6_M. */
12617 T(V8), /* V6S_M. */
12618 T(V8), /* V7E_M. */
12619 T(V8) /* V8. */
12620 };
12621 const int v8m_baseline[] =
12622 {
12623 -1, /* PRE_V4. */
12624 -1, /* V4. */
12625 -1, /* V4T. */
12626 -1, /* V5T. */
12627 -1, /* V5TE. */
12628 -1, /* V5TEJ. */
12629 -1, /* V6. */
12630 -1, /* V6KZ. */
12631 -1, /* V6T2. */
12632 -1, /* V6K. */
12633 -1, /* V7. */
12634 T(V8M_BASE), /* V6_M. */
12635 T(V8M_BASE), /* V6S_M. */
12636 -1, /* V7E_M. */
12637 -1, /* V8. */
12638 -1,
12639 T(V8M_BASE) /* V8-M BASELINE. */
12640 };
12641 const int v8m_mainline[] =
12642 {
12643 -1, /* PRE_V4. */
12644 -1, /* V4. */
12645 -1, /* V4T. */
12646 -1, /* V5T. */
12647 -1, /* V5TE. */
12648 -1, /* V5TEJ. */
12649 -1, /* V6. */
12650 -1, /* V6KZ. */
12651 -1, /* V6T2. */
12652 -1, /* V6K. */
12653 T(V8M_MAIN), /* V7. */
12654 T(V8M_MAIN), /* V6_M. */
12655 T(V8M_MAIN), /* V6S_M. */
12656 T(V8M_MAIN), /* V7E_M. */
12657 -1, /* V8. */
12658 -1,
12659 T(V8M_MAIN), /* V8-M BASELINE. */
12660 T(V8M_MAIN) /* V8-M MAINLINE. */
12661 };
12662 const int v4t_plus_v6_m[] =
12663 {
12664 -1, /* PRE_V4. */
12665 -1, /* V4. */
12666 T(V4T), /* V4T. */
12667 T(V5T), /* V5T. */
12668 T(V5TE), /* V5TE. */
12669 T(V5TEJ), /* V5TEJ. */
12670 T(V6), /* V6. */
12671 T(V6KZ), /* V6KZ. */
12672 T(V6T2), /* V6T2. */
12673 T(V6K), /* V6K. */
12674 T(V7), /* V7. */
12675 T(V6_M), /* V6_M. */
12676 T(V6S_M), /* V6S_M. */
12677 T(V7E_M), /* V7E_M. */
12678 T(V8), /* V8. */
12679 -1, /* Unused. */
12680 T(V8M_BASE), /* V8-M BASELINE. */
12681 T(V8M_MAIN), /* V8-M MAINLINE. */
12682 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
12683 };
12684 const int *comb[] =
12685 {
12686 v6t2,
12687 v6k,
12688 v7,
12689 v6_m,
12690 v6s_m,
12691 v7e_m,
12692 v8,
12693 NULL,
12694 v8m_baseline,
12695 v8m_mainline,
12696 /* Pseudo-architecture. */
12697 v4t_plus_v6_m
12698 };
12699
12700 /* Check we've not got a higher architecture than we know about. */
12701
12702 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
12703 {
12704 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
12705 return -1;
12706 }
12707
12708 /* Override old tag if we have a Tag_also_compatible_with on the output. */
12709
12710 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
12711 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
12712 oldtag = T(V4T_PLUS_V6_M);
12713
12714 /* And override the new tag if we have a Tag_also_compatible_with on the
12715 input. */
12716
12717 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
12718 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
12719 newtag = T(V4T_PLUS_V6_M);
12720
12721 tagl = (oldtag < newtag) ? oldtag : newtag;
12722 result = tagh = (oldtag > newtag) ? oldtag : newtag;
12723
12724 /* Architectures before V6KZ add features monotonically. */
12725 if (tagh <= TAG_CPU_ARCH_V6KZ)
12726 return result;
12727
12728 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
12729
12730 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12731 as the canonical version. */
12732 if (result == T(V4T_PLUS_V6_M))
12733 {
12734 result = T(V4T);
12735 *secondary_compat_out = T(V6_M);
12736 }
12737 else
12738 *secondary_compat_out = -1;
12739
12740 if (result == -1)
12741 {
12742 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12743 ibfd, oldtag, newtag);
12744 return -1;
12745 }
12746
12747 return result;
12748 #undef T
12749 }
12750
12751 /* Query attributes object to see if integer divide instructions may be
12752 present in an object. */
static bfd_boolean
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in
	 architecture: v7 R/M profiles, or anything from v7E-M on.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return TRUE;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return TRUE;
      else
	return FALSE;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return FALSE;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.
	 (Deliberate fall-through into case 2.)  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return TRUE;
    }
}
12781
12782 /* Query attributes object to see if integer divide instructions are
12783 forbidden to be in the object. This is not the inverse of
12784 elf32_arm_attributes_accept_div. */
12785 static bfd_boolean
12786 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12787 {
12788 return attr[Tag_DIV_use].i == 1;
12789 }
12790
12791 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12792 are conflicting attributes. */
12793
12794 static bfd_boolean
12795 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12796 {
12797 obj_attribute *in_attr;
12798 obj_attribute *out_attr;
12799 /* Some tags have 0 = don't care, 1 = strong requirement,
12800 2 = weak requirement. */
12801 static const int order_021[3] = {0, 2, 1};
12802 int i;
12803 bfd_boolean result = TRUE;
12804 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12805
12806 /* Skip the linker stubs file. This preserves previous behavior
12807 of accepting unknown attributes in the first input file - but
12808 is that a bug? */
12809 if (ibfd->flags & BFD_LINKER_CREATED)
12810 return TRUE;
12811
12812 /* Skip any input that hasn't attribute section.
12813 This enables to link object files without attribute section with
12814 any others. */
12815 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12816 return TRUE;
12817
12818 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12819 {
12820 /* This is the first object. Copy the attributes. */
12821 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12822
12823 out_attr = elf_known_obj_attributes_proc (obfd);
12824
12825 /* Use the Tag_null value to indicate the attributes have been
12826 initialized. */
12827 out_attr[0].i = 1;
12828
12829 /* We do not output objects with Tag_MPextension_use_legacy - we move
12830 the attribute's value to Tag_MPextension_use. */
12831 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12832 {
12833 if (out_attr[Tag_MPextension_use].i != 0
12834 && out_attr[Tag_MPextension_use_legacy].i
12835 != out_attr[Tag_MPextension_use].i)
12836 {
12837 _bfd_error_handler
12838 (_("Error: %B has both the current and legacy "
12839 "Tag_MPextension_use attributes"), ibfd);
12840 result = FALSE;
12841 }
12842
12843 out_attr[Tag_MPextension_use] =
12844 out_attr[Tag_MPextension_use_legacy];
12845 out_attr[Tag_MPextension_use_legacy].type = 0;
12846 out_attr[Tag_MPextension_use_legacy].i = 0;
12847 }
12848
12849 return result;
12850 }
12851
12852 in_attr = elf_known_obj_attributes_proc (ibfd);
12853 out_attr = elf_known_obj_attributes_proc (obfd);
12854 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12855 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12856 {
12857 /* Ignore mismatches if the object doesn't use floating point or is
12858 floating point ABI independent. */
12859 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12860 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12861 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12862 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12863 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12864 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12865 {
12866 _bfd_error_handler
12867 (_("error: %B uses VFP register arguments, %B does not"),
12868 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12869 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12870 result = FALSE;
12871 }
12872 }
12873
12874 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12875 {
12876 /* Merge this attribute with existing attributes. */
12877 switch (i)
12878 {
12879 case Tag_CPU_raw_name:
12880 case Tag_CPU_name:
12881 /* These are merged after Tag_CPU_arch. */
12882 break;
12883
12884 case Tag_ABI_optimization_goals:
12885 case Tag_ABI_FP_optimization_goals:
12886 /* Use the first value seen. */
12887 break;
12888
12889 case Tag_CPU_arch:
12890 {
12891 int secondary_compat = -1, secondary_compat_out = -1;
12892 unsigned int saved_out_attr = out_attr[i].i;
12893 int arch_attr;
12894 static const char *name_table[] =
12895 {
12896 /* These aren't real CPU names, but we can't guess
12897 that from the architecture version alone. */
12898 "Pre v4",
12899 "ARM v4",
12900 "ARM v4T",
12901 "ARM v5T",
12902 "ARM v5TE",
12903 "ARM v5TEJ",
12904 "ARM v6",
12905 "ARM v6KZ",
12906 "ARM v6T2",
12907 "ARM v6K",
12908 "ARM v7",
12909 "ARM v6-M",
12910 "ARM v6S-M",
12911 "ARM v8",
12912 "",
12913 "ARM v8-M.baseline",
12914 "ARM v8-M.mainline",
12915 };
12916
12917 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12918 secondary_compat = get_secondary_compatible_arch (ibfd);
12919 secondary_compat_out = get_secondary_compatible_arch (obfd);
12920 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12921 &secondary_compat_out,
12922 in_attr[i].i,
12923 secondary_compat);
12924
12925 /* Return with error if failed to merge. */
12926 if (arch_attr == -1)
12927 return FALSE;
12928
12929 out_attr[i].i = arch_attr;
12930
12931 set_secondary_compatible_arch (obfd, secondary_compat_out);
12932
12933 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12934 if (out_attr[i].i == saved_out_attr)
12935 ; /* Leave the names alone. */
12936 else if (out_attr[i].i == in_attr[i].i)
12937 {
12938 /* The output architecture has been changed to match the
12939 input architecture. Use the input names. */
12940 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12941 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12942 : NULL;
12943 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12944 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12945 : NULL;
12946 }
12947 else
12948 {
12949 out_attr[Tag_CPU_name].s = NULL;
12950 out_attr[Tag_CPU_raw_name].s = NULL;
12951 }
12952
12953 /* If we still don't have a value for Tag_CPU_name,
12954 make one up now. Tag_CPU_raw_name remains blank. */
12955 if (out_attr[Tag_CPU_name].s == NULL
12956 && out_attr[i].i < ARRAY_SIZE (name_table))
12957 out_attr[Tag_CPU_name].s =
12958 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12959 }
12960 break;
12961
12962 case Tag_ARM_ISA_use:
12963 case Tag_THUMB_ISA_use:
12964 case Tag_WMMX_arch:
12965 case Tag_Advanced_SIMD_arch:
12966 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12967 case Tag_ABI_FP_rounding:
12968 case Tag_ABI_FP_exceptions:
12969 case Tag_ABI_FP_user_exceptions:
12970 case Tag_ABI_FP_number_model:
12971 case Tag_FP_HP_extension:
12972 case Tag_CPU_unaligned_access:
12973 case Tag_T2EE_use:
12974 case Tag_MPextension_use:
12975 /* Use the largest value specified. */
12976 if (in_attr[i].i > out_attr[i].i)
12977 out_attr[i].i = in_attr[i].i;
12978 break;
12979
12980 case Tag_ABI_align_preserved:
12981 case Tag_ABI_PCS_RO_data:
12982 /* Use the smallest value specified. */
12983 if (in_attr[i].i < out_attr[i].i)
12984 out_attr[i].i = in_attr[i].i;
12985 break;
12986
12987 case Tag_ABI_align_needed:
12988 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12989 && (in_attr[Tag_ABI_align_preserved].i == 0
12990 || out_attr[Tag_ABI_align_preserved].i == 0))
12991 {
12992 /* This error message should be enabled once all non-conformant
12993 binaries in the toolchain have had the attributes set
12994 properly.
12995 _bfd_error_handler
12996 (_("error: %B: 8-byte data alignment conflicts with %B"),
12997 obfd, ibfd);
12998 result = FALSE; */
12999 }
13000 /* Fall through. */
13001 case Tag_ABI_FP_denormal:
13002 case Tag_ABI_PCS_GOT_use:
13003 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
13004 value if greater than 2 (for future-proofing). */
13005 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
13006 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
13007 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
13008 out_attr[i].i = in_attr[i].i;
13009 break;
13010
13011 case Tag_Virtualization_use:
13012 /* The virtualization tag effectively stores two bits of
13013 information: the intended use of TrustZone (in bit 0), and the
13014 intended use of Virtualization (in bit 1). */
13015 if (out_attr[i].i == 0)
13016 out_attr[i].i = in_attr[i].i;
13017 else if (in_attr[i].i != 0
13018 && in_attr[i].i != out_attr[i].i)
13019 {
13020 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
13021 out_attr[i].i = 3;
13022 else
13023 {
13024 _bfd_error_handler
13025 (_("error: %B: unable to merge virtualization attributes "
13026 "with %B"),
13027 obfd, ibfd);
13028 result = FALSE;
13029 }
13030 }
13031 break;
13032
13033 case Tag_CPU_arch_profile:
13034 if (out_attr[i].i != in_attr[i].i)
13035 {
13036 /* 0 will merge with anything.
13037 'A' and 'S' merge to 'A'.
13038 'R' and 'S' merge to 'R'.
13039 'M' and 'A|R|S' is an error. */
13040 if (out_attr[i].i == 0
13041 || (out_attr[i].i == 'S'
13042 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
13043 out_attr[i].i = in_attr[i].i;
13044 else if (in_attr[i].i == 0
13045 || (in_attr[i].i == 'S'
13046 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
13047 ; /* Do nothing. */
13048 else
13049 {
13050 _bfd_error_handler
13051 (_("error: %B: Conflicting architecture profiles %c/%c"),
13052 ibfd,
13053 in_attr[i].i ? in_attr[i].i : '0',
13054 out_attr[i].i ? out_attr[i].i : '0');
13055 result = FALSE;
13056 }
13057 }
13058 break;
13059
13060 case Tag_DSP_extension:
13061 /* No need to change output value if any of:
13062 - pre (<=) ARMv5T input architecture (do not have DSP)
13063 - M input profile not ARMv7E-M and do not have DSP. */
13064 if (in_attr[Tag_CPU_arch].i <= 3
13065 || (in_attr[Tag_CPU_arch_profile].i == 'M'
13066 && in_attr[Tag_CPU_arch].i != 13
13067 && in_attr[i].i == 0))
13068 ; /* Do nothing. */
13069 /* Output value should be 0 if DSP part of architecture, ie.
13070 - post (>=) ARMv5te architecture output
13071 - A, R or S profile output or ARMv7E-M output architecture. */
13072 else if (out_attr[Tag_CPU_arch].i >= 4
13073 && (out_attr[Tag_CPU_arch_profile].i == 'A'
13074 || out_attr[Tag_CPU_arch_profile].i == 'R'
13075 || out_attr[Tag_CPU_arch_profile].i == 'S'
13076 || out_attr[Tag_CPU_arch].i == 13))
13077 out_attr[i].i = 0;
13078 /* Otherwise, DSP instructions are added and not part of output
13079 architecture. */
13080 else
13081 out_attr[i].i = 1;
13082 break;
13083
13084 case Tag_FP_arch:
13085 {
13086 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
13087 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
13088 when it's 0. It might mean absence of FP hardware if
13089 Tag_FP_arch is zero. */
13090
13091 #define VFP_VERSION_COUNT 9
13092 static const struct
13093 {
13094 int ver;
13095 int regs;
13096 } vfp_versions[VFP_VERSION_COUNT] =
13097 {
13098 {0, 0},
13099 {1, 16},
13100 {2, 16},
13101 {3, 32},
13102 {3, 16},
13103 {4, 32},
13104 {4, 16},
13105 {8, 32},
13106 {8, 16}
13107 };
13108 int ver;
13109 int regs;
13110 int newval;
13111
13112 /* If the output has no requirement about FP hardware,
13113 follow the requirement of the input. */
13114 if (out_attr[i].i == 0)
13115 {
13116 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
13117 out_attr[i].i = in_attr[i].i;
13118 out_attr[Tag_ABI_HardFP_use].i
13119 = in_attr[Tag_ABI_HardFP_use].i;
13120 break;
13121 }
13122 /* If the input has no requirement about FP hardware, do
13123 nothing. */
13124 else if (in_attr[i].i == 0)
13125 {
13126 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
13127 break;
13128 }
13129
13130 /* Both the input and the output have nonzero Tag_FP_arch.
13131 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
13132
13133 /* If both the input and the output have zero Tag_ABI_HardFP_use,
13134 do nothing. */
13135 if (in_attr[Tag_ABI_HardFP_use].i == 0
13136 && out_attr[Tag_ABI_HardFP_use].i == 0)
13137 ;
13138 /* If the input and the output have different Tag_ABI_HardFP_use,
13139 the combination of them is 0 (implied by Tag_FP_arch). */
13140 else if (in_attr[Tag_ABI_HardFP_use].i
13141 != out_attr[Tag_ABI_HardFP_use].i)
13142 out_attr[Tag_ABI_HardFP_use].i = 0;
13143
13144 /* Now we can handle Tag_FP_arch. */
13145
13146 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13147 pick the biggest. */
13148 if (in_attr[i].i >= VFP_VERSION_COUNT
13149 && in_attr[i].i > out_attr[i].i)
13150 {
13151 out_attr[i] = in_attr[i];
13152 break;
13153 }
13154 /* The output uses the superset of input features
13155 (ISA version) and registers. */
13156 ver = vfp_versions[in_attr[i].i].ver;
13157 if (ver < vfp_versions[out_attr[i].i].ver)
13158 ver = vfp_versions[out_attr[i].i].ver;
13159 regs = vfp_versions[in_attr[i].i].regs;
13160 if (regs < vfp_versions[out_attr[i].i].regs)
13161 regs = vfp_versions[out_attr[i].i].regs;
13162 /* This assumes all possible supersets are also a valid
13163 options. */
13164 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
13165 {
13166 if (regs == vfp_versions[newval].regs
13167 && ver == vfp_versions[newval].ver)
13168 break;
13169 }
13170 out_attr[i].i = newval;
13171 }
13172 break;
13173 case Tag_PCS_config:
13174 if (out_attr[i].i == 0)
13175 out_attr[i].i = in_attr[i].i;
13176 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
13177 {
13178 /* It's sometimes ok to mix different configs, so this is only
13179 a warning. */
13180 _bfd_error_handler
13181 (_("Warning: %B: Conflicting platform configuration"), ibfd);
13182 }
13183 break;
13184 case Tag_ABI_PCS_R9_use:
13185 if (in_attr[i].i != out_attr[i].i
13186 && out_attr[i].i != AEABI_R9_unused
13187 && in_attr[i].i != AEABI_R9_unused)
13188 {
13189 _bfd_error_handler
13190 (_("error: %B: Conflicting use of R9"), ibfd);
13191 result = FALSE;
13192 }
13193 if (out_attr[i].i == AEABI_R9_unused)
13194 out_attr[i].i = in_attr[i].i;
13195 break;
13196 case Tag_ABI_PCS_RW_data:
13197 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
13198 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
13199 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
13200 {
13201 _bfd_error_handler
13202 (_("error: %B: SB relative addressing conflicts with use of R9"),
13203 ibfd);
13204 result = FALSE;
13205 }
13206 /* Use the smallest value specified. */
13207 if (in_attr[i].i < out_attr[i].i)
13208 out_attr[i].i = in_attr[i].i;
13209 break;
13210 case Tag_ABI_PCS_wchar_t:
13211 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
13212 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
13213 {
13214 _bfd_error_handler
13215 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13216 ibfd, in_attr[i].i, out_attr[i].i);
13217 }
13218 else if (in_attr[i].i && !out_attr[i].i)
13219 out_attr[i].i = in_attr[i].i;
13220 break;
13221 case Tag_ABI_enum_size:
13222 if (in_attr[i].i != AEABI_enum_unused)
13223 {
13224 if (out_attr[i].i == AEABI_enum_unused
13225 || out_attr[i].i == AEABI_enum_forced_wide)
13226 {
13227 /* The existing object is compatible with anything.
13228 Use whatever requirements the new object has. */
13229 out_attr[i].i = in_attr[i].i;
13230 }
13231 else if (in_attr[i].i != AEABI_enum_forced_wide
13232 && out_attr[i].i != in_attr[i].i
13233 && !elf_arm_tdata (obfd)->no_enum_size_warning)
13234 {
13235 static const char *aeabi_enum_names[] =
13236 { "", "variable-size", "32-bit", "" };
13237 const char *in_name =
13238 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13239 ? aeabi_enum_names[in_attr[i].i]
13240 : "<unknown>";
13241 const char *out_name =
13242 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13243 ? aeabi_enum_names[out_attr[i].i]
13244 : "<unknown>";
13245 _bfd_error_handler
13246 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13247 ibfd, in_name, out_name);
13248 }
13249 }
13250 break;
13251 case Tag_ABI_VFP_args:
13252 /* Aready done. */
13253 break;
13254 case Tag_ABI_WMMX_args:
13255 if (in_attr[i].i != out_attr[i].i)
13256 {
13257 _bfd_error_handler
13258 (_("error: %B uses iWMMXt register arguments, %B does not"),
13259 ibfd, obfd);
13260 result = FALSE;
13261 }
13262 break;
13263 case Tag_compatibility:
13264 /* Merged in target-independent code. */
13265 break;
13266 case Tag_ABI_HardFP_use:
13267 /* This is handled along with Tag_FP_arch. */
13268 break;
13269 case Tag_ABI_FP_16bit_format:
13270 if (in_attr[i].i != 0 && out_attr[i].i != 0)
13271 {
13272 if (in_attr[i].i != out_attr[i].i)
13273 {
13274 _bfd_error_handler
13275 (_("error: fp16 format mismatch between %B and %B"),
13276 ibfd, obfd);
13277 result = FALSE;
13278 }
13279 }
13280 if (in_attr[i].i != 0)
13281 out_attr[i].i = in_attr[i].i;
13282 break;
13283
13284 case Tag_DIV_use:
13285 /* A value of zero on input means that the divide instruction may
13286 be used if available in the base architecture as specified via
13287 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13288 the user did not want divide instructions. A value of 2
13289 explicitly means that divide instructions were allowed in ARM
13290 and Thumb state. */
13291 if (in_attr[i].i == out_attr[i].i)
13292 /* Do nothing. */ ;
13293 else if (elf32_arm_attributes_forbid_div (in_attr)
13294 && !elf32_arm_attributes_accept_div (out_attr))
13295 out_attr[i].i = 1;
13296 else if (elf32_arm_attributes_forbid_div (out_attr)
13297 && elf32_arm_attributes_accept_div (in_attr))
13298 out_attr[i].i = in_attr[i].i;
13299 else if (in_attr[i].i == 2)
13300 out_attr[i].i = in_attr[i].i;
13301 break;
13302
13303 case Tag_MPextension_use_legacy:
13304 /* We don't output objects with Tag_MPextension_use_legacy - we
13305 move the value to Tag_MPextension_use. */
13306 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13307 {
13308 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13309 {
13310 _bfd_error_handler
13311 (_("%B has has both the current and legacy "
13312 "Tag_MPextension_use attributes"),
13313 ibfd);
13314 result = FALSE;
13315 }
13316 }
13317
13318 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13319 out_attr[Tag_MPextension_use] = in_attr[i];
13320
13321 break;
13322
13323 case Tag_nodefaults:
13324 /* This tag is set if it exists, but the value is unused (and is
13325 typically zero). We don't actually need to do anything here -
13326 the merge happens automatically when the type flags are merged
13327 below. */
13328 break;
13329 case Tag_also_compatible_with:
13330 /* Already done in Tag_CPU_arch. */
13331 break;
13332 case Tag_conformance:
13333 /* Keep the attribute if it matches. Throw it away otherwise.
13334 No attribute means no claim to conform. */
13335 if (!in_attr[i].s || !out_attr[i].s
13336 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13337 out_attr[i].s = NULL;
13338 break;
13339
13340 default:
13341 result
13342 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13343 }
13344
13345 /* If out_attr was copied from in_attr then it won't have a type yet. */
13346 if (in_attr[i].type && !out_attr[i].type)
13347 out_attr[i].type = in_attr[i].type;
13348 }
13349
13350 /* Merge Tag_compatibility attributes and any common GNU ones. */
13351 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13352 return FALSE;
13353
13354 /* Check for any attributes not known on ARM. */
13355 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13356
13357 return result;
13358 }
13359
13360
13361 /* Return TRUE if the two EABI versions are incompatible. */
13362
13363 static bfd_boolean
13364 elf32_arm_versions_compatible (unsigned iver, unsigned over)
13365 {
13366 /* v4 and v5 are the same spec before and after it was released,
13367 so allow mixing them. */
13368 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
13369 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
13370 return TRUE;
13371
13372 return (iver == over);
13373 }
13374
13375 /* Merge backend specific data from an object file to the output
13376 object file when linking. */
13377
13378 static bfd_boolean
13379 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13380
13381 /* Display the flags field. */
13382
/* Decode and print ABFD's e_flags to the FILE* passed in PTR.  Each
   recognised flag bit is printed and then cleared from the local copy of
   the flags, so that any leftover (unknown) bits can be reported at the
   end.  Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* The meaning of the non-version flag bits depends on which EABI
     version (if any) the object claims, so dispatch on that first.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      /* Clear everything decoded above so the leftover check below
	 only reports genuinely unknown bits.  */
      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* VER4 shares the BE8/LE8 decoding with VER5 below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  /* The version bits themselves have been fully handled above.  */
  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  /* Anything still set is a bit we do not understand.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
13520
13521 static int
13522 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13523 {
13524 switch (ELF_ST_TYPE (elf_sym->st_info))
13525 {
13526 case STT_ARM_TFUNC:
13527 return ELF_ST_TYPE (elf_sym->st_info);
13528
13529 case STT_ARM_16BIT:
13530 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13531 This allows us to distinguish between data used by Thumb instructions
13532 and non-data (which is probably code) inside Thumb regions of an
13533 executable. */
13534 if (type != STT_OBJECT && type != STT_TLS)
13535 return ELF_ST_TYPE (elf_sym->st_info);
13536 break;
13537
13538 default:
13539 break;
13540 }
13541
13542 return type;
13543 }
13544
13545 static asection *
13546 elf32_arm_gc_mark_hook (asection *sec,
13547 struct bfd_link_info *info,
13548 Elf_Internal_Rela *rel,
13549 struct elf_link_hash_entry *h,
13550 Elf_Internal_Sym *sym)
13551 {
13552 if (h != NULL)
13553 switch (ELF32_R_TYPE (rel->r_info))
13554 {
13555 case R_ARM_GNU_VTINHERIT:
13556 case R_ARM_GNU_VTENTRY:
13557 return NULL;
13558 }
13559
13560 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13561 }
13562
13563 /* Update the got entry reference counts for the section being removed. */
13564
/* Undo the reference counting that elf32_arm_check_relocs performed for
   the relocations in SEC, which is being removed by section garbage
   collection.  The switch below must classify each reloc exactly the
   way check_relocs did, so that the GOT/PLT/dynamic-reloc counts stay
   balanced.  Returns FALSE on malloc/symbol-lookup failure.  */

static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  /* No counts were ever accumulated for relocatable links.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      /* Resolve the symbol this reloc refers to; indices below sh_info
	 are local symbols and leave H as NULL.  */
      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	/* GOT-using relocs: drop one GOT reference.  */
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	/* Branch/call relocs: these counted towards PLT refcounts.  */
	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* Only VxWorks treats ABS12 like the data relocs below.  */
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      /* Drop the PLT references recorded by check_relocs, mirroring
	 the increments performed there.  */
      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      /* Remove SEC's entry from the symbol's dynamic-reloc list.  */
      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
13754
13755 /* Look through the relocs for a section during the first phase. */
13756
/* Scan the relocations of SEC during the first phase of linking and
   record what will be needed later: GOT slots (including TLS variants),
   PLT entries, and dynamic relocations to be copied into the output.
   elf32_arm_gc_sweep_hook undoes these counts when a section is
   garbage-collected, so the two must classify relocs identically.
   Returns FALSE on error (bad symbol index, allocation failure, or an
   absolute reloc that cannot be used in a shared object).  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      /* Resolve the reloc's symbol: ISYM for locals, H for globals
	 (following any indirect/warning links).  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object.  */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    /* Classify the kind of GOT slot this reloc needs.  */
	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocs need the .got section to exist.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* On VxWorks, skip the shared-object diagnostic below and go
	     straight to the dynamic-reloc accounting at jump_over.  */
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be relocated at load time,
	     so they are forbidden in shared objects.  */
	  if (bfd_link_pic (info))
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      /* Record PLT-related reference counts; for local symbols this only
	 applies to STT_GNU_IFUNC, which gets an iplt entry.  */
      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* Per-section counts live at the head of the list; start a
	     new record when SEC changes.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
14162
14163 /* Unwinding tables are not referenced directly. This pass marks them as
14164 required if the corresponding code section is marked. */
14165
14166 static bfd_boolean
14167 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
14168 elf_gc_mark_hook_fn gc_mark_hook)
14169 {
14170 bfd *sub;
14171 Elf_Internal_Shdr **elf_shdrp;
14172 bfd_boolean again;
14173
14174 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
14175
14176 /* Marking EH data may cause additional code sections to be marked,
14177 requiring multiple passes. */
14178 again = TRUE;
14179 while (again)
14180 {
14181 again = FALSE;
14182 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
14183 {
14184 asection *o;
14185
14186 if (! is_arm_elf (sub))
14187 continue;
14188
14189 elf_shdrp = elf_elfsections (sub);
14190 for (o = sub->sections; o != NULL; o = o->next)
14191 {
14192 Elf_Internal_Shdr *hdr;
14193
14194 hdr = &elf_section_data (o)->this_hdr;
14195 if (hdr->sh_type == SHT_ARM_EXIDX
14196 && hdr->sh_link
14197 && hdr->sh_link < elf_numsections (sub)
14198 && !o->gc_mark
14199 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
14200 {
14201 again = TRUE;
14202 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
14203 return FALSE;
14204 }
14205 }
14206 }
14207 }
14208
14209 return TRUE;
14210 }
14211
14212 /* Treat mapping symbols as special target symbols. */
14213
14214 static bfd_boolean
14215 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
14216 {
14217 return bfd_is_arm_special_symbol_name (sym->name,
14218 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
14219 }
14220
14221 /* This is a copy of elf_find_function() from elf.c except that
14222 ARM mapping symbols are ignored when looking for function names
14223 and STT_ARM_TFUNC is considered to a function type. */
14224
14225 static bfd_boolean
14226 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
14227 asymbol ** symbols,
14228 asection * section,
14229 bfd_vma offset,
14230 const char ** filename_ptr,
14231 const char ** functionname_ptr)
14232 {
14233 const char * filename = NULL;
14234 asymbol * func = NULL;
14235 bfd_vma low_func = 0;
14236 asymbol ** p;
14237
14238 for (p = symbols; *p != NULL; p++)
14239 {
14240 elf_symbol_type *q;
14241
14242 q = (elf_symbol_type *) *p;
14243
14244 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
14245 {
14246 default:
14247 break;
14248 case STT_FILE:
14249 filename = bfd_asymbol_name (&q->symbol);
14250 break;
14251 case STT_FUNC:
14252 case STT_ARM_TFUNC:
14253 case STT_NOTYPE:
14254 /* Skip mapping symbols. */
14255 if ((q->symbol.flags & BSF_LOCAL)
14256 && bfd_is_arm_special_symbol_name (q->symbol.name,
14257 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
14258 continue;
14259 /* Fall through. */
14260 if (bfd_get_section (&q->symbol) == section
14261 && q->symbol.value >= low_func
14262 && q->symbol.value <= offset)
14263 {
14264 func = (asymbol *) q;
14265 low_func = q->symbol.value;
14266 }
14267 break;
14268 }
14269 }
14270
14271 if (func == NULL)
14272 return FALSE;
14273
14274 if (filename_ptr)
14275 *filename_ptr = filename;
14276 if (functionname_ptr)
14277 *functionname_ptr = bfd_asymbol_name (func);
14278
14279 return TRUE;
14280 }
14281
14282
14283 /* Find the nearest line to a particular section and offset, for error
14284 reporting. This code is a duplicate of the code in elf.c, except
14285 that it uses arm_elf_find_function. */
14286
static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* First try DWARF2 debug info.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 located the address but may not have named the enclosing
	 function; fall back on the symbol table for the name.
	 NOTE(review): SYMBOLS is not checked for NULL on this path, so
	 arm_elf_find_function must tolerate a NULL symbol table —
	 confirm.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  /* Next try stabs debug info.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  /* Last resort: the symbol table alone.  This gives a function name
     but no line number.  */
  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
14335
14336 static bfd_boolean
14337 elf32_arm_find_inliner_info (bfd * abfd,
14338 const char ** filename_ptr,
14339 const char ** functionname_ptr,
14340 unsigned int * line_ptr)
14341 {
14342 bfd_boolean found;
14343 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14344 functionname_ptr, line_ptr,
14345 & elf_tdata (abfd)->dwarf2_find_line_info);
14346 return found;
14347 }
14348
14349 /* Adjust a symbol defined by a dynamic object and referenced by a
14350 regular object. The current definition is in some section of the
14351 dynamic object, but we're not including those sections. We have to
14352 change the definition to something the rest of the link can
14353 understand. */
14354
static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here: this hook should only be
     reached for symbols that need a PLT, are ifuncs, alias a weak
     definition, or are defined only by a dynamic object.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.
	     Drop the PLT entry and clear the ARM-specific Thumb/non-call
	     reference counters alongside it.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      /* Functions never need a copy reloc, so we are done.  */
      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      asection *srel;

      /* Reserve one slot in .rel(a).bss for the copy relocation.  */
      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  /* Let the generic ELF code carve out space for H in .dynbss.  */
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
14479
14480 /* Allocate space in .plt, .got and associated reloc sections for
14481 dynamic relocs. */
14482
static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Indirect symbols are handled via the symbol they point at.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Decide whether H gets a PLT entry (ifuncs may need one even
     without dynamic sections).  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turn means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  /* Reserve the (i)plt slot and matching (i)got.plt entry.  */
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  /* A PLT was requested but is not actually needed here.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      /* No PLT references at all.  */
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  /* (EH was already set above; this re-cast is redundant but harmless.)  */
  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  /* Now size any GOT entries H needs, including the TLS variants.  */
  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* Symbian targets do not size a .got here.  */
      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
		     the symbol is both GD and GDESC, got.offset may
		     have been overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32 needs one GOT slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index to relocate against, or 0
	     when the GOT entry can be resolved at link time.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  /* Reserve dynamic relocations for the GOT slots sized above.  */
	  if (tls_type != GOT_NORMAL
	      && (bfd_link_pic (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      /* __real_<sym> keeps the Thumb address; it is forced local so it
	 never escapes into the dynamic symbol table.  */
      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Unlink list nodes whose count drops to zero.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* VxWorks .tls_vars relocations are handled by the loader,
	     so drop them here.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;
      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
14850
14851 /* Find any dynamic relocs that apply to read-only sections. */
14852
14853 static bfd_boolean
14854 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14855 {
14856 struct elf32_arm_link_hash_entry * eh;
14857 struct elf_dyn_relocs * p;
14858
14859 eh = (struct elf32_arm_link_hash_entry *) h;
14860 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14861 {
14862 asection *s = p->sec;
14863
14864 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14865 {
14866 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14867
14868 info->flags |= DF_TEXTREL;
14869
14870 /* Not an error, just cut short the traversal. */
14871 return FALSE;
14872 }
14873 }
14874 return TRUE;
14875 }
14876
14877 void
14878 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14879 int byteswap_code)
14880 {
14881 struct elf32_arm_link_hash_table *globals;
14882
14883 globals = elf32_arm_hash_table (info);
14884 if (globals == NULL)
14885 return;
14886
14887 globals->byteswap_code = byteswap_code;
14888 }
14889
14890 /* Set the sizes of the dynamic sections. */
14891
static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;		/* Does the output need a PLT?  */
  bfd_boolean relocs;		/* Any non-PLT dynamic reloc sections?  */
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* First, size space for dynamic relocs recorded against local
	 sections of this input bfd.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* Walk the per-local-symbol arrays in lock step: GOT refcounts,
	 iplt info, TLS type and TLS descriptor offsets.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      /* Size dynamic relocs recorded against this local ifunc.  */
	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      /* Reserve GOT space according to the symbol's TLS type;
		 a symbol may need several of these slots at once.  */
	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      /* The TLS trampoline lives in .plt; make sure the PLT header is
	 accounted for before placing it.  */
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  /* REL vs RELA tags depend on the flavour this target uses.  */
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
15286
15287 /* Size sections even though they're not dynamic. We use it to setup
15288 _TLS_MODULE_BASE_, if needed. */
15289
static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  /* Nothing to do for relocatable links.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Find or create the _TLS_MODULE_BASE_ hash entry.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define the symbol at offset 0 of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
	      (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
	       tls_sec, 0, NULL, FALSE,
	       bed->collect, &bh)))
	    return FALSE;

	  /* Mark the looked-up entry as TLS, then continue with the
	     entry filled in by the definition above (NOTE(review):
	     presumably the same hash entry) to record the regular
	     definition and hide the symbol.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}
15329
15330 /* Finish up dynamic symbol handling. We set the contents of various
15331 dynamic sections here. */
15332
static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* Fill in the symbol's PLT entry, if it has one.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      /* The R_ARM_COPY reloc is applied at the symbol's address in the
	 output image; no addend.  */
      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
15418
15419 static void
15420 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15421 void *contents,
15422 const unsigned long *template, unsigned count)
15423 {
15424 unsigned ix;
15425
15426 for (ix = 0; ix != count; ix++)
15427 {
15428 unsigned long insn = template[ix];
15429
15430 /* Emit mov pc,rx if bx is not permitted. */
15431 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15432 insn = (insn & 0xf000000f) | 0x01a0f000;
15433 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15434 }
15435 }
15436
15437 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15438 other variants, NaCl needs this entry in a static executable's
15439 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15440 zero. For .iplt really only the last bundle is useful, and .iplt
15441 could have a shorter first entry, with each individual PLT entry's
15442 relative branch calculated differently so it targets the last
15443 bundle instead of the instruction before it (labelled .Lplt_tail
15444 above). But it's simpler to keep the size and layout of PLT0
15445 consistent with the dynamic case, at the cost of some dead code at
15446 the start of .iplt and the one dead store to the stack at the start
15447 of .Lplt_tail. */
15448 static void
15449 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15450 asection *plt, bfd_vma got_displacement)
15451 {
15452 unsigned int i;
15453
15454 put_arm_insn (htab, output_bfd,
15455 elf32_arm_nacl_plt0_entry[0]
15456 | arm_movw_immediate (got_displacement),
15457 plt->contents + 0);
15458 put_arm_insn (htab, output_bfd,
15459 elf32_arm_nacl_plt0_entry[1]
15460 | arm_movt_immediate (got_displacement),
15461 plt->contents + 4);
15462
15463 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15464 put_arm_insn (htab, output_bfd,
15465 elf32_arm_nacl_plt0_entry[i],
15466 plt->contents + (i * 4));
15467 }
15468
15469 /* Finish up the dynamic sections. */
15470
static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      /* Walk every entry of the .dynamic section and fill in the
	 values that could not be known at sizing time.  */
      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags only need rewriting on BPABI (Symbian)
	       targets, where they hold file offsets.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  (*_bfd_error_handler)
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	      if (!htab->symbian_p)
		{
		  /* My reading of the SVR4 ABI indicates that the
		     procedure linkage table relocs (DT_JMPREL) should be
		     included in the overall relocs (DT_REL).  This is
		     what Solaris does.  However, UnixWare can not handle
		     that case.  Therefore, we override the DT_RELSZ entry
		     here to make it not include the JMPREL relocs.  Since
		     the linker script arranges for .rel(a).plt to follow all
		     other relocation sections, we don't have to worry
		     about changing the DT_REL entry.  */
		  s = htab->root.srelplt;
		  if (s != NULL)
		    dyn.d_un.d_val -= s->size;
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		  break;
		}
	      /* Fall through.  */

	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relcoation section, since relocations sections are
		 never allocated under the BPABI.  The comments above
		 about Unixware notwithstanding, we include all of the
		 relocations here.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Sum the sizes (for *SZ tags) or take the lowest
		     file offset (for DT_REL/DT_RELA) over all matching
		     section headers.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  /* d_un.d_val starts at 0 here, so 0 - 1
			     wraps to the maximum unsigned value and
			     the first matching section's offset is
			     always accepted.  */
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Set the bottom bit of DT_INIT/FINI if the
	       corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      /* Emit the lazy TLS descriptor trampoline, patching in the two
	 trailing PC-relative offsets after it.  */
      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  /* Pad the fourth word of the trampoline slot.  */
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry has a pair of relocations: one against the
	     GOT symbol, one against the PLT symbol.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (or 0 if there is
	     none); GOT[1] and GOT[2] are reserved.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
15842
15843 static void
15844 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15845 {
15846 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15847 struct elf32_arm_link_hash_table *globals;
15848 struct elf_segment_map *m;
15849
15850 i_ehdrp = elf_elfheader (abfd);
15851
15852 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15853 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15854 else
15855 _bfd_elf_post_process_headers (abfd, link_info);
15856 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15857
15858 if (link_info)
15859 {
15860 globals = elf32_arm_hash_table (link_info);
15861 if (globals != NULL && globals->byteswap_code)
15862 i_ehdrp->e_flags |= EF_ARM_BE8;
15863 }
15864
15865 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15866 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15867 {
15868 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15869 if (abi == AEABI_VFP_args_vfp)
15870 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15871 else
15872 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15873 }
15874
15875 /* Scan segment to set p_flags attribute if it contains only sections with
15876 SHF_ARM_PURECODE flag. */
15877 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15878 {
15879 unsigned int j;
15880
15881 if (m->count == 0)
15882 continue;
15883 for (j = 0; j < m->count; j++)
15884 {
15885 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
15886 break;
15887 }
15888 if (j == m->count)
15889 {
15890 m->p_flags = PF_X;
15891 m->p_flags_valid = 1;
15892 }
15893 }
15894 }
15895
15896 static enum elf_reloc_type_class
15897 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15898 const asection *rel_sec ATTRIBUTE_UNUSED,
15899 const Elf_Internal_Rela *rela)
15900 {
15901 switch ((int) ELF32_R_TYPE (rela->r_info))
15902 {
15903 case R_ARM_RELATIVE:
15904 return reloc_class_relative;
15905 case R_ARM_JUMP_SLOT:
15906 return reloc_class_plt;
15907 case R_ARM_COPY:
15908 return reloc_class_copy;
15909 case R_ARM_IRELATIVE:
15910 return reloc_class_ifunc;
15911 default:
15912 return reloc_class_normal;
15913 }
15914 }
15915
/* Final write hook: refresh the contents of the ARM_NOTE_SECTION
   note section in ABFD before the file is written out.  */
static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15921
15922 /* Return TRUE if this is an unwinding table entry. */
15923
15924 static bfd_boolean
15925 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15926 {
15927 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15928 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15929 }
15930
15931
15932 /* Set the type and flags for an ARM section. We do this by
15933 the section name, which is a hack, but ought to work. */
15934
15935 static bfd_boolean
15936 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15937 {
15938 const char * name;
15939
15940 name = bfd_get_section_name (abfd, sec);
15941
15942 if (is_arm_elf_unwind_section_name (abfd, name))
15943 {
15944 hdr->sh_type = SHT_ARM_EXIDX;
15945 hdr->sh_flags |= SHF_LINK_ORDER;
15946 }
15947
15948 if (sec->flags & SEC_ELF_PURECODE)
15949 hdr->sh_flags |= SHF_ARM_PURECODE;
15950
15951 return TRUE;
15952 }
15953
15954 /* Handle an ARM specific section when reading an object file. This is
15955 called when bfd_section_from_shdr finds a section with an unknown
15956 type. */
15957
15958 static bfd_boolean
15959 elf32_arm_section_from_shdr (bfd *abfd,
15960 Elf_Internal_Shdr * hdr,
15961 const char *name,
15962 int shindex)
15963 {
15964 /* There ought to be a place to keep ELF backend specific flags, but
15965 at the moment there isn't one. We just keep track of the
15966 sections by their name, instead. Fortunately, the ABI gives
15967 names for all the ARM specific sections, so we will probably get
15968 away with this. */
15969 switch (hdr->sh_type)
15970 {
15971 case SHT_ARM_EXIDX:
15972 case SHT_ARM_PREEMPTMAP:
15973 case SHT_ARM_ATTRIBUTES:
15974 break;
15975
15976 default:
15977 return FALSE;
15978 }
15979
15980 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15981 return FALSE;
15982
15983 return TRUE;
15984 }
15985
15986 static _arm_elf_section_data *
15987 get_arm_elf_section_data (asection * sec)
15988 {
15989 if (sec && sec->owner && is_arm_elf (sec->owner))
15990 return elf32_arm_section_data (sec);
15991 else
15992 return NULL;
15993 }
15994
/* State threaded through the routines that emit linker-generated
   local symbols (mapping symbols and stub symbols).  */
typedef struct
{
  void *flaginfo;		/* Opaque argument passed back to FUNC.  */
  struct bfd_link_info *info;	/* The link being performed.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index for SEC.  */
  /* Callback invoked to output each generated symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
16004
/* The three kinds of ARM ELF mapping symbol: $a (ARM code),
   $t (Thumb code) and $d (data).  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
16011
16012
16013 /* Output a single mapping symbol. */
16014
16015 static bfd_boolean
16016 elf32_arm_output_map_sym (output_arch_syminfo *osi,
16017 enum map_symbol_type type,
16018 bfd_vma offset)
16019 {
16020 static const char *names[3] = {"$a", "$t", "$d"};
16021 Elf_Internal_Sym sym;
16022
16023 sym.st_value = osi->sec->output_section->vma
16024 + osi->sec->output_offset
16025 + offset;
16026 sym.st_size = 0;
16027 sym.st_other = 0;
16028 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
16029 sym.st_shndx = osi->sec_shndx;
16030 sym.st_target_internal = 0;
16031 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
16032 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
16033 }
16034
16035 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
16036 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
16037
static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Select the section the entry lives in; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Clear the low bit of the recorded offset (NOTE(review):
     presumably the Thumb/state bit — confirm against the code that
     sets root_plt->offset).  */
  addr = root_plt->offset & -2;
  /* Each target variant lays out its PLT entries differently, so the
     mapping symbols go at different offsets within the entry.  */
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  /* The Thumb thunk sits in the 4 bytes before the ARM entry.  */
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
16125
16126 /* Output mapping symbols for PLT entries associated with H. */
16127
16128 static bfd_boolean
16129 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
16130 {
16131 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16132 struct elf32_arm_link_hash_entry *eh;
16133
16134 if (h->root.type == bfd_link_hash_indirect)
16135 return TRUE;
16136
16137 if (h->root.type == bfd_link_hash_warning)
16138 /* When warning symbols are created, they **replace** the "real"
16139 entry in the hash table, thus we never get to see the real
16140 symbol in a hash traversal. So look at it now. */
16141 h = (struct elf_link_hash_entry *) h->root.u.i.link;
16142
16143 eh = (struct elf32_arm_link_hash_entry *) h;
16144 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16145 &h->plt, &eh->plt);
16146 }
16147
16148 /* Bind a veneered symbol to its veneer identified by its hash entry
16149 STUB_ENTRY. The veneered location thus loose its symbol. */
16150
16151 static void
16152 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
16153 {
16154 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
16155
16156 BFD_ASSERT (hash);
16157 hash->root.root.u.def.section = stub_entry->stub_sec;
16158 hash->root.root.u.def.value = stub_entry->stub_offset;
16159 hash->root.size = stub_entry->stub_size;
16160 }
16161
16162 /* Output a single local symbol for a generated stub. */
16163
16164 static bfd_boolean
16165 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16166 bfd_vma offset, bfd_vma size)
16167 {
16168 Elf_Internal_Sym sym;
16169
16170 sym.st_value = osi->sec->output_section->vma
16171 + osi->sec->output_offset
16172 + offset;
16173 sym.st_size = size;
16174 sym.st_other = 0;
16175 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16176 sym.st_shndx = osi->sec_shndx;
16177 sym.st_target_internal = 0;
16178 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16179 }
16180
/* Emit the local symbol and mapping symbols describing a single
   generated stub, for the section currently selected in IN_ARG.  */
static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  /* Either bind the veneered symbol directly to the stub, or emit a
     local symbol naming it.  Thumb entry points get the low bit set
     in the symbol value.  */
  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the stub template, emitting a mapping symbol whenever the
     instruction kind (ARM/Thumb/data) changes; SIZE tracks the byte
     offset of the current template element.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by this element's encoded size.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
16288
16289 /* Output mapping symbols for linker generated sections,
16290 and for those data-only sections that do not have a
16291 $d. */
16292
static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Determine whether BLX is usable; the ARM->Thumb glue entry size
     chosen below depends on htab->use_blx.  */
  check_use_blx (htab);

  /* OSI carries the output callback and per-section state down into
     elf32_arm_output_map_sym and the traversal helpers.  */
  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      /* Only look at inputs that carry symbols and were not created by
	 the linker itself.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Candidate: a non-empty, non-linker-created section with
	       contents, going into an allocated or code output section,
	       that has no mapping symbols of its own.  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  Emit a $a at the start of each fixed-size glue
     entry and a $d for the literal word at its end.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* All entries in this section share one size; pick it from the
	 glue flavour in use.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  Each entry begins with Thumb code ($t) and
     switches to ARM code four bytes in ($a).  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  A single $a at offset 0 covers the section.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* Emit mapping symbols for each stub placed in this section.  */
	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  /* A single $a at the start covers the NaCl PLT header.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  /* Thumb-only header: code at 0, a data word at 12, code again
	     at 16.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Emit mapping symbols for global PLT entries, then for each
	 input's local iplt entries.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      /* NOTE(review): offset 24 presumably marks the literal pool that
	 follows the trampoline code — confirm against the code that
	 emits the tlsdesc trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
16523
16524 /* Allocate target specific section data. */
16525
16526 static bfd_boolean
16527 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16528 {
16529 if (!sec->used_by_bfd)
16530 {
16531 _arm_elf_section_data *sdata;
16532 bfd_size_type amt = sizeof (*sdata);
16533
16534 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16535 if (sdata == NULL)
16536 return FALSE;
16537 sec->used_by_bfd = sdata;
16538 }
16539
16540 return _bfd_elf_new_section_hook (abfd, sec);
16541 }
16542
16543
16544 /* Used to order a list of mapping symbols by address. */
16545
16546 static int
16547 elf32_arm_compare_mapping (const void * a, const void * b)
16548 {
16549 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16550 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16551
16552 if (amap->vma > bmap->vma)
16553 return 1;
16554 else if (amap->vma < bmap->vma)
16555 return -1;
16556 else if (amap->type > bmap->type)
16557 /* Ensure results do not depend on the host qsort for objects with
16558 multiple mapping symbols at the same address by sorting on type
16559 after vma. */
16560 return 1;
16561 else if (amap->type < bmap->type)
16562 return -1;
16563 else
16564 return 0;
16565 }
16566
16567 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16568
16569 static unsigned long
16570 offset_prel31 (unsigned long addr, bfd_vma offset)
16571 {
16572 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16573 }
16574
16575 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16576 relocations. */
16577
16578 static void
16579 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
16580 {
16581 unsigned long first_word = bfd_get_32 (output_bfd, from);
16582 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
16583
16584 /* High bit of first word is supposed to be zero. */
16585 if ((first_word & 0x80000000ul) == 0)
16586 first_word = offset_prel31 (first_word, offset);
16587
16588 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16589 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
16590 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
16591 second_word = offset_prel31 (second_word, offset);
16592
16593 bfd_put_32 (output_bfd, first_word, to);
16594 bfd_put_32 (output_bfd, second_word, to + 4);
16595 }
16596
16597 /* Data for make_branch_to_a8_stub(). */
16598
struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Section whose contents are being patched.  */
  bfd_byte *contents;		/* In-memory contents of that section.  */
};
16604
16605
16606 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16607 places for a particular section. */
16608
static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only handle stubs targeting the section being written, and only
     Cortex-A8 veneer stub types (>= arm_stub_a8_veneer_lwm).  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch source address is word-aligned before
     computing the offset.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Assemble the 32-bit Thumb branch to the veneer.  The jump24 label
     is entered from all three opcode cases with branch_insn holding the
     base opcode; the offset fields are then filled in.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Overwrite the veneered instruction with the branch, written as two
     halfwords (high halfword first).  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
16713
16714 /* Beginning of stm32l4xx work-around. */
16715
16716 /* Functions encoding instructions necessary for the emission of the
16717 fix-stm32l4xx-629360.
16718 Encoding is extracted from the
16719 ARM (C) Architecture Reference Manual
16720 ARMv7-A and ARMv7-R edition
16721 ARM DDI 0406C.b (ID072512). */
16722
16723 static inline bfd_vma
16724 create_instruction_branch_absolute (int branch_offset)
16725 {
16726 /* A8.8.18 B (A8-334)
16727 B target_address (Encoding T4). */
16728 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16729 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16730 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16731
16732 int s = ((branch_offset & 0x1000000) >> 24);
16733 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16734 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16735
16736 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16737 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16738
16739 bfd_vma patched_inst = 0xf0009000
16740 | s << 26 /* S. */
16741 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
16742 | j1 << 13 /* J1. */
16743 | j2 << 11 /* J2. */
16744 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
16745
16746 return patched_inst;
16747 }
16748
16749 static inline bfd_vma
16750 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16751 {
16752 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16753 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16754 bfd_vma patched_inst = 0xe8900000
16755 | (/*W=*/wback << 21)
16756 | (base_reg << 16)
16757 | (reg_mask & 0x0000ffff);
16758
16759 return patched_inst;
16760 }
16761
16762 static inline bfd_vma
16763 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16764 {
16765 /* A8.8.60 LDMDB/LDMEA (A8-402)
16766 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16767 bfd_vma patched_inst = 0xe9100000
16768 | (/*W=*/wback << 21)
16769 | (base_reg << 16)
16770 | (reg_mask & 0x0000ffff);
16771
16772 return patched_inst;
16773 }
16774
16775 static inline bfd_vma
16776 create_instruction_mov (int target_reg, int source_reg)
16777 {
16778 /* A8.8.103 MOV (register) (A8-486)
16779 MOV Rd, Rm (Encoding T1). */
16780 bfd_vma patched_inst = 0x4600
16781 | (target_reg & 0x7)
16782 | ((target_reg & 0x8) >> 3) << 7
16783 | (source_reg << 3);
16784
16785 return patched_inst;
16786 }
16787
16788 static inline bfd_vma
16789 create_instruction_sub (int target_reg, int source_reg, int value)
16790 {
16791 /* A8.8.221 SUB (immediate) (A8-708)
16792 SUB Rd, Rn, #value (Encoding T3). */
16793 bfd_vma patched_inst = 0xf1a00000
16794 | (target_reg << 8)
16795 | (source_reg << 16)
16796 | (/*S=*/0 << 20)
16797 | ((value & 0x800) >> 11) << 26
16798 | ((value & 0x700) >> 8) << 12
16799 | (value & 0x0ff);
16800
16801 return patched_inst;
16802 }
16803
16804 static inline bfd_vma
16805 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16806 int first_reg)
16807 {
16808 /* A8.8.332 VLDM (A8-922)
16809 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16810 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16811 | (/*W=*/wback << 21)
16812 | (base_reg << 16)
16813 | (num_words & 0x000000ff)
16814 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16815 | (first_reg & 0x00000001) << 22;
16816
16817 return patched_inst;
16818 }
16819
16820 static inline bfd_vma
16821 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16822 int first_reg)
16823 {
16824 /* A8.8.332 VLDM (A8-922)
16825 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16826 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16827 | (base_reg << 16)
16828 | (num_words & 0x000000ff)
16829 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16830 | (first_reg & 0x00000001) << 22;
16831
16832 return patched_inst;
16833 }
16834
16835 static inline bfd_vma
16836 create_instruction_udf_w (int value)
16837 {
16838 /* A8.8.247 UDF (A8-758)
16839 Undefined (Encoding T2). */
16840 bfd_vma patched_inst = 0xf7f0a000
16841 | (value & 0x00000fff)
16842 | (value & 0x000f0000) << 16;
16843
16844 return patched_inst;
16845 }
16846
16847 static inline bfd_vma
16848 create_instruction_udf (int value)
16849 {
16850 /* A8.8.247 UDF (A8-758)
16851 Undefined (Encoding T1). */
16852 bfd_vma patched_inst = 0xde00
16853 | (value & 0xff);
16854
16855 return patched_inst;
16856 }
16857
16858 /* Functions writing an instruction in memory, returning the next
16859 memory position to write to. */
16860
16861 static inline bfd_byte *
16862 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
16863 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16864 {
16865 put_thumb2_insn (htab, output_bfd, insn, pt);
16866 return pt + 4;
16867 }
16868
16869 static inline bfd_byte *
16870 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
16871 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16872 {
16873 put_thumb_insn (htab, output_bfd, insn, pt);
16874 return pt + 2;
16875 }
16876
16877 /* Function filling up a region in memory with T1 and T2 UDFs taking
16878 care of alignment. */
16879
16880 static bfd_byte *
16881 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16882 bfd * output_bfd,
16883 const bfd_byte * const base_stub_contents,
16884 bfd_byte * const from_stub_contents,
16885 const bfd_byte * const end_stub_contents)
16886 {
16887 bfd_byte *current_stub_contents = from_stub_contents;
16888
16889 /* Fill the remaining of the stub with deterministic contents : UDF
16890 instructions.
16891 Check if realignment is needed on modulo 4 frontier using T1, to
16892 further use T2. */
16893 if ((current_stub_contents < end_stub_contents)
16894 && !((current_stub_contents - base_stub_contents) % 2)
16895 && ((current_stub_contents - base_stub_contents) % 4))
16896 current_stub_contents =
16897 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16898 create_instruction_udf (0));
16899
16900 for (; current_stub_contents < end_stub_contents;)
16901 current_stub_contents =
16902 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16903 create_instruction_udf_w (0));
16904
16905 return current_stub_contents;
16906 }
16907
16908 /* Functions writing the stream of instructions equivalent to the
16909 derived sequence for ldmia, ldmdb, vldm respectively. */
16910
/* Emit, at BASE_STUB_CONTENTS, the veneer replacing the Thumb-2 LDMIA
   INITIAL_INSN found at INITIAL_INSN_ADDR.  Wide (more than 8 register)
   loads are split into two smaller LDMs to work around the stm32l4xx
   erratum; smaller loads are copied unchanged.  Unless the load writes
   PC, the veneer ends with a branch back to the instruction after the
   original one.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (write-back).  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the preconditions established by the scan pass.
     - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
17055
/* Emit, at BASE_STUB_CONTENTS, the veneer replacing the Thumb-2 LDMDB
   INITIAL_INSN found at INITIAL_INSN_ADDR.  Wide (more than 8 register)
   loads are split into two smaller LDMs to work around the stm32l4xx
   erratum; smaller loads are copied unchanged.  The cases below cover
   every valid combination of write-back, PC in the list, and the base
   register appearing in its own list.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (write-back).  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the preconditions established by the scan pass.
     - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  One case per combination of
     (wback, restore_pc, restore_rn).  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
17301
/* Write, at BASE_STUB_CONTENTS, a veneer replacing the Thumb-2 VLDM
   instruction INITIAL_INSN (located at INITIAL_INSN_ADDR) as part of
   the STM32L4XX erratum work-around: transfers of more than 8 words
   are split into several smaller VLDMs, then control branches back to
   the instruction following the original one.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Number of words transferred: the low 8 bits (imm8) of the encoding.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode from the P/U/W bits of the encoding;
	 exactly one of the three is_* predicates below should hold.  */
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Base register Rn: bits 16-19 of the encoding.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Last chunk carries the remainder (num_words - chunk*8).  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
17422
17423 static void
17424 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17425 bfd * output_bfd,
17426 const insn32 wrong_insn,
17427 const bfd_byte *const wrong_insn_addr,
17428 bfd_byte *const stub_contents)
17429 {
17430 if (is_thumb2_ldmia (wrong_insn))
17431 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17432 wrong_insn, wrong_insn_addr,
17433 stub_contents);
17434 else if (is_thumb2_ldmdb (wrong_insn))
17435 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17436 wrong_insn, wrong_insn_addr,
17437 stub_contents);
17438 else if (is_thumb2_vldm (wrong_insn))
17439 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17440 wrong_insn, wrong_insn_addr,
17441 stub_contents);
17442 }
17443
17444 /* End of stm32l4xx work-around. */
17445
17446
17447 static void
17448 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17449 asection *output_sec, Elf_Internal_Rela *rel)
17450 {
17451 BFD_ASSERT (output_sec && rel);
17452 struct bfd_elf_section_reloc_data *output_reldata;
17453 struct elf32_arm_link_hash_table *htab;
17454 struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17455 Elf_Internal_Shdr *rel_hdr;
17456
17457
17458 if (oesd->rel.hdr)
17459 {
17460 rel_hdr = oesd->rel.hdr;
17461 output_reldata = &(oesd->rel);
17462 }
17463 else if (oesd->rela.hdr)
17464 {
17465 rel_hdr = oesd->rela.hdr;
17466 output_reldata = &(oesd->rela);
17467 }
17468 else
17469 {
17470 abort ();
17471 }
17472
17473 bfd_byte *erel = rel_hdr->contents;
17474 erel += output_reldata->count * rel_hdr->sh_entsize;
17475 htab = elf32_arm_hash_table (info);
17476 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17477 output_reldata->count++;
17478 }
17479
/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  Besides byteswapping, this hook also applies
   the VFP11 and STM32L4XX erratum patches recorded for SEC, rewrites
   edited .ARM.exidx tables (returning TRUE in that case, since the
   contents have already been written), and redirects branches to
   Cortex-A8 erratum stubs.  */

static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Patch VFP11 denorm erratum sites: rewrite the offending VFP insn
     into a branch to its veneer, and fill in the veneer itself.  */
  if (errcount != 0)
    {
      /* XOR-mask used to address individual bytes of a word in a
	 big-endian (BE32) image.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				  | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B has a signed 26-bit (word-aligned) range.  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Patch STM32L4XX erratum sites: branch out to a veneer that replays
     the offending LDM/VLDM in an erratum-safe form.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb-2 B.W has a signed 25-bit range.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    (*_bfd_error_handler)
		      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
			 "Jump out of range by %ld bytes. "
			 "Cannot encode branch instruction. "),
		       output_bfd,
		       (long) (stm32l4xx_errnode->vma - 4),
		       out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
					     "veneer."), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* For an edited .ARM.exidx section, rebuild the table applying the
     recorded edits (deleted entries, inserted EXIDX_CANTUNWIND
     markers) and write it out ourselves, returning TRUE.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      /* NOTE(review): bfd_malloc result is used without a NULL check
	 below — confirm whether an OOM here is considered fatal.  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      /* Walk input entries (8 bytes each) and edit list in parallel.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;

			    /* New relocation entity.  */
			    asection *text_out = text_sec->output_section;
			    Elf_Internal_Rela rel;
			    rel.r_addend = 0;
			    rel.r_offset = exidx_offset;
			    rel.r_info = ELF32_R_INFO (text_out->target_index,
						       R_ARM_PREL31);

			    elf32_arm_add_relocation (output_bfd, link_info,
						      sec->output_section,
						      &rel);
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* Contents already written; tell the caller not to write again.  */
      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  /* Byte-swap instruction words/halfwords according to the mapping
     symbols ('a' = ARM code, 't' = Thumb code, 'd' = data).  */
  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The map is no longer needed; -1 marks it as released.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
17849
17850 /* Mangle thumb function symbols as we read them in. */
17851
17852 static bfd_boolean
17853 elf32_arm_swap_symbol_in (bfd * abfd,
17854 const void *psrc,
17855 const void *pshn,
17856 Elf_Internal_Sym *dst)
17857 {
17858 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17859 return FALSE;
17860 dst->st_target_internal = 0;
17861
17862 /* New EABI objects mark thumb function symbols by setting the low bit of
17863 the address. */
17864 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17865 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17866 {
17867 if (dst->st_value & 1)
17868 {
17869 dst->st_value &= ~(bfd_vma) 1;
17870 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17871 ST_BRANCH_TO_THUMB);
17872 }
17873 else
17874 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17875 }
17876 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17877 {
17878 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17879 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17880 }
17881 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17882 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17883 else
17884 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17885
17886 return TRUE;
17887 }
17888
17889
17890 /* Mangle thumb function symbols as we write them out. */
17891
17892 static void
17893 elf32_arm_swap_symbol_out (bfd *abfd,
17894 const Elf_Internal_Sym *src,
17895 void *cdst,
17896 void *shndx)
17897 {
17898 Elf_Internal_Sym newsym;
17899
17900 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17901 of the address set, as per the new EABI. We do this unconditionally
17902 because objcopy does not set the elf header flags until after
17903 it writes out the symbol table. */
17904 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17905 {
17906 newsym = *src;
17907 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17908 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17909 if (newsym.st_shndx != SHN_UNDEF)
17910 {
17911 /* Do this only for defined symbols. At link type, the static
17912 linker will simulate the work of dynamic linker of resolving
17913 symbols and will carry over the thumbness of found symbols to
17914 the output symbol table. It's not clear how it happens, but
17915 the thumbness of undefined symbols can well be different at
17916 runtime, and writing '1' for them will be confusing for users
17917 and possibly for dynamic linker itself.
17918 */
17919 newsym.st_value |= 1;
17920 }
17921
17922 src = &newsym;
17923 }
17924 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17925 }
17926
17927 /* Add the PT_ARM_EXIDX program header. */
17928
17929 static bfd_boolean
17930 elf32_arm_modify_segment_map (bfd *abfd,
17931 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17932 {
17933 struct elf_segment_map *m;
17934 asection *sec;
17935
17936 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17937 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17938 {
17939 /* If there is already a PT_ARM_EXIDX header, then we do not
17940 want to add another one. This situation arises when running
17941 "strip"; the input binary already has the header. */
17942 m = elf_seg_map (abfd);
17943 while (m && m->p_type != PT_ARM_EXIDX)
17944 m = m->next;
17945 if (!m)
17946 {
17947 m = (struct elf_segment_map *)
17948 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17949 if (m == NULL)
17950 return FALSE;
17951 m->p_type = PT_ARM_EXIDX;
17952 m->count = 1;
17953 m->sections[0] = sec;
17954
17955 m->next = elf_seg_map (abfd);
17956 elf_seg_map (abfd) = m;
17957 }
17958 }
17959
17960 return TRUE;
17961 }
17962
17963 /* We may add a PT_ARM_EXIDX program header. */
17964
17965 static int
17966 elf32_arm_additional_program_headers (bfd *abfd,
17967 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17968 {
17969 asection *sec;
17970
17971 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17972 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17973 return 1;
17974 else
17975 return 0;
17976 }
17977
17978 /* Hook called by the linker routine which adds symbols from an object
17979 file. */
17980
17981 static bfd_boolean
17982 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17983 Elf_Internal_Sym *sym, const char **namep,
17984 flagword *flagsp, asection **secp, bfd_vma *valp)
17985 {
17986 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17987 && (abfd->flags & DYNAMIC) == 0
17988 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17989 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17990
17991 if (elf32_arm_hash_table (info) == NULL)
17992 return FALSE;
17993
17994 if (elf32_arm_hash_table (info)->vxworks_p
17995 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17996 flagsp, secp, valp))
17997 return FALSE;
17998
17999 return TRUE;
18000 }
18001
/* We use this to override swap_symbol_in and swap_symbol_out.  */
/* Field order follows struct elf_size_info (see elf-bfd.h); the
   per-field comments below label the less obvious entries.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* hash-table entry size.  */
  1,		/* internal relocs per external reloc.  */
  32, 2,	/* arch_size, log_file_align.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* ARM-specific override.  */
  elf32_arm_swap_symbol_out,	/* ARM-specific override.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
18032
18033 static bfd_vma
18034 read_code32 (const bfd *abfd, const bfd_byte *addr)
18035 {
18036 /* V7 BE8 code is always little endian. */
18037 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
18038 return bfd_getl32 (addr);
18039
18040 return bfd_get_32 (abfd, addr);
18041 }
18042
18043 static bfd_vma
18044 read_code16 (const bfd *abfd, const bfd_byte *addr)
18045 {
18046 /* V7 BE8 code is always little endian. */
18047 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
18048 return bfd_getl16 (addr);
18049
18050 return bfd_get_16 (abfd, addr);
18051 }
18052
18053 /* Return size of plt0 entry starting at ADDR
18054 or (bfd_vma) -1 if size can not be determined. */
18055
18056 static bfd_vma
18057 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
18058 {
18059 bfd_vma first_word;
18060 bfd_vma plt0_size;
18061
18062 first_word = read_code32 (abfd, addr);
18063
18064 if (first_word == elf32_arm_plt0_entry[0])
18065 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
18066 else if (first_word == elf32_thumb2_plt0_entry[0])
18067 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
18068 else
18069 /* We don't yet handle this PLT format. */
18070 return (bfd_vma) -1;
18071
18072 return plt0_size;
18073 }
18074
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

  /* The trailing `else' below belongs to whichever if/else-if chain the
     preprocessor selects.  */
#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
18114
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */

/* Build synthetic "foo@plt" symbols for the entries of the PLT, one
   per .rel.plt relocation, storing them in *RET.  Returns the number
   of symbols created, 0 if there is nothing to do, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* .rel.plt must be a reloc section tied to the dynamic symtab.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the total size needed for the symbols plus
     their "name@plt" (and optional "+0xADDEND") strings.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* NOTE(review): on this error path *RET keeps the allocation; the
     caller is presumably expected to free it — confirm.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in one synthetic symbol per PLT entry, placing
     the name strings after the asymbol array in the same block.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeroes of the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
18231
18232 static bfd_boolean
18233 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
18234 {
18235 if (hdr->sh_flags & SHF_ARM_PURECODE)
18236 *flags |= SEC_ELF_PURECODE;
18237 return TRUE;
18238 }
18239
18240 static flagword
18241 elf32_arm_lookup_section_flags (char *flag_name)
18242 {
18243 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
18244 return SHF_ARM_PURECODE;
18245
18246 return SEC_NO_FLAGS;
18247 }
18248
18249 static unsigned int
18250 elf32_arm_count_additional_relocs (asection *sec)
18251 {
18252 struct _arm_elf_section_data *arm_data;
18253 arm_data = get_arm_elf_section_data (sec);
18254 return arm_data->additional_reloc_count;
18255 }
18256
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* Index of the text section header to link to; 0 means "not
	   found yet" (section 0 is the NULL section, never a match).  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output header corresponding to the input's
	       sh_link target.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	/* i == 0 here means no candidate was found; fall through and
	   report failure.  */
	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
18347
18348 /* Returns TRUE if NAME is an ARM mapping symbol.
18349 Traditionally the symbols $a, $d and $t have been used.
18350 The ARM ELF standard also defines $x (for A64 code). It also allows a
18351 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
18352 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
18353 not support them here. $t.x indicates the start of ThumbEE instructions. */
18354
18355 static bfd_boolean
18356 is_arm_mapping_symbol (const char * name)
18357 {
18358 return name != NULL /* Paranoia. */
18359 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
18360 the mapping symbols could have acquired a prefix.
18361 We do not support this here, since such symbols no
18362 longer conform to the ARM ELF ABI. */
18363 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
18364 && (name[2] == 0 || name[2] == '.');
18365 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
18366 any characters that follow the period are legal characters for the body
18367 of a symbol's name. For now we just assume that this is the case. */
18368 }
18369
18370 /* Make sure that mapping symbols in object files are not removed via the
18371 "strip --strip-unneeded" tool. These symbols are needed in order to
18372 correctly generate interworking veneers, and for byte swapping code
18373 regions. Once an object file has been linked, it is safe to remove the
18374 symbols as they will no longer be needed. */
18375
18376 static void
18377 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
18378 {
18379 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
18380 && sym->section != bfd_abs_section_ptr
18381 && is_arm_mapping_symbol (sym->name))
18382 sym->flags |= BSF_KEEP;
18383 }
18384
#undef elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

/* Target identification and page-size parameters for the stock
   ELF/ARM targets.  */
#define ELF_ARCH			bfd_arch_arm
#define ELF_TARGET_ID			ARM_ELF_DATA
#define ELF_MACHINE_CODE		EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x10000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject			elf32_arm_mkobject

/* Generic BFD vector entry points, routed to the ARM implementations
   defined earlier in this file.  */
#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line		elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab		elf32_arm_get_synthetic_symtab

/* ELF backend hooks.  */
#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook		elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing

/* Stock ARM/ELF uses REL relocations (addends stored in the section
   contents), not RELA.  */
#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    1
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_may_use_rel_p      1
#define elf_backend_may_use_rela_p     0
#define elf_backend_default_use_rela_p 0

#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

/* EABI build-attribute (".ARM.attributes") handling.  */
#undef	elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef	elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef	elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

#undef	elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef	elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags

#include "elf32-target.h"

/* Native Client targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
18485
18486 /* Like elf32_arm_link_hash_table_create -- but overrides
18487 appropriately for NaCl. */
18488
18489 static struct bfd_link_hash_table *
18490 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18491 {
18492 struct bfd_link_hash_table *ret;
18493
18494 ret = elf32_arm_link_hash_table_create (abfd);
18495 if (ret)
18496 {
18497 struct elf32_arm_link_hash_table *htab
18498 = (struct elf32_arm_link_hash_table *) ret;
18499
18500 htab->nacl_p = 1;
18501
18502 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18503 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18504 }
18505 return ret;
18506 }
18507
18508 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18509 really need to use elf32_arm_modify_segment_map. But we do it
18510 anyway just to reduce gratuitous differences with the stock ARM backend. */
18511
18512 static bfd_boolean
18513 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18514 {
18515 return (elf32_arm_modify_segment_map (abfd, info)
18516 && nacl_modify_segment_map (abfd, info));
18517 }
18518
/* Final-write hook for the NaCl targets: perform the standard ARM
   processing first, then the generic NaCl processing.  The order
   matters -- NaCl post-processes the output the ARM code produced.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
18525
18526 static bfd_vma
18527 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18528 const arelent *rel ATTRIBUTE_UNUSED)
18529 {
18530 return plt->vma
18531 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18532 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18533 }
18534
/* Backend overrides for the NaCl targets.  */
#undef	elf32_bed
#define elf32_bed			elf32_arm_nacl_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment	4
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef	elf_backend_modify_program_headers
#define elf_backend_modify_program_headers	nacl_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val		elf32_arm_nacl_plt_sym_val
#undef	elf_backend_copy_special_section_fields

#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE


#include "elf32-target.h"

/* Reset to defaults.  */
#undef	elf_backend_plt_alignment
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef	elf_backend_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef	ELF_MINPAGESIZE
#define ELF_MINPAGESIZE			0x1000
#undef	ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE		0x1000


/* VxWorks Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
18582
18583 /* Like elf32_arm_link_hash_table_create -- but overrides
18584 appropriately for VxWorks. */
18585
18586 static struct bfd_link_hash_table *
18587 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18588 {
18589 struct bfd_link_hash_table *ret;
18590
18591 ret = elf32_arm_link_hash_table_create (abfd);
18592 if (ret)
18593 {
18594 struct elf32_arm_link_hash_table *htab
18595 = (struct elf32_arm_link_hash_table *) ret;
18596 htab->use_rel = 0;
18597 htab->vxworks_p = 1;
18598 }
18599 return ret;
18600 }
18601
/* Final-write hook for the VxWorks targets: run the standard ARM
   processing first, then the generic VxWorks processing.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
18608
/* Backend overrides for the VxWorks targets: RELA relocations,
   PLT symbols wanted, 4K max page size.  */
#undef	elf32_bed
#define elf32_bed			elf32_arm_vxworks_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef	elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

#include "elf32-target.h"
18631
18632
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE (and sets the bfd error)
   on a hard incompatibility such as an EABI version mismatch;
   otherwise returns whether all checked e_flags were compatible.
   Some mismatches (interworking) only produce warnings.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  /* Non-ARM inputs (e.g. linker scripts, generic objects) have nothing
     to merge; treat them as trivially compatible.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 versus APCS-32 calling conventions.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float argument passing convention (float regs vs integer regs).  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP versus FPA floating point instruction sets.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* Cirrus Maverick co-processor usage.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
18856
18857
/* Symbian OS Targets.  */

/* Target vector names for the Symbian OS (BPABI) targets.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
18868
18869 /* Like elf32_arm_link_hash_table_create -- but overrides
18870 appropriately for Symbian OS. */
18871
18872 static struct bfd_link_hash_table *
18873 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18874 {
18875 struct bfd_link_hash_table *ret;
18876
18877 ret = elf32_arm_link_hash_table_create (abfd);
18878 if (ret)
18879 {
18880 struct elf32_arm_link_hash_table *htab
18881 = (struct elf32_arm_link_hash_table *)ret;
18882 /* There is no PLT header for Symbian OS. */
18883 htab->plt_header_size = 0;
18884 /* The PLT entries are each one instruction and one word. */
18885 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18886 htab->symbian_p = 1;
18887 /* Symbian uses armv5t or above, so use_blx is always true. */
18888 htab->use_blx = 1;
18889 htab->root.is_relocatable_executable = 1;
18890 }
18891 return ret;
18892 }
18893
/* Per-section attribute overrides for Symbian OS (BPABI) targets.
   Entries give name, type and the BFD attributes forced for that
   section; the table is terminated by a NULL name.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                             0, 0, 0,            0 }
};
18914
/* Begin-write hook for Symbian OS targets: strip the D_PAGED flag
   before delegating to the standard ARM hook.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
18930
18931 static bfd_boolean
18932 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18933 struct bfd_link_info *info)
18934 {
18935 struct elf_segment_map *m;
18936 asection *dynsec;
18937
18938 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18939 segment. However, because the .dynamic section is not marked
18940 with SEC_LOAD, the generic ELF code will not create such a
18941 segment. */
18942 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18943 if (dynsec)
18944 {
18945 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18946 if (m->p_type == PT_DYNAMIC)
18947 break;
18948
18949 if (m == NULL)
18950 {
18951 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18952 m->next = elf_seg_map (abfd);
18953 elf_seg_map (abfd) = m;
18954 }
18955 }
18956
18957 /* Also call the generic arm routine. */
18958 return elf32_arm_modify_segment_map (abfd, info);
18959 }
18960
18961 /* Return address for Ith PLT stub in section PLT, for relocation REL
18962 or (bfd_vma) -1 if it should not be included. */
18963
18964 static bfd_vma
18965 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18966 const arelent *rel ATTRIBUTE_UNUSED)
18967 {
18968 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18969 }
18970
/* Backend overrides for the Symbian OS (BPABI) targets.  */
#undef	elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef	ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef	elf_backend_emit_relocs

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef	elf_backend_special_sections
#define elf_backend_special_sections		elf32_arm_symbian_special_sections
#undef	elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef	elf_backend_got_header_size
#define elf_backend_got_header_size	0

/* Similarly, there is no .got.plt section.  */
#undef	elf_backend_want_got_plt
#define elf_backend_want_got_plt	0

#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val		elf32_arm_symbian_plt_sym_val

/* Symbian reverts to REL relocations and no PLT symbols.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	1
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	0
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	0
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x8000

#include "elf32-target.h"
/* (Scrape artifact from a git web viewer, not part of elf32-arm.c:
   "This page took 0.592342 seconds and 4 git commands to generate.")  */