/* [ARM] Implement PLT for FDPIC.
   [deliverable/binutils-gdb.git] / bfd / elf32-arm.c  */
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2018 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
33 /* Return the relocation section associated with NAME.  HTAB is the
34 bfd's elf32_arm_link_hash_table (its use_rel flag selects REL vs
   RELA naming) -- note: not a hash *entry*.  */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
38 /* Return size of a relocation entry.  HTAB is the bfd's
39 elf32_arm_link_hash_table (not a hash entry); its use_rel flag
   picks the external REL or RELA record size.  */
40 #define RELOC_SIZE(HTAB) \
41 ((HTAB)->use_rel \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
44
45 /* Return function to swap relocations in.  HTAB is the bfd's
46 elf32_arm_link_hash_table (not a hash entry); its use_rel flag
   selects the REL or RELA swap-in routine.  */
47 #define SWAP_RELOC_IN(HTAB) \
48 ((HTAB)->use_rel \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
51
52 /* Return function to swap relocations out.  HTAB is the bfd's
53 elf32_arm_link_hash_table (not a hash entry); its use_rel flag
   selects the REL or RELA swap-out routine.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55 ((HTAB)->use_rel \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
58
/* Only the REL flavour of the info-to-howto hook is provided; the
   generic (RELA) hook is deliberately left as NULL and REL relocation
   info is decoded by elf32_arm_info_to_howto.  */
59 #define elf_info_to_howto NULL
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61
/* ABI version numbers for ARM ELF output.  NOTE(review): presumably
   written into the ELF header's e_ident (EI_ABIVERSION / EI_OSABI) --
   confirm where these are consumed; not visible in this chunk.  */
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
65 /* The Adjusted Place, as defined by AAELF: the value with its low
   two bits cleared, i.e. forced down to 4-byte (ARM word) alignment.  */
66 #define Pa(X) ((X) & 0xfffffffc)
67
/* Forward declaration: hook that post-processes SEC's CONTENTS as the
   section is written to OUTPUT_BFD.  NOTE(review): the definition is
   not visible in this chunk -- presumably it appears later in this
   file, as is usual for these static hooks.  */
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
70 asection *sec,
71 bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
74 R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
75 HOWTO in that slot.  */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
/* Relocation types 160 onwards (R_ARM_IRELATIVE and the FDPIC group).
   Indexed from R_ARM_IRELATIVE by elf32_arm_howto_from_type.  */
static reloc_howto_type elf32_arm_howto_table_2[5] =
{
  /* Dynamic relocation whose target is computed at load time by
     calling the (ifunc) resolver at the relocated address.  */
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* FDPIC: offset of the GOT entry holding the address of the symbol's
     function descriptor (see the ARM FDPIC ABI).  */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* FDPIC: GOT-relative offset of the symbol's function descriptor.  */
  HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* FDPIC: address of the symbol's function descriptor.  */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* FDPIC: the 8-byte function-descriptor contents themselves, hence
     the 64-bit bitsize.  NOTE(review): dst_mask covers only the low
     32-bit word — presumably the generic machinery only masks the
     first word; confirm against the relocation-applying code.  */
  HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1817
/* 252-255 extended, currently unused, relocations (R_ARM_RREL32 up to
   R_ARM_RBASE; indexed from R_ARM_RREL32 by elf32_arm_howto_from_type).
   All fields are zero/dont placeholders, so applying one is a no-op.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1877
1878 static reloc_howto_type *
1879 elf32_arm_howto_from_type (unsigned int r_type)
1880 {
1881 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1882 return &elf32_arm_howto_table_1[r_type];
1883
1884 if (r_type >= R_ARM_IRELATIVE
1885 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1886 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1887
1888 if (r_type >= R_ARM_RREL32
1889 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1890 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1891
1892 return NULL;
1893 }
1894
1895 static bfd_boolean
1896 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1897 Elf_Internal_Rela * elf_reloc)
1898 {
1899 unsigned int r_type;
1900
1901 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1902 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1903 {
1904 /* xgettext:c-format */
1905 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1906 abfd, r_type);
1907 bfd_set_error (bfd_error_bad_value);
1908 return FALSE;
1909 }
1910 return TRUE;
1911 }
1912
/* One entry mapping a BFD relocation code to its ELF R_ARM_* number.  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;	/* BFD's name for the reloc.  */
    unsigned char	      elf_reloc_val;	/* ELF R_ARM_* number; ARM
						   reloc numbers fit in a
						   byte (max 255).  */
  };
1918
1919 /* All entries in this list must also be present in elf32_arm_howto_table. */
1920 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1921 {
1922 {BFD_RELOC_NONE, R_ARM_NONE},
1923 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1924 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1925 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1926 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1927 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1928 {BFD_RELOC_32, R_ARM_ABS32},
1929 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1930 {BFD_RELOC_8, R_ARM_ABS8},
1931 {BFD_RELOC_16, R_ARM_ABS16},
1932 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1933 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1934 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1935 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1936 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1937 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1938 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1939 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1940 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1941 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1942 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1943 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1944 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1945 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1946 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1947 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1948 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1949 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1950 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1951 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1952 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1953 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1954 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1955 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1956 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1957 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1958 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1959 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1960 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1961 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1962 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1963 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1964 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1965 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1966 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1967 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1968 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1969 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
1970 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
1971 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
1972 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
1973 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1974 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1975 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1976 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1977 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1978 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1979 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1980 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1981 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1982 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1983 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1984 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1985 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1986 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1987 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1988 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1989 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1990 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1991 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1992 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1993 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1994 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1995 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1996 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1997 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1998 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1999 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2000 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2001 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2002 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2003 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2004 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2005 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2006 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2007 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2008 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2009 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2010 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2011 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2012 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2013 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2014 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2015 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
2016 };
2017
2018 static reloc_howto_type *
2019 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2020 bfd_reloc_code_real_type code)
2021 {
2022 unsigned int i;
2023
2024 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2025 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2026 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2027
2028 return NULL;
2029 }
2030
2031 static reloc_howto_type *
2032 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2033 const char *r_name)
2034 {
2035 unsigned int i;
2036
2037 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2038 if (elf32_arm_howto_table_1[i].name != NULL
2039 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2040 return &elf32_arm_howto_table_1[i];
2041
2042 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2043 if (elf32_arm_howto_table_2[i].name != NULL
2044 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2045 return &elf32_arm_howto_table_2[i];
2046
2047 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2048 if (elf32_arm_howto_table_3[i].name != NULL
2049 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2050 return &elf32_arm_howto_table_3[i];
2051
2052 return NULL;
2053 }
2054
/* Support for core dump NOTE sections.  */

/* Parse an NT_PRSTATUS note.  NOTE->descsz selects the (only) known
   layout; any other size is rejected.  On success, record the signal
   and LWP id in ABFD's core data and create a ".reg/<pid>"
   pseudo-section covering the general-purpose register block.  */
static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 148:		/* Linux/ARM 32-bit.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg: 72 bytes of registers starting 72 bytes into the
	 descriptor.  */
      offset = 72;
      size = 72;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
2086
2087 static bfd_boolean
2088 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2089 {
2090 switch (note->descsz)
2091 {
2092 default:
2093 return FALSE;
2094
2095 case 124: /* Linux/ARM elf_prpsinfo. */
2096 elf_tdata (abfd)->core->pid
2097 = bfd_get_32 (abfd, note->descdata + 12);
2098 elf_tdata (abfd)->core->program
2099 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2100 elf_tdata (abfd)->core->command
2101 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2102 }
2103
2104 /* Note that for some reason, a spurious space is tacked
2105 onto the end of the args in some (at least one anyway)
2106 implementations, so strip it off if it exists. */
2107 {
2108 char *command = elf_tdata (abfd)->core->command;
2109 int n = strlen (command);
2110
2111 if (0 < n && command[n - 1] == ' ')
2112 command[n - 1] = '\0';
2113 }
2114
2115 return TRUE;
2116 }
2117
/* Write a Linux/ARM NT_PRPSINFO or NT_PRSTATUS note into BUF.
   The variable arguments depend on NOTE_TYPE:
     NT_PRPSINFO:  const char *fname (16 bytes used),
		   const char *psargs (80 bytes used);
     NT_PRSTATUS:  long pid, int cursig, const void *gregs (72 bytes).
   Field offsets match the layouts read back by the grok functions
   above (pr_cursig at 12, pr_pid at 24, pr_reg at 72, ...).
   Returns the updated buffer, or NULL for an unhandled NOTE_TYPE.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];		/* Size of Linux/ARM elf_prpsinfo.  */
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* strncpy into fixed-size, pre-zeroed fields: the result is
	   zero-padded, and need not be NUL-terminated when the source
	   exactly fills the field.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];		/* Size of Linux/ARM 32-bit elf_prstatus.  */
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg.  */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2165
/* BFD target vectors for little- and big-endian 32-bit ARM ELF.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Hook up the core-file note handlers defined above.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Convenience typedefs for 32-bit ARM and 16-bit Thumb instruction
   words.  */
typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
   || ((abfd)->flags & BFD_LINKER_CREATED))
2184
/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

/* %x in the veneer entry names below is filled with a numeric veneer
   id rather than a function name.  */
#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* NOTE(review): presumably the prefix for ARMv8-M Security Extensions
   (CMSE) secure-entry symbols — confirm against the uses of CMSE_PREFIX
   elsewhere in this file.  */
#define CMSE_PREFIX "__acle_se_"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2211
/* Three-instruction ARM fragment: computes lr + r0 into r0, loads a
   code address from offset 4 of the result and branches to it.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Lazy TLS-descriptor resolver trampoline.  The two trailing data
   words are GOT-relative offsets patched in when the trampoline is
   emitted (see the label comments).  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};

/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
  {
    0xe59fc008,    /* ldr     r12, .L1 */
    0xe08cc009,    /* add     r12, r12, r9 */
    0xe59c9004,    /* ldr     r9, [r12, #4] */
    0xe59cf000,    /* ldr     pc, [r12] */
    0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* L1.     .word   foo(funcdesc_value_reloc_offset) */
    0xe51fc00c,    /* ldr     r12, [pc, #-12] */
    0xe92d1000,    /* push    {r12} */
    0xe599c004,    /* ldr     r12, [r9, #4] */
    0xe599f000,    /* ldr     pc, [r9] */
  };
2247
/* FOUR_WORD_PLT selects fixed four-word PLT entries; otherwise a
   three-word "short" entry is used by default, with an optional
   four-word "long" form for arbitrary displacements.  */
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe010,		/* ldr   lr, [pc, #16]  */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  };

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
  {
    0xe28fc600,		/* add   ip, pc, #NN	*/
    0xe28cca00,		/* add	 ip, ip, #NN	*/
    0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
    0x00000000,		/* unused		*/
  };

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe004,		/* ldr   lr, [pc, #4]   */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
    0x00000000,		/* &GOT[0] - .          */
  };

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
  {
    0xe28fc600,		/* add   ip, pc, #0xNN00000 */
    0xe28cca00,		/* add	 ip, ip, #0xNN000   */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
  };

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
  {
    0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
    0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
    0xe28cca00,		/* add	 ip, ip, #0xNN000    */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
  };

/* Whether to emit the "long" PLT entry form; FALSE unless requested.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2309
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0xf8dfb500,		/* push	   {lr}		 */
    0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
    0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
    0x00000000,		/* &GOT[0] - .		 */
  };

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  The address of the target symbol is built by movw/movt
   and then made PC-relative by the add.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0x0c00f240,		/* movw	   ip, #0xNNNN	  */
    0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
    0xf8dc44fc,		/* add	   ip, pc	  */
    0xbf00f000		/* ldr.w   pc, [ip]	  */
			/* nop			  */
  };
2336
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  (VxWorks uses RELA relocations for its
   PLT: note the sizeof (Elf32_Rela) terms below.)  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
  {
    0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
    0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
  };

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf000,		/* ldr	  pc,[ip]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xea000000,		/* b	  _PLT				*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* The format of entries in a VxWorks shared library.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe599f008,		/* ldr	  pc,[r9,#8]			*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };
2368
/* An initial stub used if the PLT entry is referenced from Thumb code.
   The bx pc switches to ARM state and falls through into the ARM-mode
   PLT entry that follows.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
  {
    0x4778,		/* bx pc */
    0x46c0		/* nop   */
  };

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  The entry simply jumps through the literal word
   that follows it.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
  {
    0xe51ff004,	    /* ldr   pc, [pc, #-4] */
    0x00000000,	    /* dcd   R_ARM_GLOB_DAT(X) */
  };
2384
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  The NaCl sandbox requires the bic/bx bundling
   pattern seen below.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
  {
    /* First bundle: */
    0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xe52dc008,		/* str	ip, [sp, #-8]!			*/
    /* Second bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
    /* Third bundle: */
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    /* .Lplt_tail: */
    0xe50dc004,		/* str	ip, [sp, #-4]			*/
    /* Fourth bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
  };
/* Byte offset of .Lplt_tail above: word index 11.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.
   Each one branches to .Lplt_tail in the first entry.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
  {
    0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xea000000,		/* b	.Lplt_tail			*/
  };
2423
/* Maximum reachable branch displacements for the various branch
   encodings.  The +8 / +4 terms account for the PC reading as
   "address + 8" in ARM state and "address + 4" in Thumb state.  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23)) << 2) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2432
/* Kind of element in a stub template: a 16-bit or 32-bit Thumb
   instruction, an ARM instruction, or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Constructors for insn_sequence entries.  The third field names the
   ELF relocation to apply to the entry (R_ARM_NONE for none), the
   fourth its addend.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template.  */
typedef struct
{
  bfd_vma	      data;		/* Encoding, or literal word.  */
  enum stub_insn_type type;		/* THUMB16/THUMB32/ARM/DATA.  */
  unsigned int	      r_type;		/* ELF relocation for this entry.  */
  int		      reloc_addend;	/* Addend for that relocation.  */
}  insn_sequence;
2460
/* Stub templates.  Each array below is a sequence of insn_sequence
   entries; DATA_WORD entries are literal-pool words carrying the named
   relocation and addend.  */

/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
  {
    ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
  {
    ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	    /* bx    ip */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
  {
    THUMB16_INSN (0xb401),	     /* push {r0} */
    THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
    THUMB16_INSN (0xbc01),	     /* pop  {r0} */
    THUMB16_INSN (0x4760),	     /* bx   ip */
    THUMB16_INSN (0xbf00),	     /* nop */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
  {
    THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(x) */
  };

/* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
   M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
  {
    THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
    THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
    THUMB16_INSN (0x4760),	     /* bx   ip */
  };

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0x46c0),	     /* nop */
    ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	     /* bx   ip */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0x46c0),	     /* nop   */
    ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0x46c0),	     /* nop   */
    ARM_REL_INSN (0xea000000, -8),   /* b    (X-8) */
  };
2535
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
  {
    ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
    ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd   R_ARM_REL32(X-4) */
  };

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd   R_ARM_REL32(X) */
  };

/* V4T ARM -> Thumb long branch stub, PIC.  (The old header comment
   said "ARM -> ARM"; the template name and the bx ip below show it is
   an interworking stub.)  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd   R_ARM_REL32(X) */
  };

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0x46c0),	     /* nop  */
    ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd  R_ARM_REL32(X-4) */
  };

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
  {
    THUMB16_INSN (0xb401),	     /* push {r0} */
    THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
    THUMB16_INSN (0x4484),	     /* add  ip, r0 */
    THUMB16_INSN (0xbc01),	     /* pop  {r0} */
    THUMB16_INSN (0x4760),	     /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 4),   /* dcd  R_ARM_REL32(X+4) */
  };

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0x46c0),	     /* nop */
    ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd  R_ARM_REL32(X) */
  };

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
  {
    ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
    ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd   R_ARM_REL32(X-4) */
  };

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0x46c0),	     /* nop */
    ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
    ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd  R_ARM_REL32(X-4) */
  };
2620
/* NaCl ARM -> ARM long branch stub.  Padded with data words so the
   stub size is a multiple of the NaCl bundle size.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe320f000),		/* nop */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };

/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
  {
    THUMB32_INSN (0xe97fe97f),		/* sg.  */
    THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
  };


/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
  {
    THUMB16_BCOND_INSN (0xd001),	/* b<cond>.n true.  */
    THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
    THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
  };

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
  {
    ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
  };
2687
2688 /* For each section group there can be a specially created linker section
2689 to hold the stubs for that group. The name of the stub section is based
2690 upon the name of another section within that group with the suffix below
2691 applied.
2692
2693 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2694 create what appeared to be a linker stub section when it actually
2695 contained user code/data. For example, consider this fragment:
2696
2697 const char * stubborn_problems[] = { "np" };
2698
2699 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2700 section called:
2701
2702 .data.rel.local.stubborn_problems
2703
2704 This then causes problems in arm32_arm_build_stubs() as it triggers:
2705
2706 // Ignore non-stub sections.
2707 if (!strstr (stub_sec->name, STUB_SUFFIX))
2708 continue;
2709
2710 And so the section would be ignored instead of being processed. Hence
2711 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2712 C identifier. */
2713 #define STUB_SUFFIX ".__stub"
2714
/* One entry per long/short branch stub defined above.  The same list
   expands, via DEF_STUB, into both enum elf32_arm_stub_type and
   stub_definitions[], so the two stay in the same order by
   construction.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template and its length in insn_sequence entries.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Indexed by enum elf32_arm_stub_type; the leading {NULL, 0} entry
   corresponds to arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
  {
    {NULL, 0},
    DEF_STUBS
  };
2765
/* Hash table entry describing one linker-generated branch stub.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2817
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Address at which this map entry takes effect.  */
  bfd_vma vma;
  /* Mapping symbol class character (presumably 'a'/'t'/'d' per the ARM
     ELF mapping-symbol convention -- confirm where the map is filled).  */
  char type;
}
elf32_arm_section_map;
2827
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* One node in a per-section list of VFP11 erratum records.  TYPE selects
   which arm of the union U is valid.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  /* Location of the erratum site or of the veneer.  */
  bfd_vma vma;
  union
  {
    /* Valid for the BRANCH_TO_*_VENEER kinds.  */
    struct
    {
      /* The veneer record this branch targets.  */
      struct elf32_vfp11_erratum_list *veneer;
      /* The VFP instruction involved (presumably the insn displaced by
	 the branch -- confirm at the fix-application site).  */
      unsigned int vfp_insn;
    } b;
    /* Valid for the *_VENEER kinds.  */
    struct
    {
      /* Back-pointer to the corresponding branch record.  */
      struct elf32_vfp11_erratum_list *branch;
      /* Sequence number for this veneer (presumably used to name its
	 symbol -- confirm against the veneer-naming code).  */
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2859
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* One node in a per-section list of STM32L4XX erratum records.  TYPE
   selects which arm of the union U is valid.  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  /* Location of the erratum site or of the veneer.  */
  bfd_vma vma;
  union
  {
    /* Valid for STM32L4XX_ERRATUM_BRANCH_TO_VENEER.  */
    struct
    {
      /* The veneer record this branch targets.  */
      struct elf32_stm32l4xx_erratum_list *veneer;
      /* The instruction involved (presumably the offending multiple-load
	 insn -- confirm at the fix-application site).  */
      unsigned int insn;
    } b;
    /* Valid for STM32L4XX_ERRATUM_VENEER.  */
    struct
    {
      /* Back-pointer to the corresponding branch record.  */
      struct elf32_stm32l4xx_erratum_list *branch;
      /* Sequence number for this veneer (presumably used to name its
	 symbol -- confirm against the veneer-naming code).  */
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
2889
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the unwind table entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2909
/* ARM-specific data attached to each section; extends the generic ELF
   section data.  Retrieve with elf32_arm_section_data().  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  /* Number of entries used in MAP.  */
  unsigned int mapcount;
  /* Allocated capacity of MAP (assumption from the count/size pairing --
     confirm where the map is grown).  */
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  /* Count of extra relocations for this section (NOTE(review): purpose
     not visible here -- confirm at the users of this field).  */
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Accessor for the ARM-specific section data of SEC.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
2944
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  /* The bfd and section containing the instruction to be fixed.  */
  bfd *input_bfd;
  asection *section;
  /* Offset of the offending instruction within SECTION.  */
  bfd_vma offset;
  /* Offset of the branch target.  */
  bfd_vma target_offset;
  /* The original instruction at OFFSET.  */
  unsigned long orig_insn;
  /* Name for the stub generated for this fix.  */
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Address the relocated branch lives at.  */
  bfd_vma from;
  /* Address the branch resolves to.  */
  bfd_vma destination;
  /* Hash entry and name of the branch target symbol.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  /* The relocation type applied at FROM.  */
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* Whether a non-Cortex-A8 stub already exists for this branch
     (assumption from the name -- confirm in elf32_arm_size_stubs).  */
  bfd_boolean non_a8_stub;
};
2976
/* The size of the thread control block.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  -1 while unassigned (see
     elf32_arm_link_hash_newfunc).  */
  bfd_signed_vma got_offset;
};
3004
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};

/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local {
  /* Count of function-descriptor references against this symbol
     (presumably R_ARM_FUNCDESC relocations -- confirm in check_relocs).  */
  unsigned int funcdesc_cnt;
  /* Count of GOT-offset function-descriptor references (presumably
     R_ARM_GOTOFFFUNCDESC relocations -- confirm in check_relocs).  */
  unsigned int gotofffuncdesc_cnt;
  /* Offset of the function descriptor; bit 0 doubles as an "already
     written" flag (see arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
};
3026
/* ARM-specific per-bfd data; extends the generic ELF tdata.  The
   per-symbol arrays below are indexed by local symbol number and are
   carved out of one allocation (see elf32_arm_allocate_local_sym_info).  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};

/* Accessors for the per-bfd data above.  */

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

/* True if BFD is an ELF object carrying ARM tdata.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
3069
/* Allocate ARM-specific tdata for ABFD, tagging it ARM_ELF_DATA so that
   is_arm_elf () recognises it.  Returns FALSE on allocation failure.  */
static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
3076
/* Downcast a generic hash entry to the ARM-specific type.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global {
  /* Count of GOT-offset function-descriptor references (presumably
     R_ARM_GOTOFFFUNCDESC relocations -- confirm in check_relocs).  */
  unsigned int gotofffuncdesc_cnt;
  /* Count of GOT function-descriptor references (presumably
     R_ARM_GOTFUNCDESC relocations -- confirm in check_relocs).  */
  unsigned int gotfuncdesc_cnt;
  /* Count of function-descriptor references (presumably R_ARM_FUNCDESC
     relocations -- confirm in check_relocs).  */
  unsigned int funcdesc_cnt;
  /* Offset of the function descriptor; bit 0 doubles as an "already
     written" flag (see arm_elf_fill_funcdesc).  -1 while unassigned.  */
  int funcdesc_offset;
  /* Offset of the GOT entry holding the descriptor address; -1 while
     unassigned (see elf32_arm_link_hash_newfunc).  */
  int gotfuncdesc_offset;
};
3087
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* Bit-flags describing which kinds of GOT entry this symbol needs.
   GD and GDESC may be set simultaneously, hence GOT_TLS_GD_ANY_P.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
3127
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info) \
  (elf_link_hash_traverse \
   (&(table)->root, \
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
    (info)))

/* Get the ARM elf linker hash table from a link_info structure; NULL if
   the hash table does not belong to this backend.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Typed wrapper around bfd_hash_lookup for the stub hash table.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size of the TLS-descriptor jump table: one 4-byte slot per
   R_ARM_TLS_DESC index handed out so far.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3157
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  bfd_boolean use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
3336
3337 /* Add an FDPIC read-only fixup. */
3338 static void
3339 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3340 {
3341 bfd_vma fixup_offset;
3342
3343 fixup_offset = srofixup->reloc_count++ * 4;
3344 BFD_ASSERT (fixup_offset < srofixup->size);
3345 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3346 }
3347
/* Count trailing zero bits in MASK.  Mirrors __builtin_ctz, including
   the fallback's convention of returning the bit width (32) for a zero
   mask; like the builtin, callers should not rely on the zero case.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  int count = 0;

  while (count < (int) (8 * sizeof mask) && (mask & 1) == 0)
    {
      mask >>= 1;
      count++;
    }
  return count;
#endif
}
3365
/* Count the set bits in MASK.  Mirrors __builtin_popcount when the
   builtin is unavailable.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;

  while (mask != 0)
    {
      count += (int) (mask & 1);
      mask >>= 1;
    }
  return count;
#endif
}
3384
static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
				    asection *sreloc, Elf_Internal_Rela *rel);

/* Write the FDPIC function descriptor at OFFSET in the GOT, once only:
   bit 0 of *FUNCDESC_OFFSET is a "descriptor already written" flag,
   checked on entry and set on completion.

   For PIC links, emit an R_ARM_FUNCDESC_VALUE dynamic relocation against
   DYNINDX and store ADDR/SEG as the descriptor contents.  Otherwise
   store DYNRELOC_VALUE and the GOT address directly, recording a rofixup
   for each of the two descriptor words so the loader can relocate
   them.  */
static void
arm_elf_fill_funcdesc(bfd *output_bfd,
		      struct bfd_link_info *info,
		      int *funcdesc_offset,
		      int dynindx,
		      int offset,
		      bfd_vma addr,
		      bfd_vma dynreloc_value,
		      bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic(info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  /* Static link: two rofixups, one for each descriptor word.  */
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset);
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      *funcdesc_offset |= 1;
    }
}
3435
/* Create an entry in an ARM ELF linker hash table.  Called back from
   bfd_hash_lookup; initialises the ARM-specific fields after the
   superclass has set up the generic ones.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      /* -1 means "no TLS descriptor GOT entry assigned yet".  */
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      /* -1 means "no .got.plt slot assigned yet".  */
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;

      /* FDPIC counters start at zero; offsets use -1 as "unassigned".  */
      ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
      ret->fdpic_cnts.gotfuncdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_offset = -1;
      ret->fdpic_cnts.gotfuncdesc_offset = -1;
    }

  return (struct bfd_hash_entry *) ret;
}
3481
3482 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3483 symbols. */
3484
3485 static bfd_boolean
3486 elf32_arm_allocate_local_sym_info (bfd *abfd)
3487 {
3488 if (elf_local_got_refcounts (abfd) == NULL)
3489 {
3490 bfd_size_type num_syms;
3491 bfd_size_type size;
3492 char *data;
3493
3494 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3495 size = num_syms * (sizeof (bfd_signed_vma)
3496 + sizeof (struct arm_local_iplt_info *)
3497 + sizeof (bfd_vma)
3498 + sizeof (char)
3499 + sizeof (struct fdpic_local));
3500 data = bfd_zalloc (abfd, size);
3501 if (data == NULL)
3502 return FALSE;
3503
3504 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3505 data += num_syms * sizeof (struct fdpic_local);
3506
3507 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3508 data += num_syms * sizeof (bfd_signed_vma);
3509
3510 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3511 data += num_syms * sizeof (struct arm_local_iplt_info *);
3512
3513 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3514 data += num_syms * sizeof (bfd_vma);
3515
3516 elf32_arm_local_got_tls_type (abfd) = data;
3517 }
3518 return TRUE;
3519 }
3520
3521 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3522 to input bfd ABFD. Create the information if it doesn't already exist.
3523 Return null if an allocation fails. */
3524
3525 static struct arm_local_iplt_info *
3526 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3527 {
3528 struct arm_local_iplt_info **ptr;
3529
3530 if (!elf32_arm_allocate_local_sym_info (abfd))
3531 return NULL;
3532
3533 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3534 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3535 if (*ptr == NULL)
3536 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3537 return *ptr;
3538 }
3539
3540 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3541 in ABFD's symbol table. If the symbol is global, H points to its
3542 hash table entry, otherwise H is null.
3543
3544 Return true if the symbol does have PLT information. When returning
3545 true, point *ROOT_PLT at the target-independent reference count/offset
3546 union and *ARM_PLT at the ARM-specific information. */
3547
3548 static bfd_boolean
3549 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3550 struct elf32_arm_link_hash_entry *h,
3551 unsigned long r_symndx, union gotplt_union **root_plt,
3552 struct arm_plt_info **arm_plt)
3553 {
3554 struct arm_local_iplt_info *local_iplt;
3555
3556 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3557 return FALSE;
3558
3559 if (h != NULL)
3560 {
3561 *root_plt = &h->root.plt;
3562 *arm_plt = &h->plt;
3563 return TRUE;
3564 }
3565
3566 if (elf32_arm_local_iplt (abfd) == NULL)
3567 return FALSE;
3568
3569 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3570 if (local_iplt == NULL)
3571 return FALSE;
3572
3573 *root_plt = &local_iplt->root;
3574 *arm_plt = &local_iplt->arm;
3575 return TRUE;
3576 }
3577
3578 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3579 before it. */
3580
3581 static bfd_boolean
3582 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3583 struct arm_plt_info *arm_plt)
3584 {
3585 struct elf32_arm_link_hash_table *htab;
3586
3587 htab = elf32_arm_hash_table (info);
3588 return (arm_plt->thumb_refcount != 0
3589 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3590 }
3591
3592 /* Return a pointer to the head of the dynamic reloc list that should
3593 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3594 ABFD's symbol table. Return null if an error occurs. */
3595
3596 static struct elf_dyn_relocs **
3597 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3598 Elf_Internal_Sym *isym)
3599 {
3600 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3601 {
3602 struct arm_local_iplt_info *local_iplt;
3603
3604 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3605 if (local_iplt == NULL)
3606 return NULL;
3607 return &local_iplt->dyn_relocs;
3608 }
3609 else
3610 {
3611 /* Track dynamic relocs needed for local syms too.
3612 We really need local syms available to do this
3613 easily. Oh well. */
3614 asection *s;
3615 void *vpp;
3616
3617 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3618 if (s == NULL)
3619 abort ();
3620
3621 vpp = &elf_section_data (s)->local_dynrel;
3622 return (struct elf_dyn_relocs **) vpp;
3623 }
3624 }
3625
3626 /* Initialize an entry in the stub hash table. */
3627
3628 static struct bfd_hash_entry *
3629 stub_hash_newfunc (struct bfd_hash_entry *entry,
3630 struct bfd_hash_table *table,
3631 const char *string)
3632 {
3633 /* Allocate the structure if it has not already been allocated by a
3634 subclass. */
3635 if (entry == NULL)
3636 {
3637 entry = (struct bfd_hash_entry *)
3638 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3639 if (entry == NULL)
3640 return entry;
3641 }
3642
3643 /* Call the allocation method of the superclass. */
3644 entry = bfd_hash_newfunc (entry, table, string);
3645 if (entry != NULL)
3646 {
3647 struct elf32_arm_stub_hash_entry *eh;
3648
3649 /* Initialize the local fields. */
3650 eh = (struct elf32_arm_stub_hash_entry *) entry;
3651 eh->stub_sec = NULL;
3652 eh->stub_offset = (bfd_vma) -1;
3653 eh->source_value = 0;
3654 eh->target_value = 0;
3655 eh->target_section = NULL;
3656 eh->orig_insn = 0;
3657 eh->stub_type = arm_stub_none;
3658 eh->stub_size = 0;
3659 eh->stub_template = NULL;
3660 eh->stub_template_size = -1;
3661 eh->h = NULL;
3662 eh->id_sec = NULL;
3663 eh->output_name = NULL;
3664 }
3665
3666 return entry;
3667 }
3668
/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
   shortcuts to them in our hash table.  For FDPIC targets, also create
   the .rofixup section.  Returns FALSE on failure.  */

static bfd_boolean
create_got_section (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* BPABI objects never have a GOT, or associated sections.  */
  if (htab->symbian_p)
    return TRUE;

  if (! _bfd_elf_create_got_section (dynobj, info))
    return FALSE;

  /* Also create .rofixup.  */
  if (htab->fdpic_p)
    {
      /* Read-only, loaded, word-aligned (2**2): it holds 4-byte fixup
	 addresses written by arm_elf_add_rofixup.  */
      htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
      if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
	return FALSE;
    }

  return TRUE;
}
3700
/* Create the .iplt, .rel(a).iplt and .igot.plt sections, used for
   STT_GNU_IFUNC symbols.  Each is created only if not already present.
   Returns FALSE on failure.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  /* .iplt holds code, hence SEC_CODE and the backend PLT alignment.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  /* .rel.iplt or .rela.iplt, depending on whether the target uses REL
     relocations (see RELOC_SECTION).  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  /* GOT entries backing the .iplt stubs.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
3748
3749 /* Determine if we're dealing with a Thumb only architecture. */
3750
3751 static bfd_boolean
3752 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3753 {
3754 int arch;
3755 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3756 Tag_CPU_arch_profile);
3757
3758 if (profile)
3759 return profile == 'M';
3760
3761 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3762
3763 /* Force return logic to be reviewed for each new architecture. */
3764 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3765
3766 if (arch == TAG_CPU_ARCH_V6_M
3767 || arch == TAG_CPU_ARCH_V6S_M
3768 || arch == TAG_CPU_ARCH_V7E_M
3769 || arch == TAG_CPU_ARCH_V8M_BASE
3770 || arch == TAG_CPU_ARCH_V8M_MAIN)
3771 return TRUE;
3772
3773 return FALSE;
3774 }
3775
3776 /* Determine if we're dealing with a Thumb-2 object. */
3777
3778 static bfd_boolean
3779 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3780 {
3781 int arch;
3782 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3783 Tag_THUMB_ISA_use);
3784
3785 if (thumb_isa)
3786 return thumb_isa == 2;
3787
3788 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3789
3790 /* Force return logic to be reviewed for each new architecture. */
3791 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3792
3793 return (arch == TAG_CPU_ARCH_V6T2
3794 || arch == TAG_CPU_ARCH_V7
3795 || arch == TAG_CPU_ARCH_V7E_M
3796 || arch == TAG_CPU_ARCH_V8
3797 || arch == TAG_CPU_ARCH_V8R
3798 || arch == TAG_CPU_ARCH_V8M_MAIN);
3799 }
3800
3801 /* Determine whether Thumb-2 BL instruction is available. */
3802
3803 static bfd_boolean
3804 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3805 {
3806 int arch =
3807 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3808
3809 /* Force return logic to be reviewed for each new architecture. */
3810 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3811
3812 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3813 return (arch == TAG_CPU_ARCH_V6T2
3814 || arch >= TAG_CPU_ARCH_V7);
3815 }
3816
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also chooses the PLT header/entry sizes appropriate to
   the target flavour (VxWorks, Thumb-only, FDPIC).  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      if (bfd_link_pic (info))
	{
	  /* Shared VxWorks PLTs have no header entry.  */
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC PLTs have no header.  With eager binding (DF_BIND_NOW) the
     last 5 words of the entry template are dropped (presumably the
     lazy-binding tail -- confirm against elf32_arm_fdpic_plt_entry).
     Note this deliberately overrides any sizes chosen above.  */
  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
  }

  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
3891
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called when
   symbol IND is made indirect to (or a weak alias of) symbol DIR; the
   ARM-specific counters and dynamic-reloc lists recorded against IND are
   folded into DIR before the generic copy runs.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      /* Look for an existing entry for the same section on the
		 direct symbol; if found, merge counts and unlink P.  */
	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append the direct symbol's original list after the entries
	     that could not be merged.  */
	  *pp = edir->dyn_relocs;
	}

      /* The combined list now hangs off the direct symbol.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take the indirect symbol's TLS classification when the
	 direct symbol has no GOT references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3963
3964 /* Destroy an ARM elf linker hash table. */
3965
3966 static void
3967 elf32_arm_link_hash_table_free (bfd *obfd)
3968 {
3969 struct elf32_arm_link_hash_table *ret
3970 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3971
3972 bfd_hash_table_free (&ret->stub_hash_table);
3973 _bfd_elf_link_hash_table_free (obfd);
3974 }
3975
3976 /* Create an ARM elf linker hash table. */
3977
3978 static struct bfd_link_hash_table *
3979 elf32_arm_link_hash_table_create (bfd *abfd)
3980 {
3981 struct elf32_arm_link_hash_table *ret;
3982 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3983
3984 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3985 if (ret == NULL)
3986 return NULL;
3987
3988 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3989 elf32_arm_link_hash_newfunc,
3990 sizeof (struct elf32_arm_link_hash_entry),
3991 ARM_ELF_DATA))
3992 {
3993 free (ret);
3994 return NULL;
3995 }
3996
3997 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3998 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
3999 #ifdef FOUR_WORD_PLT
4000 ret->plt_header_size = 16;
4001 ret->plt_entry_size = 16;
4002 #else
4003 ret->plt_header_size = 20;
4004 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4005 #endif
4006 ret->use_rel = TRUE;
4007 ret->obfd = abfd;
4008 ret->fdpic_p = 0;
4009
4010 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4011 sizeof (struct elf32_arm_stub_hash_entry)))
4012 {
4013 _bfd_elf_link_hash_table_free (abfd);
4014 return NULL;
4015 }
4016 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4017
4018 return &ret->root.root;
4019 }
4020
4021 /* Determine what kind of NOPs are available. */
4022
4023 static bfd_boolean
4024 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4025 {
4026 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4027 Tag_CPU_arch);
4028
4029 /* Force return logic to be reviewed for each new architecture. */
4030 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
4031
4032 return (arch == TAG_CPU_ARCH_V6T2
4033 || arch == TAG_CPU_ARCH_V6K
4034 || arch == TAG_CPU_ARCH_V7
4035 || arch == TAG_CPU_ARCH_V8
4036 || arch == TAG_CPU_ARCH_V8R);
4037 }
4038
4039 static bfd_boolean
4040 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4041 {
4042 switch (stub_type)
4043 {
4044 case arm_stub_long_branch_thumb_only:
4045 case arm_stub_long_branch_thumb2_only:
4046 case arm_stub_long_branch_thumb2_only_pure:
4047 case arm_stub_long_branch_v4t_thumb_arm:
4048 case arm_stub_short_branch_v4t_thumb_arm:
4049 case arm_stub_long_branch_v4t_thumb_arm_pic:
4050 case arm_stub_long_branch_v4t_thumb_tls_pic:
4051 case arm_stub_long_branch_thumb_only_pic:
4052 case arm_stub_cmse_branch_thumb_only:
4053 return TRUE;
4054 case arm_stub_none:
4055 BFD_FAIL ();
4056 return FALSE;
4057 break;
4058 default:
4059 return FALSE;
4060 }
4061 }
4062
/* Determine the type of stub needed, if any, for a call.

   INFO is the overall link.  INPUT_SEC, REL and INPUT_BFD locate the
   branch being examined; ST_TYPE and HASH describe the target symbol
   (HASH may be NULL for local symbols); DESTINATION and SYM_SEC give
   the target address and section; NAME is used for diagnostics only.
   On entry *ACTUAL_BRANCH_TYPE holds the provisional branch type; if a
   stub is required, the resolved destination mode is written back to
   *ACTUAL_BRANCH_TYPE.  Returns arm_stub_none when no veneer is
   needed.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub. If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb. We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  /* Signed distance from the branch to its (possibly PLT) destination.  */
  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl). A stub is needed in this case,
	   but only if this call is not through a PLT entry. Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub. If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above. Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		      stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4407
/* Build a name for an entry in the stub hash table.  The name encodes
   the input section id, the target (symbol name, or section id and
   symbol index for local symbols), the addend and the stub type, so
   that distinct stubs to the same destination stay distinguishable.
   Returns a bfd_malloc'd string the caller must free, or NULL on
   allocation failure.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      /* Global symbol: "<section id>_<symbol>+<addend>_<stub type>".  */
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      /* Local symbol: "<section id>_<sym sec id>:<sym idx>+<addend>_<type>".
	 TLS call relocs use a fixed index of 0.  */
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}
4448
4449 /* Look up an entry in the stub hash. Stub entries are cached because
4450 creating the stub name takes a bit of time. */
4451
4452 static struct elf32_arm_stub_hash_entry *
4453 elf32_arm_get_stub_entry (const asection *input_section,
4454 const asection *sym_sec,
4455 struct elf_link_hash_entry *hash,
4456 const Elf_Internal_Rela *rel,
4457 struct elf32_arm_link_hash_table *htab,
4458 enum elf32_arm_stub_type stub_type)
4459 {
4460 struct elf32_arm_stub_hash_entry *stub_entry;
4461 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4462 const asection *id_sec;
4463
4464 if ((input_section->flags & SEC_CODE) == 0)
4465 return NULL;
4466
4467 /* If this input section is part of a group of sections sharing one
4468 stub section, then use the id of the first section in the group.
4469 Stub names need to include a section id, as there may well be
4470 more than one stub used to reach say, printf, and we need to
4471 distinguish between them. */
4472 BFD_ASSERT (input_section->id <= htab->top_id);
4473 id_sec = htab->stub_group[input_section->id].link_sec;
4474
4475 if (h != NULL && h->stub_cache != NULL
4476 && h->stub_cache->h == h
4477 && h->stub_cache->id_sec == id_sec
4478 && h->stub_cache->stub_type == stub_type)
4479 {
4480 stub_entry = h->stub_cache;
4481 }
4482 else
4483 {
4484 char *stub_name;
4485
4486 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4487 if (stub_name == NULL)
4488 return NULL;
4489
4490 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4491 stub_name, FALSE, FALSE);
4492 if (h != NULL)
4493 h->stub_cache = stub_entry;
4494
4495 free (stub_name);
4496 }
4497
4498 return stub_entry;
4499 }
4500
4501 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4502 section. */
4503
4504 static bfd_boolean
4505 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4506 {
4507 if (stub_type >= max_stub_type)
4508 abort (); /* Should be unreachable. */
4509
4510 switch (stub_type)
4511 {
4512 case arm_stub_cmse_branch_thumb_only:
4513 return TRUE;
4514
4515 default:
4516 return FALSE;
4517 }
4518
4519 abort (); /* Should be unreachable. */
4520 }
4521
4522 /* Required alignment (as a power of 2) for the dedicated section holding
4523 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4524 with input sections. */
4525
4526 static int
4527 arm_dedicated_stub_output_section_required_alignment
4528 (enum elf32_arm_stub_type stub_type)
4529 {
4530 if (stub_type >= max_stub_type)
4531 abort (); /* Should be unreachable. */
4532
4533 switch (stub_type)
4534 {
4535 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4536 boundary. */
4537 case arm_stub_cmse_branch_thumb_only:
4538 return 5;
4539
4540 default:
4541 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4542 return 0;
4543 }
4544
4545 abort (); /* Should be unreachable. */
4546 }
4547
4548 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4549 NULL if veneers of this type are interspersed with input sections. */
4550
4551 static const char *
4552 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4553 {
4554 if (stub_type >= max_stub_type)
4555 abort (); /* Should be unreachable. */
4556
4557 switch (stub_type)
4558 {
4559 case arm_stub_cmse_branch_thumb_only:
4560 return ".gnu.sgstubs";
4561
4562 default:
4563 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4564 return NULL;
4565 }
4566
4567 abort (); /* Should be unreachable. */
4568 }
4569
4570 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4571 returns the address of the hash table field in HTAB holding a pointer to the
4572 corresponding input section. Otherwise, returns NULL. */
4573
4574 static asection **
4575 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4576 enum elf32_arm_stub_type stub_type)
4577 {
4578 if (stub_type >= max_stub_type)
4579 abort (); /* Should be unreachable. */
4580
4581 switch (stub_type)
4582 {
4583 case arm_stub_cmse_branch_thumb_only:
4584 return &htab->cmse_stub_sec;
4585
4586 default:
4587 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4588 return NULL;
4589 }
4590
4591 abort (); /* Should be unreachable. */
4592 }
4593
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated veneers (eg. Secure Gateway stubs) go in their own
	 named output section, which the linker script must place.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Regular veneers share one stub section per group of input
	 sections; the group leader's slot holds the shared section.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* First stub for this group/type: create "<prefix><STUB_SUFFIX>".  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the shared stub section against this input section too.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4673
4674 /* Add a new stub entry to the stub hash. Not all fields of the new
4675 stub entry are initialised. */
4676
4677 static struct elf32_arm_stub_hash_entry *
4678 elf32_arm_add_stub (const char *stub_name, asection *section,
4679 struct elf32_arm_link_hash_table *htab,
4680 enum elf32_arm_stub_type stub_type)
4681 {
4682 asection *link_sec;
4683 asection *stub_sec;
4684 struct elf32_arm_stub_hash_entry *stub_entry;
4685
4686 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4687 stub_type);
4688 if (stub_sec == NULL)
4689 return NULL;
4690
4691 /* Enter this entry into the linker stub hash table. */
4692 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4693 TRUE, FALSE);
4694 if (stub_entry == NULL)
4695 {
4696 if (section == NULL)
4697 section = stub_sec;
4698 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4699 section->owner, stub_name);
4700 return NULL;
4701 }
4702
4703 stub_entry->stub_sec = stub_sec;
4704 stub_entry->stub_offset = (bfd_vma) -1;
4705 stub_entry->id_sec = link_sec;
4706
4707 return stub_entry;
4708 }
4709
4710 /* Store an Arm insn into an output section not processed by
4711 elf32_arm_write_section. */
4712
4713 static void
4714 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4715 bfd * output_bfd, bfd_vma val, void * ptr)
4716 {
4717 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4718 bfd_putl32 (val, ptr);
4719 else
4720 bfd_putb32 (val, ptr);
4721 }
4722
4723 /* Store a 16-bit Thumb insn into an output section not processed by
4724 elf32_arm_write_section. */
4725
4726 static void
4727 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4728 bfd * output_bfd, bfd_vma val, void * ptr)
4729 {
4730 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4731 bfd_putl16 (val, ptr);
4732 else
4733 bfd_putb16 (val, ptr);
4734 }
4735
4736 /* Store a Thumb2 insn into an output section not processed by
4737 elf32_arm_write_section. */
4738
4739 static void
4740 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4741 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4742 {
4743 /* T2 instructions are 16-bit streamed. */
4744 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4745 {
4746 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4747 bfd_putl16 ((val & 0xffff), ptr + 2);
4748 }
4749 else
4750 {
4751 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4752 bfd_putb16 ((val & 0xffff), ptr + 2);
4753 }
4754 }
4755
4756 /* If it's possible to change R_TYPE to a more efficient access
4757 model, return the new reloc type. */
4758
4759 static unsigned
4760 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4761 struct elf_link_hash_entry *h)
4762 {
4763 int is_local = (h == NULL);
4764
4765 if (bfd_link_pic (info)
4766 || (h && h->root.type == bfd_link_hash_undefweak))
4767 return r_type;
4768
4769 /* We do not support relaxations for Old TLS models. */
4770 switch (r_type)
4771 {
4772 case R_ARM_TLS_GOTDESC:
4773 case R_ARM_TLS_CALL:
4774 case R_ARM_THM_TLS_CALL:
4775 case R_ARM_TLS_DESCSEQ:
4776 case R_ARM_THM_TLS_DESCSEQ:
4777 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4778 }
4779
4780 return r_type;
4781 }
4782
4783 static bfd_reloc_status_type elf32_arm_final_link_relocate
4784 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4785 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4786 const char *, unsigned char, enum arm_st_branch_type,
4787 struct elf_link_hash_entry *, bfd_boolean *, char **);
4788
/* Return the alignment in bytes required for the start of stubs of
   type STUB_TYPE: 2 for Cortex-A8 erratum veneers (Thumb), 4 for
   ordinary long-branch veneers, 16 for NaCl bundled code.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4827
4828 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4829 veneering (TRUE) or have their own symbol (FALSE). */
4830
4831 static bfd_boolean
4832 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4833 {
4834 if (stub_type >= max_stub_type)
4835 abort (); /* Should be unreachable. */
4836
4837 switch (stub_type)
4838 {
4839 case arm_stub_cmse_branch_thumb_only:
4840 return TRUE;
4841
4842 default:
4843 return FALSE;
4844 }
4845
4846 abort (); /* Should be unreachable. */
4847 }
4848
4849 /* Returns the padding needed for the dedicated section used stubs of type
4850 STUB_TYPE. */
4851
4852 static int
4853 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4854 {
4855 if (stub_type >= max_stub_type)
4856 abort (); /* Should be unreachable. */
4857
4858 switch (stub_type)
4859 {
4860 case arm_stub_cmse_branch_thumb_only:
4861 return 32;
4862
4863 default:
4864 return 0;
4865 }
4866
4867 abort (); /* Should be unreachable. */
4868 }
4869
4870 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4871 returns the address of the hash table field in HTAB holding the offset at
4872 which new veneers should be layed out in the stub section. */
4873
4874 static bfd_vma*
4875 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4876 enum elf32_arm_stub_type stub_type)
4877 {
4878 switch (stub_type)
4879 {
4880 case arm_stub_cmse_branch_thumb_only:
4881 return &htab->new_cmse_stub_offset;
4882
4883 default:
4884 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4885 return NULL;
4886 }
4887 }
4888
/* Build one stub: emit the instruction template for the stub described by
   GEN_ENTRY (really a struct elf32_arm_stub_hash_entry *, this is a
   bfd_hash_traverse callback) into its stub section and apply the
   relocations the template calls for.  IN_ARG is the struct
   bfd_link_info *.  Returns FALSE on hard failure, TRUE otherwise
   (including when the stub is deliberately skipped on this pass).  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  bfd_boolean removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* Per-stub table of template entries needing relocation and the byte
     offset of each within the stub.  Only the first nrelocs entries are
     ever read; the missing third initializer is implicitly zero.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* The traversal runs twice when the Cortex-A8 fix is enabled; skip the
     stubs that do not belong to the current pass.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which ones need a relocation.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* 32-bit Thumb insns are stored as two halfwords, high half
	     first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal-pool data always gets a relocation.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  /* Now apply the relocations recorded above.  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      /* NOTE(review): the result of this call (and error_message) is
	 deliberately ignored here; errors in stub relocation are assumed
	 to have been diagnosed elsewhere — confirm.  */
      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
5057
5058 /* Calculate the template, template size and instruction size for a stub.
5059 Return value is the instruction size. */
5060
5061 static unsigned int
5062 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5063 const insn_sequence **stub_template,
5064 int *stub_template_size)
5065 {
5066 const insn_sequence *template_sequence = NULL;
5067 int template_size = 0, i;
5068 unsigned int size;
5069
5070 template_sequence = stub_definitions[stub_type].template_sequence;
5071 if (stub_template)
5072 *stub_template = template_sequence;
5073
5074 template_size = stub_definitions[stub_type].template_size;
5075 if (stub_template_size)
5076 *stub_template_size = template_size;
5077
5078 size = 0;
5079 for (i = 0; i < template_size; i++)
5080 {
5081 switch (template_sequence[i].type)
5082 {
5083 case THUMB16_TYPE:
5084 size += 2;
5085 break;
5086
5087 case ARM_TYPE:
5088 case THUMB32_TYPE:
5089 case DATA_TYPE:
5090 size += 4;
5091 break;
5092
5093 default:
5094 BFD_FAIL ();
5095 return 0;
5096 }
5097 }
5098
5099 return size;
5100 }
5101
5102 /* As above, but don't actually build the stub. Just bump offset so
5103 we know stub section sizes. */
5104
static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  /* Only record the template when the slot is live; a zero
     stub_template_size marks a deliberately emptied slot.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return TRUE;

  /* Round up to a multiple of 8 bytes so successive stubs within the
     section keep 8-byte alignment.  */
  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}
5139
5140 /* External entry points for sizing and building linker stubs. */
5141
5142 /* Set up various things so that we can make a list of input sections
5143 for each output section included in the link. Returns -1 on error,
5144 0 when no stubs will be needed, and 1 on success. */
5145
int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* stub_group is indexed by input section id, so size it by the
     largest id seen (zero-filled so every entry starts NULL).  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Only output sections holding code get a real (initially empty)
     list; elf32_arm_next_input_section fills these in.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
5220
5221 /* The linker repeatedly calls this function for each input section,
5222 in the order that input sections are linked into output sections.
5223 Build lists of input sections to determine groupings between which
5224 we may insert linker stubs. */
5225
void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* bfd_abs_section_ptr marks output sections we are not tracking
	 (set up in elf32_arm_setup_section_lists).  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  /* NOTE: PREV_SEC is intentionally left defined here; it is
	     shared with group_sections below, which #undef's it.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5250
5251 /* See whether we can group stub sections together. Grouping stub
5252 sections may result in fewer stubs. More importantly, we need to
5253 put all .init* and .fini* stubs at the end of the .init or
5254 .fini output sections respectively, because glibc splits the
5255 _init and _fini functions into multiple parts. Putting a stub in
5256 the middle of a function is not a good idea. */
5257
static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Entries still holding bfd_abs_section_ptr belong to output
	 sections we never tracked; skip them.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      /* Walk the (now forward-ordered) list, carving it into groups
	 whose sections will share one stub section placed after CURR.  */
      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed once every
     section has been assigned its stub group.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5351
5352 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5353 erratum fix. */
5354
5355 static int
5356 a8_reloc_compare (const void *a, const void *b)
5357 {
5358 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5359 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5360
5361 if (ra->from < rb->from)
5362 return -1;
5363 else if (ra->from > rb->from)
5364 return 1;
5365 else
5366 return 0;
5367 }
5368
5369 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5370 const char *, char **);
5371
5372 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5373 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5374 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5375 otherwise. */
5376
static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only scan allocated executable sections that make it into the
	 output.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Reuse cached contents when available, otherwise read (and later
	 free) the section.  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb ('t') spans can
	 contain the offending instruction sequence.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword 0b111xx with xx != 00 marks a 32-bit
		 Thumb-2 instruction.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* The erratum pattern: a 32-bit branch whose first halfword
		 sits at offset 0xffe within a 4KB page, preceded by a
		 32-bit non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look up a relocation recorded at this branch's
		     address, if any.  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) branch offset.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B/BL/BLX (encodings T4/T1/T2) offset,
			 including the I1/I2 bits derived from S, J1, J2.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only a branch whose target lies in the same 4KB
			 page as the branch itself triggers the erratum.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  /* Grow the fix table when full.
			     NOTE(review): the bfd_realloc result is not
			     checked for NULL before use below — a failed
			     reallocation would be dereferenced; confirm
			     whether OOM is handled elsewhere.  */
			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* "%x:%x" of two 32-bit values: at most
				 8 + 1 + 8 characters plus the NUL.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Hand the (possibly reallocated) table back to the caller.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5682
5683 /* Create or update a stub entry depending on whether the stub can already be
5684 found in HTAB. The stub is identified by:
5685 - its type STUB_TYPE
5686 - its source branch (note that several can share the same stub) whose
5687 section and relocation (if any) are given by SECTION and IRELA
5688 respectively
5689 - its target symbol whose input section, hash, name, value and branch type
5690 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5691 respectively
5692
5693 If found, the value of the stub's target symbol is updated from SYM_VALUE
5694 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5695 TRUE and the stub entry is initialized.
5696
5697 Returns the stub that was created or updated, or NULL if an error
5698 occurred. */
5699
static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  /* Symbol-claiming stubs (SG veneers) are keyed by the symbol name
     itself; other stubs get a synthesized, malloc'd name that we own
     and must free on every non-insertion path below.  */
  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* sizeof (THUMB2ARM_GLUE_ENTRY_NAME) covers the longest of the
	 three format strings used below, including its NUL.  */
      stub_entry->output_name = (char *)
	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
		   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return stub_entry;
}
5794
5795 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5796 gateway veneer to transition from non secure to secure state and create them
5797 accordingly.
5798
5799 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5800 defines the conditions that govern Secure Gateway veneer creation for a
5801 given symbol <SYM> as follows:
5802 - it has function type
5803 - it has non local binding
5804 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5805 same type, binding and value as <SYM> (called normal symbol).
5806 An entry function can handle secure state transition itself in which case
5807 its special symbol would have a different value from the normal symbol.
5808
5809 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5810 entry mapping while HTAB gives the name to hash entry mapping.
5811 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5812 created.
5813
5814 The return value gives whether a stub failed to be allocated. */
5815
5816 static bfd_boolean
5817 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5818 obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5819 int *cmse_stub_created)
5820 {
5821 const struct elf_backend_data *bed;
5822 Elf_Internal_Shdr *symtab_hdr;
5823 unsigned i, j, sym_count, ext_start;
5824 Elf_Internal_Sym *cmse_sym, *local_syms;
5825 struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5826 enum arm_st_branch_type branch_type;
5827 char *sym_name, *lsym_name;
5828 bfd_vma sym_value;
5829 asection *section;
5830 struct elf32_arm_stub_hash_entry *stub_entry;
5831 bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
5832
5833 bed = get_elf_backend_data (input_bfd);
5834 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5835 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5836 ext_start = symtab_hdr->sh_info;
5837 is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5838 && out_attr[Tag_CPU_arch_profile].i == 'M');
5839
5840 local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5841 if (local_syms == NULL)
5842 local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5843 symtab_hdr->sh_info, 0, NULL, NULL,
5844 NULL);
5845 if (symtab_hdr->sh_info && local_syms == NULL)
5846 return FALSE;
5847
5848 /* Scan symbols. */
5849 for (i = 0; i < sym_count; i++)
5850 {
5851 cmse_invalid = FALSE;
5852
5853 if (i < ext_start)
5854 {
5855 cmse_sym = &local_syms[i];
5856 /* Not a special symbol. */
5857 if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
5858 continue;
5859 sym_name = bfd_elf_string_from_elf_section (input_bfd,
5860 symtab_hdr->sh_link,
5861 cmse_sym->st_name);
5862 /* Special symbol with local binding. */
5863 cmse_invalid = TRUE;
5864 }
5865 else
5866 {
5867 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
5868 sym_name = (char *) cmse_hash->root.root.root.string;
5869
5870 /* Not a special symbol. */
5871 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
5872 continue;
5873
5874 /* Special symbol has incorrect binding or type. */
5875 if ((cmse_hash->root.root.type != bfd_link_hash_defined
5876 && cmse_hash->root.root.type != bfd_link_hash_defweak)
5877 || cmse_hash->root.type != STT_FUNC)
5878 cmse_invalid = TRUE;
5879 }
5880
5881 if (!is_v8m)
5882 {
5883 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
5884 "ARMv8-M architecture or later"),
5885 input_bfd, sym_name);
5886 is_v8m = TRUE; /* Avoid multiple warning. */
5887 ret = FALSE;
5888 }
5889
5890 if (cmse_invalid)
5891 {
5892 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
5893 " a global or weak function symbol"),
5894 input_bfd, sym_name);
5895 ret = FALSE;
5896 if (i < ext_start)
5897 continue;
5898 }
5899
5900 sym_name += strlen (CMSE_PREFIX);
5901 hash = (struct elf32_arm_link_hash_entry *)
5902 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
5903
5904 /* No associated normal symbol or it is neither global nor weak. */
5905 if (!hash
5906 || (hash->root.root.type != bfd_link_hash_defined
5907 && hash->root.root.type != bfd_link_hash_defweak)
5908 || hash->root.type != STT_FUNC)
5909 {
5910 /* Initialize here to avoid warning about use of possibly
5911 uninitialized variable. */
5912 j = 0;
5913
5914 if (!hash)
5915 {
5916 /* Searching for a normal symbol with local binding. */
5917 for (; j < ext_start; j++)
5918 {
5919 lsym_name =
5920 bfd_elf_string_from_elf_section (input_bfd,
5921 symtab_hdr->sh_link,
5922 local_syms[j].st_name);
5923 if (!strcmp (sym_name, lsym_name))
5924 break;
5925 }
5926 }
5927
5928 if (hash || j < ext_start)
5929 {
5930 _bfd_error_handler
5931 (_("%pB: invalid standard symbol `%s'; it must be "
5932 "a global or weak function symbol"),
5933 input_bfd, sym_name);
5934 }
5935 else
5936 _bfd_error_handler
5937 (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
5938 ret = FALSE;
5939 if (!hash)
5940 continue;
5941 }
5942
5943 sym_value = hash->root.root.u.def.value;
5944 section = hash->root.root.u.def.section;
5945
5946 if (cmse_hash->root.root.u.def.section != section)
5947 {
5948 _bfd_error_handler
5949 (_("%pB: `%s' and its special symbol are in different sections"),
5950 input_bfd, sym_name);
5951 ret = FALSE;
5952 }
5953 if (cmse_hash->root.root.u.def.value != sym_value)
5954 continue; /* Ignore: could be an entry function starting with SG. */
5955
5956 /* If this section is a link-once section that will be discarded, then
5957 don't create any stubs. */
5958 if (section->output_section == NULL)
5959 {
5960 _bfd_error_handler
5961 (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
5962 continue;
5963 }
5964
5965 if (hash->root.size == 0)
5966 {
5967 _bfd_error_handler
5968 (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
5969 ret = FALSE;
5970 }
5971
5972 if (!ret)
5973 continue;
5974 branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5975 stub_entry
5976 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
5977 NULL, NULL, section, hash, sym_name,
5978 sym_value, branch_type, &new_stub);
5979
5980 if (stub_entry == NULL)
5981 ret = FALSE;
5982 else
5983 {
5984 BFD_ASSERT (new_stub);
5985 (*cmse_stub_created)++;
5986 }
5987 }
5988
5989 if (!symtab_hdr->contents)
5990 free (local_syms);
5991 return ret;
5992 }
5993
5994 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
5995 code entry function, ie can be called from non secure code without using a
5996 veneer. */
5997
5998 static bfd_boolean
5999 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6000 {
6001 bfd_byte contents[4];
6002 uint32_t first_insn;
6003 asection *section;
6004 file_ptr offset;
6005 bfd *abfd;
6006
6007 /* Defined symbol of function type. */
6008 if (hash->root.root.type != bfd_link_hash_defined
6009 && hash->root.root.type != bfd_link_hash_defweak)
6010 return FALSE;
6011 if (hash->root.type != STT_FUNC)
6012 return FALSE;
6013
6014 /* Read first instruction. */
6015 section = hash->root.root.u.def.section;
6016 abfd = section->owner;
6017 offset = hash->root.root.u.def.value - section->vma;
6018 if (!bfd_get_section_contents (abfd, section, contents, offset,
6019 sizeof (contents)))
6020 return FALSE;
6021
6022 first_insn = bfd_get_32 (abfd, contents);
6023
6024 /* Starts by SG instruction. */
6025 return first_insn == 0xe97fe97f;
6026 }
6027
6028 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6029 secure gateway veneers (ie. the veneers was not in the input import library)
6030 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
6031
6032 static bfd_boolean
6033 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6034 {
6035 struct elf32_arm_stub_hash_entry *stub_entry;
6036 struct bfd_link_info *info;
6037
6038 /* Massage our args to the form they really have. */
6039 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6040 info = (struct bfd_link_info *) gen_info;
6041
6042 if (info->out_implib_bfd)
6043 return TRUE;
6044
6045 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6046 return TRUE;
6047
6048 if (stub_entry->stub_offset == (bfd_vma) -1)
6049 _bfd_error_handler (" %s", stub_entry->output_name);
6050
6051 return TRUE;
6052 }
6053
6054 /* Set offset of each secure gateway veneers so that its address remain
6055 identical to the one in the input import library referred by
6056 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6057 (present in input import library but absent from the executable being
6058 linked) or if new veneers appeared and there is no output import library
6059 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6060 number of secure gateway veneers found in the input import library.
6061
6062 The function returns whether an error occurred. If no error occurred,
6063 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6064 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6065 veneer observed set for new veneers to be layed out after. */
6066
6067 static bfd_boolean
6068 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6069 struct elf32_arm_link_hash_table *htab,
6070 int *cmse_stub_created)
6071 {
6072 long symsize;
6073 char *sym_name;
6074 flagword flags;
6075 long i, symcount;
6076 bfd *in_implib_bfd;
6077 asection *stub_out_sec;
6078 bfd_boolean ret = TRUE;
6079 Elf_Internal_Sym *intsym;
6080 const char *out_sec_name;
6081 bfd_size_type cmse_stub_size;
6082 asymbol **sympp = NULL, *sym;
6083 struct elf32_arm_link_hash_entry *hash;
6084 const insn_sequence *cmse_stub_template;
6085 struct elf32_arm_stub_hash_entry *stub_entry;
6086 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6087 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6088 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6089
6090 /* No input secure gateway import library. */
6091 if (!htab->in_implib_bfd)
6092 return TRUE;
6093
6094 in_implib_bfd = htab->in_implib_bfd;
6095 if (!htab->cmse_implib)
6096 {
6097 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6098 "Gateway import libraries"), in_implib_bfd);
6099 return FALSE;
6100 }
6101
6102 /* Get symbol table size. */
6103 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6104 if (symsize < 0)
6105 return FALSE;
6106
6107 /* Read in the input secure gateway import library's symbol table. */
6108 sympp = (asymbol **) xmalloc (symsize);
6109 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6110 if (symcount < 0)
6111 {
6112 ret = FALSE;
6113 goto free_sym_buf;
6114 }
6115
6116 htab->new_cmse_stub_offset = 0;
6117 cmse_stub_size =
6118 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6119 &cmse_stub_template,
6120 &cmse_stub_template_size);
6121 out_sec_name =
6122 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6123 stub_out_sec =
6124 bfd_get_section_by_name (htab->obfd, out_sec_name);
6125 if (stub_out_sec != NULL)
6126 cmse_stub_sec_vma = stub_out_sec->vma;
6127
6128 /* Set addresses of veneers mentionned in input secure gateway import
6129 library's symbol table. */
6130 for (i = 0; i < symcount; i++)
6131 {
6132 sym = sympp[i];
6133 flags = sym->flags;
6134 sym_name = (char *) bfd_asymbol_name (sym);
6135 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6136
6137 if (sym->section != bfd_abs_section_ptr
6138 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6139 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6140 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6141 != ST_BRANCH_TO_THUMB))
6142 {
6143 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6144 "symbol should be absolute, global and "
6145 "refer to Thumb functions"),
6146 in_implib_bfd, sym_name);
6147 ret = FALSE;
6148 continue;
6149 }
6150
6151 veneer_value = bfd_asymbol_value (sym);
6152 stub_offset = veneer_value - cmse_stub_sec_vma;
6153 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6154 FALSE, FALSE);
6155 hash = (struct elf32_arm_link_hash_entry *)
6156 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6157
6158 /* Stub entry should have been created by cmse_scan or the symbol be of
6159 a secure function callable from non secure code. */
6160 if (!stub_entry && !hash)
6161 {
6162 bfd_boolean new_stub;
6163
6164 _bfd_error_handler
6165 (_("entry function `%s' disappeared from secure code"), sym_name);
6166 hash = (struct elf32_arm_link_hash_entry *)
6167 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6168 stub_entry
6169 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6170 NULL, NULL, bfd_abs_section_ptr, hash,
6171 sym_name, veneer_value,
6172 ST_BRANCH_TO_THUMB, &new_stub);
6173 if (stub_entry == NULL)
6174 ret = FALSE;
6175 else
6176 {
6177 BFD_ASSERT (new_stub);
6178 new_cmse_stubs_created++;
6179 (*cmse_stub_created)++;
6180 }
6181 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6182 stub_entry->stub_offset = stub_offset;
6183 }
6184 /* Symbol found is not callable from non secure code. */
6185 else if (!stub_entry)
6186 {
6187 if (!cmse_entry_fct_p (hash))
6188 {
6189 _bfd_error_handler (_("`%s' refers to a non entry function"),
6190 sym_name);
6191 ret = FALSE;
6192 }
6193 continue;
6194 }
6195 else
6196 {
6197 /* Only stubs for SG veneers should have been created. */
6198 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6199
6200 /* Check visibility hasn't changed. */
6201 if (!!(flags & BSF_GLOBAL)
6202 != (hash->root.root.type == bfd_link_hash_defined))
6203 _bfd_error_handler
6204 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6205 sym_name);
6206
6207 stub_entry->stub_offset = stub_offset;
6208 }
6209
6210 /* Size should match that of a SG veneer. */
6211 if (intsym->st_size != cmse_stub_size)
6212 {
6213 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6214 in_implib_bfd, sym_name);
6215 ret = FALSE;
6216 }
6217
6218 /* Previous veneer address is before current SG veneer section. */
6219 if (veneer_value < cmse_stub_sec_vma)
6220 {
6221 /* Avoid offset underflow. */
6222 if (stub_entry)
6223 stub_entry->stub_offset = 0;
6224 stub_offset = 0;
6225 ret = FALSE;
6226 }
6227
6228 /* Complain if stub offset not a multiple of stub size. */
6229 if (stub_offset % cmse_stub_size)
6230 {
6231 _bfd_error_handler
6232 (_("offset of veneer for entry function `%s' not a multiple of "
6233 "its size"), sym_name);
6234 ret = FALSE;
6235 }
6236
6237 if (!ret)
6238 continue;
6239
6240 new_cmse_stubs_created--;
6241 if (veneer_value < cmse_stub_array_start)
6242 cmse_stub_array_start = veneer_value;
6243 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6244 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6245 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6246 }
6247
6248 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6249 {
6250 BFD_ASSERT (new_cmse_stubs_created > 0);
6251 _bfd_error_handler
6252 (_("new entry function(s) introduced but no output import library "
6253 "specified:"));
6254 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6255 }
6256
6257 if (cmse_stub_array_start != cmse_stub_sec_vma)
6258 {
6259 _bfd_error_handler
6260 (_("start address of `%s' is different from previous link"),
6261 out_sec_name);
6262 ret = FALSE;
6263 }
6264
6265 free_sym_buf:
6266 free (sympp);
6267 return ret;
6268 }
6269
/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.

   OUTPUT_BFD is the final executable/library; STUB_BFD holds the stub
   sections.  GROUP_SIZE bounds how far apart a branch and its stub may
   be placed (negative means stubs go after the branches).  The two
   callbacks let this function create stub sections and re-run section
   layout.  Returns TRUE on success.  */

bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bfd_boolean ret = TRUE;
  obj_attribute *out_attr;
  int cmse_stub_created = 0;
  bfd_size_type stub_group_size;
  bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab == NULL)
    return FALSE;

  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  out_attr = elf_known_obj_attributes_proc (output_bfd);
  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* Thumb branch range is +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();

  /* Iterate until the set of stubs (and hence section layout)
     converges: each pass may create new stubs, which can move code and
     require yet more stubs on the next pass.  */
  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;
      enum elf32_arm_stub_type stub_type;
      bfd_boolean stub_changed = FALSE;
      unsigned prev_num_a8_fixes = num_a8_fixes;

      num_a8_fixes = 0;
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  if (!is_arm_elf (input_bfd))
	    continue;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Limit scan of symbols to object file whose profile is
	     Microcontroller to not hinder performance in the general case.  */
	  if (m_profile && first_veneer_scan)
	    {
	      struct elf_link_hash_entry **sym_hashes;

	      sym_hashes = elf_sym_hashes (input_bfd);
	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
			      &cmse_stub_created))
		goto error_ret_free_local;

	      if (cmse_stub_created != 0)
		stub_changed = TRUE;
	    }

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  unsigned char st_type;
		  enum arm_st_branch_type branch_type;
		  bfd_boolean created_stub = FALSE;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		      /* Shared error-exit labels: free the reloc buffer
			 (if we own it) then the local symbols.  */
		    error_ret_free_internal:
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		      /* Fall through.  */
		    error_ret_free_local:
		      if (local_syms != NULL
			  && (symtab_hdr->contents
			      != (unsigned char *) local_syms))
			free (local_syms);
		      return FALSE;
		    }

		  hash = NULL;
		  if (r_indx >= symtab_hdr->sh_info)
		    hash = elf32_arm_hash_entry
		      (elf_sym_hashes (input_bfd)
		       [r_indx - symtab_hdr->sh_info]);

		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLSCALL  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32)
		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
			   && r_type == elf32_arm_tls_transition
			       (info, r_type, &hash->root)
			   && ((hash ? hash->tls_type
				: (elf32_arm_local_got_tls_type
				   (input_bfd)[r_indx]))
			       & GOT_TLS_GDESC) != 0))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  sym_name = NULL;

		  if (r_type == (unsigned int) R_ARM_TLS_CALL
		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
		    {
		      /* A non-relaxed TLS call.  The target is the
			 plt-resident trampoline and nothing to do
			 with the symbol.  */
		      BFD_ASSERT (htab->tls_trampoline > 0);
		      sym_sec = htab->root.splt;
		      sym_value = htab->tls_trampoline;
		      hash = 0;
		      st_type = STT_FUNC;
		      branch_type = ST_BRANCH_TO_ARM;
		    }
		  else if (!hash)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;

		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      if (sym->st_shndx == SHN_UNDEF)
			sym_sec = bfd_und_section_ptr;
		      else if (sym->st_shndx == SHN_ABS)
			sym_sec = bfd_abs_section_ptr;
		      else if (sym->st_shndx == SHN_COMMON)
			sym_sec = bfd_com_section_ptr;
		      else
			sym_sec =
			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);

		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = hash->root.type;
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
		      sym_name = hash->root.root.root.string;
		    }

		  /* do/while(0) gives the stub-creation code a scope it
		     can `break' out of without goto.  */
		  do
		    {
		      bfd_boolean new_stub;
		      struct elf32_arm_stub_hash_entry *stub_entry;

		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, &branch_type,
						    hash, destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      stub_entry =
			elf32_arm_create_stub (htab, stub_type, section, irela,
					       sym_sec, hash,
					       (char *) sym_name, sym_value,
					       branch_type, &new_stub);

		      created_stub = stub_entry != NULL;
		      if (!created_stub)
			goto error_ret_free_internal;
		      else if (!new_stub)
			break;
		      else
			stub_changed = TRUE;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
				     + section->output_offset
				     + irela->r_offset;

		      /* Only branches whose first halfword sits at the
			 last 2 bytes of a 4K page can trigger the
			 erratum.  */
		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      a8_reloc_table_size *= 2;
			      a8_relocs = (struct a8_erratum_reloc *)
				  bfd_realloc (a8_relocs,
					       sizeof (struct a8_erratum_reloc)
					       * a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].branch_type = branch_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
			  a8_relocs[num_a8_relocs].hash = hash;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs,
		     sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs,
					  prev_num_a8_fixes, &stub_changed)
		  != 0)
		goto error_ret_free_local;
	    }

	  /* Either free the local symbols or cache them on the symtab
	     header, depending on the linker's keep_memory policy.  */
	  if (local_syms != NULL
	      && symtab_hdr->contents != (unsigned char *) local_syms)
	    {
	      if (!info->keep_memory)
		free (local_syms);
	      else
		symtab_hdr->contents = (unsigned char *) local_syms;
	    }
	}

      /* SG veneer placement from the input import library only happens
	 once, after the first scan created the veneers.  */
      if (first_veneer_scan
	  && !set_cmse_veneer_addr_from_implib (info, htab,
						&cmse_stub_created))
	ret = FALSE;

      if (prev_num_a8_fixes != num_a8_fixes)
	stub_changed = TRUE;

      /* Converged: no new stubs this pass.  */
      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      /* Add new SG veneers after those already in the input import
	 library.  */
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  bfd_vma *start_offset_p;
	  asection **stub_sec_p;

	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  if (start_offset_p == NULL)
	    continue;

	  BFD_ASSERT (stub_sec_p != NULL);
	  if (*stub_sec_p != NULL)
	    (*stub_sec_p)->size = *start_offset_p;
	}

      /* Compute stub section size, considering padding.  */
      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  int size, padding;
	  asection **stub_sec_p;

	  padding = arm_dedicated_stub_section_padding (stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  /* Skip if no stub input section or no stub section padding
	     required.  */
	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
	    continue;
	  /* Stub section padding required but no dedicated section.  */
	  BFD_ASSERT (stub_sec_p);

	  /* Round the section size up to the padding boundary.  */
	  size = (*stub_sec_p)->size;
	  size = (size + padding - 1) & ~(padding - 1);
	  (*stub_sec_p)->size = size;
	}

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);

	    if (stub_sec == NULL)
	      return FALSE;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }


      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      first_veneer_scan = FALSE;
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template_sequence;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     TRUE, FALSE);
	  if (stub_entry == NULL)
	    {
	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
				  section->owner, stub_name);
	      return FALSE;
	    }

	  stub_entry->stub_sec = stub_sec;
	  stub_entry->stub_offset = (bfd_vma) -1;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->source_value = a8_fixes[i].offset;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].target_offset;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->branch_type = a8_fixes[i].branch_type;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
					      &template_sequence,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template_sequence;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }
  return ret;
}
6870
/* Build all the stubs associated with the current output file.  The
   stubs are kept in a hash table attached to the main linker hash
   table.  We also set up the .plt entries for statically linked PIC
   functions here.  This function is called via arm_elf_finish in the
   linker.  */

bfd_boolean
elf32_arm_build_stubs (struct bfd_link_info *info)
{
  asection *stub_sec;
  struct bfd_hash_table *table;
  enum elf32_arm_stub_type stub_type;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* First pass over the stub sections: allocate zeroed contents and
     reset each section's size; arm_build_one_stub re-grows the size
     as it emits each stub.  */
  for (stub_sec = htab->stub_bfd->sections;
       stub_sec != NULL;
       stub_sec = stub_sec->next)
    {
      bfd_size_type size;

      /* Ignore non-stub sections.  */
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

      /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
	 must at least be done for stub section requiring padding and for SG
	 veneers to ensure that a non secure code branching to a removed SG
	 veneer causes an error.  */
      size = stub_sec->size;
      stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
      if (stub_sec->contents == NULL && size != 0)
	return FALSE;

      stub_sec->size = 0;
    }

  /* Add new SG veneers after those already in the input import library.  */
  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
    {
      bfd_vma *start_offset_p;
      asection **stub_sec_p;

      start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      if (start_offset_p == NULL)
	continue;

      BFD_ASSERT (stub_sec_p != NULL);
      if (*stub_sec_p != NULL)
	(*stub_sec_p)->size = *start_offset_p;
    }

  /* Build the stubs as directed by the stub hash table.  */
  table = &htab->stub_hash_table;
  bfd_hash_traverse (table, arm_build_one_stub, info);
  if (htab->fix_cortex_a8)
    {
      /* Place the cortex a8 stubs last.  The value -1 signals to
	 arm_build_one_stub that only the Cortex-A8 stubs deferred by
	 the first traversal should be emitted this time.  */
      htab->fix_cortex_a8 = -1;
      bfd_hash_traverse (table, arm_build_one_stub, info);
    }

  return TRUE;
}
6939
6940 /* Locate the Thumb encoded calling stub for NAME. */
6941
6942 static struct elf_link_hash_entry *
6943 find_thumb_glue (struct bfd_link_info *link_info,
6944 const char *name,
6945 char **error_message)
6946 {
6947 char *tmp_name;
6948 struct elf_link_hash_entry *hash;
6949 struct elf32_arm_link_hash_table *hash_table;
6950
6951 /* We need a pointer to the armelf specific hash table. */
6952 hash_table = elf32_arm_hash_table (link_info);
6953 if (hash_table == NULL)
6954 return NULL;
6955
6956 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6957 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
6958
6959 BFD_ASSERT (tmp_name);
6960
6961 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
6962
6963 hash = elf_link_hash_lookup
6964 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6965
6966 if (hash == NULL
6967 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
6968 "Thumb", tmp_name, name) == -1)
6969 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6970
6971 free (tmp_name);
6972
6973 return hash;
6974 }
6975
6976 /* Locate the ARM encoded calling stub for NAME. */
6977
6978 static struct elf_link_hash_entry *
6979 find_arm_glue (struct bfd_link_info *link_info,
6980 const char *name,
6981 char **error_message)
6982 {
6983 char *tmp_name;
6984 struct elf_link_hash_entry *myh;
6985 struct elf32_arm_link_hash_table *hash_table;
6986
6987 /* We need a pointer to the elfarm specific hash table. */
6988 hash_table = elf32_arm_hash_table (link_info);
6989 if (hash_table == NULL)
6990 return NULL;
6991
6992 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6993 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6994
6995 BFD_ASSERT (tmp_name);
6996
6997 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6998
6999 myh = elf_link_hash_lookup
7000 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7001
7002 if (myh == NULL
7003 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7004 "ARM", tmp_name, name) == -1)
7005 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7006
7007 free (tmp_name);
7008
7009 return myh;
7010 }
7011
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx r12
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx r12
   __func_offset:
   .word func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;	      /* ldr r12, [pc]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;    /* bx r12  */
static const insn32 a2t3_func_addr_insn = 0x00000001; /* .word func slot
							 (initial value 1).  */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;     /* ldr pc, [pc, #-4]  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001; /* .word func slot
							   (initial value 1).  */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;      /* ldr r12, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;   /* add r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;   /* bx r12  */

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
 __func_from_thumb:		    __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				    ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				    __func_addr:
					.word	     func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	      /* bx pc  */
static const insn16 t2a2_noop_insn = 0x46c0;	      /* nop (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000;	      /* b <target>; branch
							 offset presumably
							 patched at emission.  */

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: tst/moveq/bx templates; the source register fields
   below are zero and are presumably completed when the veneer is
   emitted — see the veneer output code.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;     /* tst rN, #1  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;   /* moveq pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;      /* bx rN  */
7080
7081 #ifndef ELFARM_NABI_C_INCLUDED
7082 static void
7083 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7084 {
7085 asection * s;
7086 bfd_byte * contents;
7087
7088 if (size == 0)
7089 {
7090 /* Do not include empty glue sections in the output. */
7091 if (abfd != NULL)
7092 {
7093 s = bfd_get_linker_section (abfd, name);
7094 if (s != NULL)
7095 s->flags |= SEC_EXCLUDE;
7096 }
7097 return;
7098 }
7099
7100 BFD_ASSERT (abfd != NULL);
7101
7102 s = bfd_get_linker_section (abfd, name);
7103 BFD_ASSERT (s != NULL);
7104
7105 contents = (bfd_byte *) bfd_alloc (abfd, size);
7106
7107 BFD_ASSERT (s->size == size);
7108 s->contents = contents;
7109 }
7110
7111 bfd_boolean
7112 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7113 {
7114 struct elf32_arm_link_hash_table * globals;
7115
7116 globals = elf32_arm_hash_table (info);
7117 BFD_ASSERT (globals != NULL);
7118
7119 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7120 globals->arm_glue_size,
7121 ARM2THUMB_GLUE_SECTION_NAME);
7122
7123 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7124 globals->thumb_glue_size,
7125 THUMB2ARM_GLUE_SECTION_NAME);
7126
7127 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7128 globals->vfp11_erratum_glue_size,
7129 VFP11_ERRATUM_VENEER_SECTION_NAME);
7130
7131 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7132 globals->stm32l4xx_erratum_glue_size,
7133 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7134
7135 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7136 globals->bx_glue_size,
7137 ARM_BX_GLUE_SECTION_NAME);
7138
7139 return TRUE;
7140 }
7141
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   returns the symbol identifying the stub.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the glue symbol name from the target symbol's name.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Choose the glue flavour: PIC glue for shared or relocatable links,
     the shorter v5 sequence when BLX is available, otherwise the full
     static sequence.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room in the glue section; the contents are written later.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
7214
/* Allocate space for ARMv4 BX veneers.  REG is the register operand of
   the BX instruction; one veneer is shared by all BX uses of the same
   register.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  /* No veneer symbol for this register may exist yet.  */
  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Reserve room for the veneer.  Bit 1 of the recorded offset marks
     the slot as allocated (note the "already allocated" test above
     relies on the stored value being non-zero).  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
7271
7272
7273 /* Add an entry to the code/data map for section SEC. */
7274
7275 static void
7276 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7277 {
7278 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7279 unsigned int newidx;
7280
7281 if (sec_data->map == NULL)
7282 {
7283 sec_data->map = (elf32_arm_section_map *)
7284 bfd_malloc (sizeof (elf32_arm_section_map));
7285 sec_data->mapcount = 0;
7286 sec_data->mapsize = 1;
7287 }
7288
7289 newidx = sec_data->mapcount++;
7290
7291 if (sec_data->mapcount > sec_data->mapsize)
7292 {
7293 sec_data->mapsize *= 2;
7294 sec_data->map = (elf32_arm_section_map *)
7295 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7296 * sizeof (elf32_arm_section_map));
7297 }
7298
7299 if (sec_data->map)
7300 {
7301 sec_data->map[newidx].vma = vma;
7302 sec_data->map[newidx].type = type;
7303 }
7304 }
7305
7306
7307 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7308 veneers are handled for now. */
7309
7310 static bfd_vma
7311 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7312 elf32_vfp11_erratum_list *branch,
7313 bfd *branch_bfd,
7314 asection *branch_sec,
7315 unsigned int offset)
7316 {
7317 asection *s;
7318 struct elf32_arm_link_hash_table *hash_table;
7319 char *tmp_name;
7320 struct elf_link_hash_entry *myh;
7321 struct bfd_link_hash_entry *bh;
7322 bfd_vma val;
7323 struct _arm_elf_section_data *sec_data;
7324 elf32_vfp11_erratum_list *newerr;
7325
7326 hash_table = elf32_arm_hash_table (link_info);
7327 BFD_ASSERT (hash_table != NULL);
7328 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7329
7330 s = bfd_get_linker_section
7331 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7332
7333 sec_data = elf32_arm_section_data (s);
7334
7335 BFD_ASSERT (s != NULL);
7336
7337 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7338 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7339
7340 BFD_ASSERT (tmp_name);
7341
7342 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7343 hash_table->num_vfp11_fixes);
7344
7345 myh = elf_link_hash_lookup
7346 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7347
7348 BFD_ASSERT (myh == NULL);
7349
7350 bh = NULL;
7351 val = hash_table->vfp11_erratum_glue_size;
7352 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7353 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7354 NULL, TRUE, FALSE, &bh);
7355
7356 myh = (struct elf_link_hash_entry *) bh;
7357 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7358 myh->forced_local = 1;
7359
7360 /* Link veneer back to calling location. */
7361 sec_data->erratumcount += 1;
7362 newerr = (elf32_vfp11_erratum_list *)
7363 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7364
7365 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7366 newerr->vma = -1;
7367 newerr->u.v.branch = branch;
7368 newerr->u.v.id = hash_table->num_vfp11_fixes;
7369 branch->u.b.veneer = newerr;
7370
7371 newerr->next = sec_data->erratumlist;
7372 sec_data->erratumlist = newerr;
7373
7374 /* A symbol for the return from the veneer. */
7375 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7376 hash_table->num_vfp11_fixes);
7377
7378 myh = elf_link_hash_lookup
7379 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7380
7381 if (myh != NULL)
7382 abort ();
7383
7384 bh = NULL;
7385 val = offset + 4;
7386 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7387 branch_sec, val, NULL, TRUE, FALSE, &bh);
7388
7389 myh = (struct elf_link_hash_entry *) bh;
7390 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7391 myh->forced_local = 1;
7392
7393 free (tmp_name);
7394
7395 /* Generate a mapping symbol for the veneer section, and explicitly add an
7396 entry for that symbol to the code/data map for the section. */
7397 if (hash_table->vfp11_erratum_glue_size == 0)
7398 {
7399 bh = NULL;
7400 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7401 ever requires this erratum fix. */
7402 _bfd_generic_link_add_one_symbol (link_info,
7403 hash_table->bfd_of_glue_owner, "$a",
7404 BSF_LOCAL, s, 0, NULL,
7405 TRUE, FALSE, &bh);
7406
7407 myh = (struct elf_link_hash_entry *) bh;
7408 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7409 myh->forced_local = 1;
7410
7411 /* The elf32_arm_init_maps function only cares about symbols from input
7412 BFDs. We must make a note of this generated mapping symbol
7413 ourselves so that code byteswapping works properly in
7414 elf32_arm_write_section. */
7415 elf32_arm_section_map_add (s, 'a', 0);
7416 }
7417
7418 s->size += VFP11_ERRATUM_VENEER_SIZE;
7419 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7420 hash_table->num_vfp11_fixes++;
7421
7422 /* The offset of the veneer. */
7423 return val;
7424 }
7425
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.

   BRANCH is the erratum list entry being fixed, located at OFFSET within
   BRANCH_SEC of BRANCH_BFD.  VENEER_SIZE is the size to reserve for this
   veneer (LDM and VLDM veneers differ in size).  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Build a unique name for this veneer's symbol.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return symbol lives just past the fixed-up instruction in the
     original section.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): at this point VAL is the
     return-symbol offset (OFFSET + 4), not the glue-section offset
     computed earlier — confirm callers expect this value.  */
  return val;
}
7544
/* Section flags given to every linker-created ARM glue section.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
7548
7549 /* Create a fake section for use by the ARM backend of the linker. */
7550
7551 static bfd_boolean
7552 arm_make_glue_section (bfd * abfd, const char * name)
7553 {
7554 asection * sec;
7555
7556 sec = bfd_get_linker_section (abfd, name);
7557 if (sec != NULL)
7558 /* Already made. */
7559 return TRUE;
7560
7561 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7562
7563 if (sec == NULL
7564 || !bfd_set_section_alignment (abfd, sec, 2))
7565 return FALSE;
7566
7567 /* Set the gc mark to prevent the section from being removed by garbage
7568 collection, despite the fact that no relocs refer to this section. */
7569 sec->gc_mark = 1;
7570
7571 return TRUE;
7572 }
7573
/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  Simply latches the
   global flag that selects the long PLT entry format.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  elf32_arm_use_long_plt_entry = TRUE;
}
7582
7583 /* Add the glue sections to ABFD. This function is called from the
7584 linker scripts in ld/emultempl/{armelf}.em. */
7585
7586 bfd_boolean
7587 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7588 struct bfd_link_info *info)
7589 {
7590 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7591 bfd_boolean dostm32l4xx = globals
7592 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7593 bfd_boolean addglue;
7594
7595 /* If we are only performing a partial
7596 link do not bother adding the glue. */
7597 if (bfd_link_relocatable (info))
7598 return TRUE;
7599
7600 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7601 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7602 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7603 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7604
7605 if (!dostm32l4xx)
7606 return addglue;
7607
7608 return addglue
7609 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7610 }
7611
7612 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7613 ensures they are not marked for deletion by
7614 strip_excluded_output_sections () when veneers are going to be created
7615 later. Not doing so would trigger assert on empty section size in
7616 lang_size_sections_1 (). */
7617
7618 void
7619 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7620 {
7621 enum elf32_arm_stub_type stub_type;
7622
7623 /* If we are only performing a partial
7624 link do not bother adding the glue. */
7625 if (bfd_link_relocatable (info))
7626 return;
7627
7628 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7629 {
7630 asection *out_sec;
7631 const char *out_sec_name;
7632
7633 if (!arm_dedicated_stub_output_section_required (stub_type))
7634 continue;
7635
7636 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7637 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7638 if (out_sec != NULL)
7639 out_sec->flags |= SEC_KEEP;
7640 }
7641 }
7642
7643 /* Select a BFD to be used to hold the sections used by the glue code.
7644 This function is called from the linker scripts in ld/emultempl/
7645 {armelf/pe}.em. */
7646
7647 bfd_boolean
7648 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7649 {
7650 struct elf32_arm_link_hash_table *globals;
7651
7652 /* If we are only performing a partial link
7653 do not bother getting a bfd to hold the glue. */
7654 if (bfd_link_relocatable (info))
7655 return TRUE;
7656
7657 /* Make sure we don't attach the glue sections to a dynamic object. */
7658 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7659
7660 globals = elf32_arm_hash_table (info);
7661 BFD_ASSERT (globals != NULL);
7662
7663 if (globals->bfd_of_glue_owner != NULL)
7664 return TRUE;
7665
7666 /* Save the bfd for later use. */
7667 globals->bfd_of_glue_owner = abfd;
7668
7669 return TRUE;
7670 }
7671
7672 static void
7673 check_use_blx (struct elf32_arm_link_hash_table *globals)
7674 {
7675 int cpu_arch;
7676
7677 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7678 Tag_CPU_arch);
7679
7680 if (globals->fix_arm1176)
7681 {
7682 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7683 globals->use_blx = 1;
7684 }
7685 else
7686 {
7687 if (cpu_arch > TAG_CPU_ARCH_V4T)
7688 globals->use_blx = 1;
7689 }
7690 }
7691
/* Scan the relocations of ABFD before section sizes are fixed, recording
   any ARM->Thumb interworking glue (R_ARM_PC24 to a Thumb target) and
   any ARMv4 BX veneers (R_ARM_V4BX with fix_v4bx >= 2) that will be
   needed.  Returns FALSE on error.  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX target register is the low four bits of the
		 instruction word.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release this section's contents and relocs unless they are the
	 copies cached on the section itself.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

 error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
7852 #endif
7853
7854
/* Initialise maps of ARM/Thumb/data for input BFDs.  Walks the local
   symbols of ABFD and records every $a/$t/$d-style mapping symbol in
   the owning section's code/data map.  */

void
bfd_elf32_arm_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))
    return;

  /* Skip dynamic objects.  */
  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD.  The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
				  NULL);

  /* No internal symbols read?  Skip this BFD.  */
  if (isymbuf == NULL)
    return;

  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL
	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  /* Record mapping symbols ($a, $t, $d, ...) by their type
	     character and value.  */
	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
	}
    }
  /* NOTE(review): isymbuf was allocated by bfd_elf_get_elf_syms and is
     not freed here — confirm whether ownership is transferred elsewhere
     or this is a leak.  */
}
7902
7903
7904 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7905 say what they wanted. */
7906
7907 void
7908 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7909 {
7910 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7911 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7912
7913 if (globals == NULL)
7914 return;
7915
7916 if (globals->fix_cortex_a8 == -1)
7917 {
7918 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7919 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7920 && (out_attr[Tag_CPU_arch_profile].i == 'A'
7921 || out_attr[Tag_CPU_arch_profile].i == 0))
7922 globals->fix_cortex_a8 = 1;
7923 else
7924 globals->fix_cortex_a8 = 0;
7925 }
7926 }
7927
7928
7929 void
7930 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
7931 {
7932 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7933 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7934
7935 if (globals == NULL)
7936 return;
7937 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
7938 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
7939 {
7940 switch (globals->vfp11_fix)
7941 {
7942 case BFD_ARM_VFP11_FIX_DEFAULT:
7943 case BFD_ARM_VFP11_FIX_NONE:
7944 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7945 break;
7946
7947 default:
7948 /* Give a warning, but do as the user requests anyway. */
7949 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
7950 "workaround is not necessary for target architecture"), obfd);
7951 }
7952 }
7953 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
7954 /* For earlier architectures, we might need the workaround, but do not
7955 enable it by default. If users is running with broken hardware, they
7956 must enable the erratum fix explicitly. */
7957 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7958 }
7959
7960 void
7961 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
7962 {
7963 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7964 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7965
7966 if (globals == NULL)
7967 return;
7968
7969 /* We assume only Cortex-M4 may require the fix. */
7970 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
7971 || out_attr[Tag_CPU_arch_profile].i != 'M')
7972 {
7973 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
7974 /* Give a warning, but do as the user requests anyway. */
7975 _bfd_error_handler
7976 (_("%pB: warning: selected STM32L4XX erratum "
7977 "workaround is not necessary for target architecture"), obfd);
7978 }
7979 }
7980
/* Classification of a VFP11 instruction by the pipeline that executes it,
   as assigned by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline (also used for most
		   single-cycle data-processing ops here).  */
  VFP11_LS,	/* Load/store and register-transfer pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline (fdiv, fsqrt).  */
  VFP11_BAD	/* Not decodable as a relevant VFP11 instruction.  */
};
7988
7989 /* Return a VFP register number. This is encoded as RX:X for single-precision
7990 registers, or X:RX for double-precision registers, where RX is the group of
7991 four bits in the instruction encoding and X is the single extension bit.
7992 RX and X fields are specified using their lowest (starting) bit. The return
7993 value is:
7994
7995 0...31: single-precision registers s0...s31
7996 32...63: double-precision registers d0...d31.
7997
7998 Although X should be zero for VFP11 (encoding d0...d15 only), we might
7999 encounter VFP3 instructions, so we allow the full range for DP registers. */
8000
8001 static unsigned int
8002 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8003 unsigned int x)
8004 {
8005 if (is_double)
8006 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8007 else
8008 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8009 }
8010
8011 /* Set bits in *WMASK according to a register number REG as encoded by
8012 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8013
static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  /* Single-precision register: one bit per register.  */
  if (reg < 32)
    {
      *wmask |= 1 << reg;
      return;
    }

  /* Double-precision d0-d15: mark both overlapping SP halves.
     d16-d31 (reg >= 48) are deliberately ignored.  */
  if (reg < 48)
    *wmask |= 3 << ((reg - 32) * 2);
}
8022
8023 /* Return TRUE if WMASK overwrites anything in REGS. */
8024
8025 static bfd_boolean
8026 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8027 {
8028 int i;
8029
8030 for (i = 0; i < numregs; i++)
8031 {
8032 unsigned int reg = regs[i];
8033
8034 if (reg < 32 && (wmask & (1 << reg)) != 0)
8035 return TRUE;
8036
8037 reg -= 32;
8038
8039 if (reg >= 16)
8040 continue;
8041
8042 if ((wmask & (3 << (reg * 2))) != 0)
8043 return TRUE;
8044 }
8045
8046 return FALSE;
8047 }
8048
8049 /* In this function, we're interested in two things: finding input registers
8050 for VFP data-processing instructions, and finding the set of registers which
8051 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8052 hold the written set, so FLDM etc. are easy to deal with (we're only
8053 interested in 32 SP registers or 16 dp registers, due to the VFP version
8054 implemented by the chip in question). DP registers are marked by setting
8055 both SP registers in the write mask). */
8056
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 equal to 0b1011 select a double-precision operation.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into one 4-bit selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	     | ((insn & 0x00300000) >> 19)
	     | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Three-operand multiply-accumulate: Fd is both read and
	     written, so it appears in REGS as well as DESTMASK.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  /* Fall into the shared two-operand handling; only VPIPE
	     differs between the binops and fdiv.  */
	  vfp_binop:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* Secondary opcode lives in bits 19:16 and bit 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
				| ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means the transfer writes VFP registers.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* FMSRR writes two consecutive SP registers.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits P (23), U (24 >> here bit 23..24 group)
	 and W (21) packed as P:U:W.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Immediate field gives the transfer length in words;
	       halve it for doubles to get a register count.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8240
8241
8242 static int elf32_arm_compare_mapping (const void * a, const void * b);
8243
8244
8245 /* Look for potentially-troublesome code sequences which might trigger the
8246 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8247 (available from ARM) for details of the erratum. A short version is
8248 described in ld.texinfo. */
8249
8250 bfd_boolean
8251 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8252 {
8253 asection *sec;
8254 bfd_byte *contents = NULL;
8255 int state = 0;
8256 int regs[3], numregs = 0;
8257 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8258 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8259
8260 if (globals == NULL)
8261 return FALSE;
8262
8263 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8264 The states transition as follows:
8265
8266 0 -> 1 (vector) or 0 -> 2 (scalar)
8267 A VFP FMAC-pipeline instruction has been seen. Fill
8268 regs[0]..regs[numregs-1] with its input operands. Remember this
8269 instruction in 'first_fmac'.
8270
8271 1 -> 2
8272 Any instruction, except for a VFP instruction which overwrites
8273 regs[*].
8274
8275 1 -> 3 [ -> 0 ] or
8276 2 -> 3 [ -> 0 ]
8277 A VFP instruction has been seen which overwrites any of regs[*].
8278 We must make a veneer! Reset state to 0 before examining next
8279 instruction.
8280
8281 2 -> 0
8282 If we fail to match anything in state 2, reset to state 0 and reset
8283 the instruction pointer to the instruction after 'first_fmac'.
8284
8285 If the VFP11 vector mode is in use, there must be at least two unrelated
8286 instructions between anti-dependent VFP11 instructions to properly avoid
8287 triggering the erratum, hence the use of the extra state 1. */
8288
8289 /* If we are only performing a partial link do not bother
8290 to construct any glue. */
8291 if (bfd_link_relocatable (link_info))
8292 return TRUE;
8293
8294 /* Skip if this bfd does not correspond to an ELF image. */
8295 if (! is_arm_elf (abfd))
8296 return TRUE;
8297
8298 /* We should have chosen a fix type by the time we get here. */
8299 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8300
8301 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8302 return TRUE;
8303
8304 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8305 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8306 return TRUE;
8307
8308 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8309 {
8310 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8311 struct _arm_elf_section_data *sec_data;
8312
8313 /* If we don't have executable progbits, we're not interested in this
8314 section. Also skip if section is to be excluded. */
8315 if (elf_section_type (sec) != SHT_PROGBITS
8316 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8317 || (sec->flags & SEC_EXCLUDE) != 0
8318 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8319 || sec->output_section == bfd_abs_section_ptr
8320 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8321 continue;
8322
8323 sec_data = elf32_arm_section_data (sec);
8324
8325 if (sec_data->mapcount == 0)
8326 continue;
8327
8328 if (elf_section_data (sec)->this_hdr.contents != NULL)
8329 contents = elf_section_data (sec)->this_hdr.contents;
8330 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8331 goto error_return;
8332
8333 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8334 elf32_arm_compare_mapping);
8335
8336 for (span = 0; span < sec_data->mapcount; span++)
8337 {
8338 unsigned int span_start = sec_data->map[span].vma;
8339 unsigned int span_end = (span == sec_data->mapcount - 1)
8340 ? sec->size : sec_data->map[span + 1].vma;
8341 char span_type = sec_data->map[span].type;
8342
8343 /* FIXME: Only ARM mode is supported at present. We may need to
8344 support Thumb-2 mode also at some point. */
8345 if (span_type != 'a')
8346 continue;
8347
8348 for (i = span_start; i < span_end;)
8349 {
8350 unsigned int next_i = i + 4;
8351 unsigned int insn = bfd_big_endian (abfd)
8352 ? (contents[i] << 24)
8353 | (contents[i + 1] << 16)
8354 | (contents[i + 2] << 8)
8355 | contents[i + 3]
8356 : (contents[i + 3] << 24)
8357 | (contents[i + 2] << 16)
8358 | (contents[i + 1] << 8)
8359 | contents[i];
8360 unsigned int writemask = 0;
8361 enum bfd_arm_vfp11_pipe vpipe;
8362
8363 switch (state)
8364 {
8365 case 0:
8366 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8367 &numregs);
8368 /* I'm assuming the VFP11 erratum can trigger with denorm
8369 operands on either the FMAC or the DS pipeline. This might
8370 lead to slightly overenthusiastic veneer insertion. */
8371 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8372 {
8373 state = use_vector ? 1 : 2;
8374 first_fmac = i;
8375 veneer_of_insn = insn;
8376 }
8377 break;
8378
8379 case 1:
8380 {
8381 int other_regs[3], other_numregs;
8382 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8383 other_regs,
8384 &other_numregs);
8385 if (vpipe != VFP11_BAD
8386 && bfd_arm_vfp11_antidependency (writemask, regs,
8387 numregs))
8388 state = 3;
8389 else
8390 state = 2;
8391 }
8392 break;
8393
8394 case 2:
8395 {
8396 int other_regs[3], other_numregs;
8397 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8398 other_regs,
8399 &other_numregs);
8400 if (vpipe != VFP11_BAD
8401 && bfd_arm_vfp11_antidependency (writemask, regs,
8402 numregs))
8403 state = 3;
8404 else
8405 {
8406 state = 0;
8407 next_i = first_fmac + 4;
8408 }
8409 }
8410 break;
8411
8412 case 3:
8413 abort (); /* Should be unreachable. */
8414 }
8415
8416 if (state == 3)
8417 {
8418 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8419 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8420
8421 elf32_arm_section_data (sec)->erratumcount += 1;
8422
8423 newerr->u.b.vfp_insn = veneer_of_insn;
8424
8425 switch (span_type)
8426 {
8427 case 'a':
8428 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8429 break;
8430
8431 default:
8432 abort ();
8433 }
8434
8435 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8436 first_fmac);
8437
8438 newerr->vma = -1;
8439
8440 newerr->next = sec_data->erratumlist;
8441 sec_data->erratumlist = newerr;
8442
8443 state = 0;
8444 }
8445
8446 i = next_i;
8447 }
8448 }
8449
8450 if (contents != NULL
8451 && elf_section_data (sec)->this_hdr.contents != contents)
8452 free (contents);
8453 contents = NULL;
8454 }
8455
8456 return TRUE;
8457
8458 error_return:
8459 if (contents != NULL
8460 && elf_section_data (sec)->this_hdr.contents != contents)
8461 free (contents);
8462
8463 return FALSE;
8464 }
8465
8466 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8467 after sections have been laid out, using specially-named symbols. */
8468
8469 void
8470 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8471 struct bfd_link_info *link_info)
8472 {
8473 asection *sec;
8474 struct elf32_arm_link_hash_table *globals;
8475 char *tmp_name;
8476
8477 if (bfd_link_relocatable (link_info))
8478 return;
8479
8480 /* Skip if this bfd does not correspond to an ELF image. */
8481 if (! is_arm_elf (abfd))
8482 return;
8483
8484 globals = elf32_arm_hash_table (link_info);
8485 if (globals == NULL)
8486 return;
8487
8488 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8489 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8490
8491 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8492 {
8493 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8494 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8495
8496 for (; errnode != NULL; errnode = errnode->next)
8497 {
8498 struct elf_link_hash_entry *myh;
8499 bfd_vma vma;
8500
8501 switch (errnode->type)
8502 {
8503 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8504 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8505 /* Find veneer symbol. */
8506 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8507 errnode->u.b.veneer->u.v.id);
8508
8509 myh = elf_link_hash_lookup
8510 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8511
8512 if (myh == NULL)
8513 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8514 abfd, "VFP11", tmp_name);
8515
8516 vma = myh->root.u.def.section->output_section->vma
8517 + myh->root.u.def.section->output_offset
8518 + myh->root.u.def.value;
8519
8520 errnode->u.b.veneer->vma = vma;
8521 break;
8522
8523 case VFP11_ERRATUM_ARM_VENEER:
8524 case VFP11_ERRATUM_THUMB_VENEER:
8525 /* Find return location. */
8526 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8527 errnode->u.v.id);
8528
8529 myh = elf_link_hash_lookup
8530 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8531
8532 if (myh == NULL)
8533 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8534 abfd, "VFP11", tmp_name);
8535
8536 vma = myh->root.u.def.section->output_section->vma
8537 + myh->root.u.def.section->output_offset
8538 + myh->root.u.def.value;
8539
8540 errnode->u.v.branch->vma = vma;
8541 break;
8542
8543 default:
8544 abort ();
8545 }
8546 }
8547 }
8548
8549 free (tmp_name);
8550 }
8551
8552 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8553 return locations after sections have been laid out, using
8554 specially-named symbols. */
8555
8556 void
8557 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8558 struct bfd_link_info *link_info)
8559 {
8560 asection *sec;
8561 struct elf32_arm_link_hash_table *globals;
8562 char *tmp_name;
8563
8564 if (bfd_link_relocatable (link_info))
8565 return;
8566
8567 /* Skip if this bfd does not correspond to an ELF image. */
8568 if (! is_arm_elf (abfd))
8569 return;
8570
8571 globals = elf32_arm_hash_table (link_info);
8572 if (globals == NULL)
8573 return;
8574
8575 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8576 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8577
8578 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8579 {
8580 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8581 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8582
8583 for (; errnode != NULL; errnode = errnode->next)
8584 {
8585 struct elf_link_hash_entry *myh;
8586 bfd_vma vma;
8587
8588 switch (errnode->type)
8589 {
8590 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8591 /* Find veneer symbol. */
8592 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8593 errnode->u.b.veneer->u.v.id);
8594
8595 myh = elf_link_hash_lookup
8596 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8597
8598 if (myh == NULL)
8599 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8600 abfd, "STM32L4XX", tmp_name);
8601
8602 vma = myh->root.u.def.section->output_section->vma
8603 + myh->root.u.def.section->output_offset
8604 + myh->root.u.def.value;
8605
8606 errnode->u.b.veneer->vma = vma;
8607 break;
8608
8609 case STM32L4XX_ERRATUM_VENEER:
8610 /* Find return location. */
8611 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8612 errnode->u.v.id);
8613
8614 myh = elf_link_hash_lookup
8615 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8616
8617 if (myh == NULL)
8618 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8619 abfd, "STM32L4XX", tmp_name);
8620
8621 vma = myh->root.u.def.section->output_section->vma
8622 + myh->root.u.def.section->output_offset
8623 + myh->root.u.def.value;
8624
8625 errnode->u.v.branch->vma = vma;
8626 break;
8627
8628 default:
8629 abort ();
8630 }
8631 }
8632 }
8633
8634 free (tmp_name);
8635 }
8636
8637 static inline bfd_boolean
8638 is_thumb2_ldmia (const insn32 insn)
8639 {
8640 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8641 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8642 return (insn & 0xffd02000) == 0xe8900000;
8643 }
8644
8645 static inline bfd_boolean
8646 is_thumb2_ldmdb (const insn32 insn)
8647 {
8648 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8649 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8650 return (insn & 0xffd02000) == 0xe9100000;
8651 }
8652
8653 static inline bfd_boolean
8654 is_thumb2_vldm (const insn32 insn)
8655 {
8656 /* A6.5 Extension register load or store instruction
8657 A7.7.229
8658 We look for SP 32-bit and DP 64-bit registers.
8659 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8660 <list> is consecutive 64-bit registers
8661 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8662 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8663 <list> is consecutive 32-bit registers
8664 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8665 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8666 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8667 return
8668 (((insn & 0xfe100f00) == 0xec100b00) ||
8669 ((insn & 0xfe100f00) == 0xec100a00))
8670 && /* (IA without !). */
8671 (((((insn << 7) >> 28) & 0xd) == 0x4)
8672 /* (IA with !), includes VPOP (when reg number is SP). */
8673 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8674 /* (DB with !). */
8675 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8676 }
8677
8678 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8679 VLDM opcode and:
8680 - computes the number and the mode of memory accesses
8681 - decides if the replacement should be done:
8682 . replaces only if > 8-word accesses
8683 . or (testing purposes only) replaces all accesses. */
8684
8685 static bfd_boolean
8686 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8687 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8688 {
8689 int nb_words = 0;
8690
8691 /* The field encoding the register list is the same for both LDMIA
8692 and LDMDB encodings. */
8693 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8694 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8695 else if (is_thumb2_vldm (insn))
8696 nb_words = (insn & 0xff);
8697
8698 /* DEFAULT mode accounts for the real bug condition situation,
8699 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8700 return
8701 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8702 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8703 }
8704
8705 /* Look for potentially-troublesome code sequences which might trigger
8706 the STM STM32L4XX erratum. */
8707
8708 bfd_boolean
8709 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8710 struct bfd_link_info *link_info)
8711 {
8712 asection *sec;
8713 bfd_byte *contents = NULL;
8714 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8715
8716 if (globals == NULL)
8717 return FALSE;
8718
8719 /* If we are only performing a partial link do not bother
8720 to construct any glue. */
8721 if (bfd_link_relocatable (link_info))
8722 return TRUE;
8723
8724 /* Skip if this bfd does not correspond to an ELF image. */
8725 if (! is_arm_elf (abfd))
8726 return TRUE;
8727
8728 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8729 return TRUE;
8730
8731 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8732 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8733 return TRUE;
8734
8735 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8736 {
8737 unsigned int i, span;
8738 struct _arm_elf_section_data *sec_data;
8739
8740 /* If we don't have executable progbits, we're not interested in this
8741 section. Also skip if section is to be excluded. */
8742 if (elf_section_type (sec) != SHT_PROGBITS
8743 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8744 || (sec->flags & SEC_EXCLUDE) != 0
8745 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8746 || sec->output_section == bfd_abs_section_ptr
8747 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8748 continue;
8749
8750 sec_data = elf32_arm_section_data (sec);
8751
8752 if (sec_data->mapcount == 0)
8753 continue;
8754
8755 if (elf_section_data (sec)->this_hdr.contents != NULL)
8756 contents = elf_section_data (sec)->this_hdr.contents;
8757 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8758 goto error_return;
8759
8760 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8761 elf32_arm_compare_mapping);
8762
8763 for (span = 0; span < sec_data->mapcount; span++)
8764 {
8765 unsigned int span_start = sec_data->map[span].vma;
8766 unsigned int span_end = (span == sec_data->mapcount - 1)
8767 ? sec->size : sec_data->map[span + 1].vma;
8768 char span_type = sec_data->map[span].type;
8769 int itblock_current_pos = 0;
8770
8771 /* Only Thumb2 mode need be supported with this CM4 specific
8772 code, we should not encounter any arm mode eg span_type
8773 != 'a'. */
8774 if (span_type != 't')
8775 continue;
8776
8777 for (i = span_start; i < span_end;)
8778 {
8779 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8780 bfd_boolean insn_32bit = FALSE;
8781 bfd_boolean is_ldm = FALSE;
8782 bfd_boolean is_vldm = FALSE;
8783 bfd_boolean is_not_last_in_it_block = FALSE;
8784
8785 /* The first 16-bits of all 32-bit thumb2 instructions start
8786 with opcode[15..13]=0b111 and the encoded op1 can be anything
8787 except opcode[12..11]!=0b00.
8788 See 32-bit Thumb instruction encoding. */
8789 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8790 insn_32bit = TRUE;
8791
8792 /* Compute the predicate that tells if the instruction
8793 is concerned by the IT block
8794 - Creates an error if there is a ldm that is not
8795 last in the IT block thus cannot be replaced
8796 - Otherwise we can create a branch at the end of the
8797 IT block, it will be controlled naturally by IT
8798 with the proper pseudo-predicate
8799 - So the only interesting predicate is the one that
8800 tells that we are not on the last item of an IT
8801 block. */
8802 if (itblock_current_pos != 0)
8803 is_not_last_in_it_block = !!--itblock_current_pos;
8804
8805 if (insn_32bit)
8806 {
8807 /* Load the rest of the insn (in manual-friendly order). */
8808 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8809 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8810 is_vldm = is_thumb2_vldm (insn);
8811
8812 /* Veneers are created for (v)ldm depending on
8813 option flags and memory accesses conditions; but
8814 if the instruction is not the last instruction of
8815 an IT block, we cannot create a jump there, so we
8816 bail out. */
8817 if ((is_ldm || is_vldm)
8818 && stm32l4xx_need_create_replacing_stub
8819 (insn, globals->stm32l4xx_fix))
8820 {
8821 if (is_not_last_in_it_block)
8822 {
8823 _bfd_error_handler
8824 /* xgettext:c-format */
8825 (_("%pB(%pA+%#x): error: multiple load detected"
8826 " in non-last IT block instruction:"
8827 " STM32L4XX veneer cannot be generated; "
8828 "use gcc option -mrestrict-it to generate"
8829 " only one instruction per IT block"),
8830 abfd, sec, i);
8831 }
8832 else
8833 {
8834 elf32_stm32l4xx_erratum_list *newerr =
8835 (elf32_stm32l4xx_erratum_list *)
8836 bfd_zmalloc
8837 (sizeof (elf32_stm32l4xx_erratum_list));
8838
8839 elf32_arm_section_data (sec)
8840 ->stm32l4xx_erratumcount += 1;
8841 newerr->u.b.insn = insn;
8842 /* We create only thumb branches. */
8843 newerr->type =
8844 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8845 record_stm32l4xx_erratum_veneer
8846 (link_info, newerr, abfd, sec,
8847 i,
8848 is_ldm ?
8849 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8850 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8851 newerr->vma = -1;
8852 newerr->next = sec_data->stm32l4xx_erratumlist;
8853 sec_data->stm32l4xx_erratumlist = newerr;
8854 }
8855 }
8856 }
8857 else
8858 {
8859 /* A7.7.37 IT p208
8860 IT blocks are only encoded in T1
8861 Encoding T1: IT{x{y{z}}} <firstcond>
8862 1 0 1 1 - 1 1 1 1 - firstcond - mask
8863 if mask = '0000' then see 'related encodings'
8864 We don't deal with UNPREDICTABLE, just ignore these.
8865 There can be no nested IT blocks so an IT block
8866 is naturally a new one for which it is worth
8867 computing its size. */
8868 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
8869 && ((insn & 0x000f) != 0x0000);
8870 /* If we have a new IT block we compute its size. */
8871 if (is_newitblock)
8872 {
8873 /* Compute the number of instructions controlled
8874 by the IT block, it will be used to decide
8875 whether we are inside an IT block or not. */
8876 unsigned int mask = insn & 0x000f;
8877 itblock_current_pos = 4 - ctz (mask);
8878 }
8879 }
8880
8881 i += insn_32bit ? 4 : 2;
8882 }
8883 }
8884
8885 if (contents != NULL
8886 && elf_section_data (sec)->this_hdr.contents != contents)
8887 free (contents);
8888 contents = NULL;
8889 }
8890
8891 return TRUE;
8892
8893 error_return:
8894 if (contents != NULL
8895 && elf_section_data (sec)->this_hdr.contents != contents)
8896 free (contents);
8897
8898 return FALSE;
8899 }
8900
8901 /* Set target relocation values needed during linking. */
8902
8903 void
8904 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
8905 struct bfd_link_info *link_info,
8906 struct elf32_arm_params *params)
8907 {
8908 struct elf32_arm_link_hash_table *globals;
8909
8910 globals = elf32_arm_hash_table (link_info);
8911 if (globals == NULL)
8912 return;
8913
8914 globals->target1_is_rel = params->target1_is_rel;
8915 if (strcmp (params->target2_type, "rel") == 0)
8916 globals->target2_reloc = R_ARM_REL32;
8917 else if (strcmp (params->target2_type, "abs") == 0)
8918 globals->target2_reloc = R_ARM_ABS32;
8919 else if (strcmp (params->target2_type, "got-rel") == 0)
8920 globals->target2_reloc = R_ARM_GOT_PREL;
8921 else
8922 {
8923 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
8924 params->target2_type);
8925 }
8926 globals->fix_v4bx = params->fix_v4bx;
8927 globals->use_blx |= params->use_blx;
8928 globals->vfp11_fix = params->vfp11_denorm_fix;
8929 globals->stm32l4xx_fix = params->stm32l4xx_fix;
8930 if (globals->fdpic_p)
8931 globals->pic_veneer = 1;
8932 else
8933 globals->pic_veneer = params->pic_veneer;
8934 globals->fix_cortex_a8 = params->fix_cortex_a8;
8935 globals->fix_arm1176 = params->fix_arm1176;
8936 globals->cmse_implib = params->cmse_implib;
8937 globals->in_implib_bfd = params->in_implib_bfd;
8938
8939 BFD_ASSERT (is_arm_elf (output_bfd));
8940 elf_arm_tdata (output_bfd)->no_enum_size_warning
8941 = params->no_enum_size_warning;
8942 elf_arm_tdata (output_bfd)->no_wchar_size_warning
8943 = params->no_wchar_size_warning;
8944 }
8945
8946 /* Replace the target offset of a Thumb bl or b.w instruction. */
8947
static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Branch targets are halfword-aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: insert the sign bit (S, bit 10) and imm10
     (offset bits 21:12), clearing the low 11 bits first.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: bits 13 and 11 get the inverted offset bits 23/22
     XORed with the sign (Thumb-2 J1/J2 bits), plus imm11 (offset bits
     11:1).  The ~0x2fff mask preserves bit 12, which distinguishes the
     instruction form.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
8970
8971 /* Thumb code calling an ARM function. */
8972
static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char *	       name,
			 bfd *		       input_bfd,
			 bfd *		       output_bfd,
			 asection *	       input_section,
			 bfd_byte *	       hit_data,
			 asection *	       sym_sec,
			 bfd_vma	       offset,
			 bfd_signed_vma	       addend,
			 bfd_vma	       val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up (or report the absence of) the glue symbol for NAME.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* An odd offset flags a stub whose contents have not been emitted
     yet; clear the flag bit and write the stub instructions.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Thumb part of the stub: bx pc (switch to ARM) plus a filler nop.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      /* ARM part of the stub: a PC-relative branch to the real target.  */
      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
9071
/* Populate an Arm to Thumb stub.  Returns the stub symbol.

   NAME is the glue symbol name, SYM_SEC the section of the Thumb
   target, VAL its run-time address and S the ARM2THUMB glue section
   to write into.  On first use (low bit of the glue symbol's value
   set) one of three stub flavours is emitted: a PIC veneer, a BLX
   veneer, or the plain v4t "ldr/bx" veneer.  Returns NULL (with
   *ERROR_MESSAGE set) if the glue symbol cannot be found.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Low bit set means the stub has not been written out yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Unlike the Thumb->ARM case this is only a warning; the
	     stub is still emitted.  */
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9166
/* Arm code calling a Thumb function.  Create (via
   elf32_arm_create_thumb_stub) an ARM->Thumb stub for NAME and patch
   the ARM branch at INPUT_SECTION + OFFSET (bytes at HIT_DATA) to
   target it.  VAL is the run-time address of the Thumb target and
   ADDEND the relocation addend.  Returns TRUE on success, FALSE
   (with *ERROR_MESSAGE set) otherwise.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  /* Keep only the condition/opcode bits of the original branch.  */
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Re-insert the 24-bit word offset to the stub.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
9223
/* Populate Arm stub for an exported Thumb function.  Traversal
   callback for elf_link_hash_traverse: INF is the bfd_link_info.
   Only symbols for which export glue space was reserved
   (eh->export_glue != NULL) are processed.  Always returns TRUE so
   the traversal continues.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Run-time address of the Thumb function being exported.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
9267
9268 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9269
9270 static bfd_vma
9271 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9272 {
9273 bfd_byte *p;
9274 bfd_vma glue_addr;
9275 asection *s;
9276 struct elf32_arm_link_hash_table *globals;
9277
9278 globals = elf32_arm_hash_table (info);
9279 BFD_ASSERT (globals != NULL);
9280 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9281
9282 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9283 ARM_BX_GLUE_SECTION_NAME);
9284 BFD_ASSERT (s != NULL);
9285 BFD_ASSERT (s->contents != NULL);
9286 BFD_ASSERT (s->output_section != NULL);
9287
9288 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9289
9290 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9291
9292 if ((globals->bx_glue_offset[reg] & 1) == 0)
9293 {
9294 p = s->contents + glue_addr;
9295 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9296 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9297 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9298 globals->bx_glue_offset[reg] |= 1;
9299 }
9300
9301 return glue_addr + s->output_section->vma + s->output_offset;
9302 }
9303
9304 /* Generate Arm stubs for exported Thumb symbols. */
9305 static void
9306 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9307 struct bfd_link_info *link_info)
9308 {
9309 struct elf32_arm_link_hash_table * globals;
9310
9311 if (link_info == NULL)
9312 /* Ignore this if we are not called by the ELF backend linker. */
9313 return;
9314
9315 globals = elf32_arm_hash_table (link_info);
9316 if (globals == NULL)
9317 return;
9318
9319 /* If blx is available then exported Thumb symbols are OK and there is
9320 nothing to do. */
9321 if (globals->use_blx)
9322 return;
9323
9324 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9325 link_info);
9326 }
9327
9328 /* Reserve space for COUNT dynamic relocations in relocation selection
9329 SRELOC. */
9330
9331 static void
9332 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9333 bfd_size_type count)
9334 {
9335 struct elf32_arm_link_hash_table *htab;
9336
9337 htab = elf32_arm_hash_table (info);
9338 BFD_ASSERT (htab->root.dynamic_sections_created);
9339 if (sreloc == NULL)
9340 abort ();
9341 sreloc->size += RELOC_SIZE (htab) * count;
9342 }
9343
9344 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9345 dynamic, the relocations should go in SRELOC, otherwise they should
9346 go in the special .rel.iplt section. */
9347
9348 static void
9349 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9350 bfd_size_type count)
9351 {
9352 struct elf32_arm_link_hash_table *htab;
9353
9354 htab = elf32_arm_hash_table (info);
9355 if (!htab->root.dynamic_sections_created)
9356 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9357 else
9358 {
9359 BFD_ASSERT (sreloc != NULL);
9360 sreloc->size += RELOC_SIZE (htab) * count;
9361 }
9362 }
9363
9364 /* Add relocation REL to the end of relocation section SRELOC. */
9365
9366 static void
9367 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9368 asection *sreloc, Elf_Internal_Rela *rel)
9369 {
9370 bfd_byte *loc;
9371 struct elf32_arm_link_hash_table *htab;
9372
9373 htab = elf32_arm_hash_table (info);
9374 if (!htab->root.dynamic_sections_created
9375 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9376 sreloc = htab->root.irelplt;
9377 if (sreloc == NULL)
9378 abort ();
9379 loc = sreloc->contents;
9380 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9381 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9382 abort ();
9383 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9384 }
9385
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Also reserves the matching dynamic relocation and the
   .got.plt (or .igot.plt) slot.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in .rel.got */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* Record the entry's offset before growing the section further.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      if (htab->fdpic_p)
	/* Function descriptor takes 64 bits in GOT.  */
	sgotplt->size += 8;
      else
	sgotplt->size += 4;
    }
}
9465
9466 static bfd_vma
9467 arm_movw_immediate (bfd_vma value)
9468 {
9469 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9470 }
9471
9472 static bfd_vma
9473 arm_movt_immediate (bfd_vma value)
9474 {
9475 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9476 }
9477
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Five PLT flavours are handled: Symbian, VxWorks (shared and
   executable), NaCl, FDPIC, Thumb2-only and classic ARM.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  (The low bit of got_offset is
	 a flag, hence the mask.)  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      if (htab->fdpic_p)
	/* Function descriptor takes 8 bytes.  */
	plt_index = (got_offset - got_header_size) / 8;
      else
	plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      /* Word 2 holds the GOT offset, word 5 the .rel.plt
		 offset of this entry's relocation.  */
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		/* Backwards branch to the start of the PLT.  */
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (htab->fdpic_p)
	{
	  /* Fill-up Thumb stub if needed.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }
	  put_arm_insn(htab, output_bfd,
		       elf32_arm_fdpic_plt_entry[0], ptr + 0);
	  put_arm_insn(htab, output_bfd,
		       elf32_arm_fdpic_plt_entry[1], ptr + 4);
	  put_arm_insn(htab, output_bfd,
		       elf32_arm_fdpic_plt_entry[2], ptr + 8);
	  put_arm_insn(htab, output_bfd,
		       elf32_arm_fdpic_plt_entry[3], ptr + 12);
	  /* The GOT offset of this entry's function descriptor,
	     loaded by the instructions above.  */
	  bfd_put_32 (output_bfd, got_offset, ptr + 16);

	  if (!(info->flags & DF_BIND_NOW))
	    {
	      /* funcdesc_value_reloc_offset.  */
	      bfd_put_32 (output_bfd,
			  htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
			  ptr + 20);
	      put_arm_insn(htab, output_bfd,
			   elf32_arm_fdpic_plt_entry[6], ptr + 24);
	      put_arm_insn(htab, output_bfd,
			   elf32_arm_fdpic_plt_entry[7], ptr + 28);
	      put_arm_insn(htab, output_bfd,
			   elf32_arm_fdpic_plt_entry[8], ptr + 32);
	      put_arm_insn(htab, output_bfd,
			   elf32_arm_fdpic_plt_entry[9], ptr + 36);
	    }
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	     used by PLT entry.  */
	  if (htab->fdpic_p)
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	      initial_got_entry = 0;
	    }
	  else
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	      initial_got_entry = (splt->output_section->vma
				   + splt->output_offset);
	    }
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);

      if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
	{
	  /* Setup initial funcdesc value.  */
	  /* FIXME: we don't support lazy binding because there is a
	     race condition between both words getting written and
	     some other thread attempting to read them.  The ARM
	     architecture does not have an atomic 64 bit load/store
	     instruction that could be used to prevent it; it is
	     recommended that threaded FDPIC applications run with the
	     LD_BIND_NOW environment variable set.  */
	  bfd_put_32(output_bfd, plt_address + 0x18,
		     sgot->contents + got_offset);
	  bfd_put_32(output_bfd, -1 /*TODO*/,
		     sgot->contents + got_offset + 4);
	}
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return TRUE;
}
9888
9889 /* Some relocations map to different relocations depending on the
9890 target. Return the real relocation. */
9891
9892 static int
9893 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9894 int r_type)
9895 {
9896 switch (r_type)
9897 {
9898 case R_ARM_TARGET1:
9899 if (globals->target1_is_rel)
9900 return R_ARM_REL32;
9901 else
9902 return R_ARM_ABS32;
9903
9904 case R_ARM_TARGET2:
9905 return globals->target2_reloc;
9906
9907 default:
9908 return r_type;
9909 }
9910 }
9911
9912 /* Return the base VMA address which should be subtracted from real addresses
9913 when resolving @dtpoff relocation.
9914 This is PT_TLS segment p_vaddr. */
9915
9916 static bfd_vma
9917 dtpoff_base (struct bfd_link_info *info)
9918 {
9919 /* If tls_sec is NULL, we should have signalled an error already. */
9920 if (elf_hash_table (info)->tls_sec == NULL)
9921 return 0;
9922 return elf_hash_table (info)->tls_sec->vma;
9923 }
9924
9925 /* Return the relocation value for @tpoff relocation
9926 if STT_TLS virtual address is ADDRESS. */
9927
9928 static bfd_vma
9929 tpoff (struct bfd_link_info *info, bfd_vma address)
9930 {
9931 struct elf_link_hash_table *htab = elf_hash_table (info);
9932 bfd_vma base;
9933
9934 /* If tls_sec is NULL, we should have signalled an error already. */
9935 if (htab->tls_sec == NULL)
9936 return 0;
9937 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
9938 return address - htab->tls_sec->vma + base;
9939 }
9940
9941 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
9942 VALUE is the relocation value. */
9943
9944 static bfd_reloc_status_type
9945 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
9946 {
9947 if (value > 0xfff)
9948 return bfd_reloc_overflow;
9949
9950 value |= bfd_get_32 (abfd, data) & 0xfffff000;
9951 bfd_put_32 (abfd, value, data);
9952 return bfd_reloc_ok;
9953 }
9954
9955 /* Handle TLS relaxations. Relaxing is possible for symbols that use
9956 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
9957 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
9958
9959 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
9960 is to then call final_link_relocate. Return other values in the
9961 case of error.
9962
9963 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
9964 the pre-relaxed code. It would be nice if the relocs were updated
9965 to match the optimization. */
9966
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  /* REL->R_OFFSET addresses the instruction (or, for R_ARM_TLS_GOTDESC,
     the word) that takes part in a TLS descriptor access sequence.
     IS_LOCAL non-zero means the symbol resolves at link time, so the
     sequence can be relaxed further (typically to no-ops).  */
  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Adjust the word pointing at the descriptor trampoline.  For a
	 local symbol the trampoline is not needed, so the word is
	 cleared.  Otherwise the stored value is biased down by the PC
	 offset of the branch that will consume it: 8 for ARM mode, 4
	 for Thumb mode plus the Thumb bit that is set in bit 0.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Tell the caller further processing of this reloc is needed.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Each instruction of the descriptor sequence is
	 relaxed individually: for a local symbol every instruction
	 becomes a NOP; otherwise the sequence is rewritten to load the
	 GOT value directly (GD->IE style).  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	/* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  /* Otherwise the instruction is left unchanged.  */
	}
      else if ((insn & 0xffc0) == 0x6840)	/* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] -- clear the immediate offset bits.  */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)	/* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx -- reuse the BLX's register field (bits 3-6)
	       as the MOV source register.  */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same relaxation as the Thumb case above, with the
	 ARM-mode encodings.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000)	/* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry -- keep the Rd (bits 12-15) and Rm (bits 0-3)
	       fields of the original ADD.  */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	  /* Otherwise the instruction is left unchanged.  */
	}
      else if ((insn & 0xfff00fff) == 0xe5900004)	/* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] -- clear the 12-bit immediate offset.  */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30)	/* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx -- reuse the BLX's Rm field (bits 0-3).  */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  The replacement is 32 bits wide; GLOBALS is
	 consulted only to pick the NOP encoding (single 32-bit nop.w on
	 Thumb-2, two 16-bit nops otherwise).  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Store as two halfwords, high halfword first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10105
10106 /* For a given value of n, calculate the value of G_n as required to
10107 deal with group relocations. We return it in the form of an
10108 encoded constant-and-rotation, together with the final residual. If n is
10109 specified as less than zero, then final_residual is filled with the
10110 input value and no further action is performed. */
10111
10112 static bfd_vma
10113 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10114 {
10115 int current_n;
10116 bfd_vma g_n;
10117 bfd_vma encoded_g_n = 0;
10118 bfd_vma residual = value; /* Also known as Y_n. */
10119
10120 for (current_n = 0; current_n <= n; current_n++)
10121 {
10122 int shift;
10123
10124 /* Calculate which part of the value to mask. */
10125 if (residual == 0)
10126 shift = 0;
10127 else
10128 {
10129 int msb;
10130
10131 /* Determine the most significant bit in the residual and
10132 align the resulting value to a 2-bit boundary. */
10133 for (msb = 30; msb >= 0; msb -= 2)
10134 if (residual & (3 << msb))
10135 break;
10136
10137 /* The desired shift is now (msb - 6), or zero, whichever
10138 is the greater. */
10139 shift = msb - 6;
10140 if (shift < 0)
10141 shift = 0;
10142 }
10143
10144 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10145 g_n = residual & (0xff << shift);
10146 encoded_g_n = (g_n >> shift)
10147 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10148
10149 /* Calculate the residual for the next time around. */
10150 residual &= ~g_n;
10151 }
10152
10153 *final_residual = residual;
10154
10155 return encoded_g_n;
10156 }
10157
10158 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10159 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10160
10161 static int
10162 identify_add_or_sub (bfd_vma insn)
10163 {
10164 int opcode = insn & 0x1e00000;
10165
10166 if (opcode == 1 << 23) /* ADD */
10167 return 1;
10168
10169 if (opcode == 1 << 22) /* SUB */
10170 return -1;
10171
10172 return 0;
10173 }
10174
10175 /* Perform a relocation as part of a final link. */
10176
10177 static bfd_reloc_status_type
10178 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10179 bfd * input_bfd,
10180 bfd * output_bfd,
10181 asection * input_section,
10182 bfd_byte * contents,
10183 Elf_Internal_Rela * rel,
10184 bfd_vma value,
10185 struct bfd_link_info * info,
10186 asection * sym_sec,
10187 const char * sym_name,
10188 unsigned char st_type,
10189 enum arm_st_branch_type branch_type,
10190 struct elf_link_hash_entry * h,
10191 bfd_boolean * unresolved_reloc_p,
10192 char ** error_message)
10193 {
10194 unsigned long r_type = howto->type;
10195 unsigned long r_symndx;
10196 bfd_byte * hit_data = contents + rel->r_offset;
10197 bfd_vma * local_got_offsets;
10198 bfd_vma * local_tlsdesc_gotents;
10199 asection * sgot;
10200 asection * splt;
10201 asection * sreloc = NULL;
10202 asection * srelgot;
10203 bfd_vma addend;
10204 bfd_signed_vma signed_addend;
10205 unsigned char dynreloc_st_type;
10206 bfd_vma dynreloc_value;
10207 struct elf32_arm_link_hash_table * globals;
10208 struct elf32_arm_link_hash_entry *eh;
10209 union gotplt_union *root_plt;
10210 struct arm_plt_info *arm_plt;
10211 bfd_vma plt_offset;
10212 bfd_vma gotplt_offset;
10213 bfd_boolean has_iplt_entry;
10214 bfd_boolean resolved_to_zero;
10215
10216 globals = elf32_arm_hash_table (info);
10217 if (globals == NULL)
10218 return bfd_reloc_notsupported;
10219
10220 BFD_ASSERT (is_arm_elf (input_bfd));
10221 BFD_ASSERT (howto != NULL);
10222
10223 /* Some relocation types map to different relocations depending on the
10224 target. We pick the right one here. */
10225 r_type = arm_real_reloc_type (globals, r_type);
10226
10227 /* It is possible to have linker relaxations on some TLS access
10228 models. Update our information here. */
10229 r_type = elf32_arm_tls_transition (info, r_type, h);
10230
10231 if (r_type != howto->type)
10232 howto = elf32_arm_howto_from_type (r_type);
10233
10234 eh = (struct elf32_arm_link_hash_entry *) h;
10235 sgot = globals->root.sgot;
10236 local_got_offsets = elf_local_got_offsets (input_bfd);
10237 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10238
10239 if (globals->root.dynamic_sections_created)
10240 srelgot = globals->root.srelgot;
10241 else
10242 srelgot = NULL;
10243
10244 r_symndx = ELF32_R_SYM (rel->r_info);
10245
10246 if (globals->use_rel)
10247 {
10248 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10249
10250 if (addend & ((howto->src_mask + 1) >> 1))
10251 {
10252 signed_addend = -1;
10253 signed_addend &= ~ howto->src_mask;
10254 signed_addend |= addend;
10255 }
10256 else
10257 signed_addend = addend;
10258 }
10259 else
10260 addend = signed_addend = rel->r_addend;
10261
10262 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10263 are resolving a function call relocation. */
10264 if (using_thumb_only (globals)
10265 && (r_type == R_ARM_THM_CALL
10266 || r_type == R_ARM_THM_JUMP24)
10267 && branch_type == ST_BRANCH_TO_ARM)
10268 branch_type = ST_BRANCH_TO_THUMB;
10269
10270 /* Record the symbol information that should be used in dynamic
10271 relocations. */
10272 dynreloc_st_type = st_type;
10273 dynreloc_value = value;
10274 if (branch_type == ST_BRANCH_TO_THUMB)
10275 dynreloc_value |= 1;
10276
10277 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10278 VALUE appropriately for relocations that we resolve at link time. */
10279 has_iplt_entry = FALSE;
10280 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10281 &arm_plt)
10282 && root_plt->offset != (bfd_vma) -1)
10283 {
10284 plt_offset = root_plt->offset;
10285 gotplt_offset = arm_plt->got_offset;
10286
10287 if (h == NULL || eh->is_iplt)
10288 {
10289 has_iplt_entry = TRUE;
10290 splt = globals->root.iplt;
10291
10292 /* Populate .iplt entries here, because not all of them will
10293 be seen by finish_dynamic_symbol. The lower bit is set if
10294 we have already populated the entry. */
10295 if (plt_offset & 1)
10296 plt_offset--;
10297 else
10298 {
10299 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10300 -1, dynreloc_value))
10301 root_plt->offset |= 1;
10302 else
10303 return bfd_reloc_notsupported;
10304 }
10305
10306 /* Static relocations always resolve to the .iplt entry. */
10307 st_type = STT_FUNC;
10308 value = (splt->output_section->vma
10309 + splt->output_offset
10310 + plt_offset);
10311 branch_type = ST_BRANCH_TO_ARM;
10312
10313 /* If there are non-call relocations that resolve to the .iplt
10314 entry, then all dynamic ones must too. */
10315 if (arm_plt->noncall_refcount != 0)
10316 {
10317 dynreloc_st_type = st_type;
10318 dynreloc_value = value;
10319 }
10320 }
10321 else
10322 /* We populate the .plt entry in finish_dynamic_symbol. */
10323 splt = globals->root.splt;
10324 }
10325 else
10326 {
10327 splt = NULL;
10328 plt_offset = (bfd_vma) -1;
10329 gotplt_offset = (bfd_vma) -1;
10330 }
10331
10332 resolved_to_zero = (h != NULL
10333 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10334
10335 switch (r_type)
10336 {
10337 case R_ARM_NONE:
10338 /* We don't need to find a value for this symbol. It's just a
10339 marker. */
10340 *unresolved_reloc_p = FALSE;
10341 return bfd_reloc_ok;
10342
10343 case R_ARM_ABS12:
10344 if (!globals->vxworks_p)
10345 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10346 /* Fall through. */
10347
10348 case R_ARM_PC24:
10349 case R_ARM_ABS32:
10350 case R_ARM_ABS32_NOI:
10351 case R_ARM_REL32:
10352 case R_ARM_REL32_NOI:
10353 case R_ARM_CALL:
10354 case R_ARM_JUMP24:
10355 case R_ARM_XPC25:
10356 case R_ARM_PREL31:
10357 case R_ARM_PLT32:
10358 /* Handle relocations which should use the PLT entry. ABS32/REL32
10359 will use the symbol's value, which may point to a PLT entry, but we
10360 don't need to handle that here. If we created a PLT entry, all
10361 branches in this object should go to it, except if the PLT is too
10362 far away, in which case a long branch stub should be inserted. */
10363 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10364 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10365 && r_type != R_ARM_CALL
10366 && r_type != R_ARM_JUMP24
10367 && r_type != R_ARM_PLT32)
10368 && plt_offset != (bfd_vma) -1)
10369 {
10370 /* If we've created a .plt section, and assigned a PLT entry
10371 to this function, it must either be a STT_GNU_IFUNC reference
10372 or not be known to bind locally. In other cases, we should
10373 have cleared the PLT entry by now. */
10374 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10375
10376 value = (splt->output_section->vma
10377 + splt->output_offset
10378 + plt_offset);
10379 *unresolved_reloc_p = FALSE;
10380 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10381 contents, rel->r_offset, value,
10382 rel->r_addend);
10383 }
10384
10385 /* When generating a shared object or relocatable executable, these
10386 relocations are copied into the output file to be resolved at
10387 run time. */
10388 if ((bfd_link_pic (info)
10389 || globals->root.is_relocatable_executable
10390 || globals->fdpic_p)
10391 && (input_section->flags & SEC_ALLOC)
10392 && !(globals->vxworks_p
10393 && strcmp (input_section->output_section->name,
10394 ".tls_vars") == 0)
10395 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10396 || !SYMBOL_CALLS_LOCAL (info, h))
10397 && !(input_bfd == globals->stub_bfd
10398 && strstr (input_section->name, STUB_SUFFIX))
10399 && (h == NULL
10400 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10401 && !resolved_to_zero)
10402 || h->root.type != bfd_link_hash_undefweak)
10403 && r_type != R_ARM_PC24
10404 && r_type != R_ARM_CALL
10405 && r_type != R_ARM_JUMP24
10406 && r_type != R_ARM_PREL31
10407 && r_type != R_ARM_PLT32)
10408 {
10409 Elf_Internal_Rela outrel;
10410 bfd_boolean skip, relocate;
10411 int isrofixup = 0;
10412
10413 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10414 && !h->def_regular)
10415 {
10416 char *v = _("shared object");
10417
10418 if (bfd_link_executable (info))
10419 v = _("PIE executable");
10420
10421 _bfd_error_handler
10422 (_("%pB: relocation %s against external or undefined symbol `%s'"
10423 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10424 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10425 return bfd_reloc_notsupported;
10426 }
10427
10428 *unresolved_reloc_p = FALSE;
10429
10430 if (sreloc == NULL && globals->root.dynamic_sections_created)
10431 {
10432 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10433 ! globals->use_rel);
10434
10435 if (sreloc == NULL)
10436 return bfd_reloc_notsupported;
10437 }
10438
10439 skip = FALSE;
10440 relocate = FALSE;
10441
10442 outrel.r_addend = addend;
10443 outrel.r_offset =
10444 _bfd_elf_section_offset (output_bfd, info, input_section,
10445 rel->r_offset);
10446 if (outrel.r_offset == (bfd_vma) -1)
10447 skip = TRUE;
10448 else if (outrel.r_offset == (bfd_vma) -2)
10449 skip = TRUE, relocate = TRUE;
10450 outrel.r_offset += (input_section->output_section->vma
10451 + input_section->output_offset);
10452
10453 if (skip)
10454 memset (&outrel, 0, sizeof outrel);
10455 else if (h != NULL
10456 && h->dynindx != -1
10457 && (!bfd_link_pic (info)
10458 || !(bfd_link_pie (info)
10459 || SYMBOLIC_BIND (info, h))
10460 || !h->def_regular))
10461 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10462 else
10463 {
10464 int symbol;
10465
10466 /* This symbol is local, or marked to become local. */
10467 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10468 || (globals->fdpic_p && !bfd_link_pic(info)));
10469 if (globals->symbian_p)
10470 {
10471 asection *osec;
10472
10473 /* On Symbian OS, the data segment and text segement
10474 can be relocated independently. Therefore, we
10475 must indicate the segment to which this
10476 relocation is relative. The BPABI allows us to
10477 use any symbol in the right segment; we just use
10478 the section symbol as it is convenient. (We
10479 cannot use the symbol given by "h" directly as it
10480 will not appear in the dynamic symbol table.)
10481
10482 Note that the dynamic linker ignores the section
10483 symbol value, so we don't subtract osec->vma
10484 from the emitted reloc addend. */
10485 if (sym_sec)
10486 osec = sym_sec->output_section;
10487 else
10488 osec = input_section->output_section;
10489 symbol = elf_section_data (osec)->dynindx;
10490 if (symbol == 0)
10491 {
10492 struct elf_link_hash_table *htab = elf_hash_table (info);
10493
10494 if ((osec->flags & SEC_READONLY) == 0
10495 && htab->data_index_section != NULL)
10496 osec = htab->data_index_section;
10497 else
10498 osec = htab->text_index_section;
10499 symbol = elf_section_data (osec)->dynindx;
10500 }
10501 BFD_ASSERT (symbol != 0);
10502 }
10503 else
10504 /* On SVR4-ish systems, the dynamic loader cannot
10505 relocate the text and data segments independently,
10506 so the symbol does not matter. */
10507 symbol = 0;
10508 if (dynreloc_st_type == STT_GNU_IFUNC)
10509 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10510 to the .iplt entry. Instead, every non-call reference
10511 must use an R_ARM_IRELATIVE relocation to obtain the
10512 correct run-time address. */
10513 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10514 else if (globals->fdpic_p && !bfd_link_pic(info))
10515 isrofixup = 1;
10516 else
10517 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10518 if (globals->use_rel)
10519 relocate = TRUE;
10520 else
10521 outrel.r_addend += dynreloc_value;
10522 }
10523
10524 if (isrofixup)
10525 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10526 else
10527 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10528
10529 /* If this reloc is against an external symbol, we do not want to
10530 fiddle with the addend. Otherwise, we need to include the symbol
10531 value so that it becomes an addend for the dynamic reloc. */
10532 if (! relocate)
10533 return bfd_reloc_ok;
10534
10535 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10536 contents, rel->r_offset,
10537 dynreloc_value, (bfd_vma) 0);
10538 }
10539 else switch (r_type)
10540 {
10541 case R_ARM_ABS12:
10542 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10543
10544 case R_ARM_XPC25: /* Arm BLX instruction. */
10545 case R_ARM_CALL:
10546 case R_ARM_JUMP24:
10547 case R_ARM_PC24: /* Arm B/BL instruction. */
10548 case R_ARM_PLT32:
10549 {
10550 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10551
10552 if (r_type == R_ARM_XPC25)
10553 {
10554 /* Check for Arm calling Arm function. */
10555 /* FIXME: Should we translate the instruction into a BL
10556 instruction instead ? */
10557 if (branch_type != ST_BRANCH_TO_THUMB)
10558 _bfd_error_handler
10559 (_("\%pB: warning: %s BLX instruction targets"
10560 " %s function '%s'"),
10561 input_bfd, "ARM",
10562 "ARM", h ? h->root.root.string : "(local)");
10563 }
10564 else if (r_type == R_ARM_PC24)
10565 {
10566 /* Check for Arm calling Thumb function. */
10567 if (branch_type == ST_BRANCH_TO_THUMB)
10568 {
10569 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10570 output_bfd, input_section,
10571 hit_data, sym_sec, rel->r_offset,
10572 signed_addend, value,
10573 error_message))
10574 return bfd_reloc_ok;
10575 else
10576 return bfd_reloc_dangerous;
10577 }
10578 }
10579
10580 /* Check if a stub has to be inserted because the
10581 destination is too far or we are changing mode. */
10582 if ( r_type == R_ARM_CALL
10583 || r_type == R_ARM_JUMP24
10584 || r_type == R_ARM_PLT32)
10585 {
10586 enum elf32_arm_stub_type stub_type = arm_stub_none;
10587 struct elf32_arm_link_hash_entry *hash;
10588
10589 hash = (struct elf32_arm_link_hash_entry *) h;
10590 stub_type = arm_type_of_stub (info, input_section, rel,
10591 st_type, &branch_type,
10592 hash, value, sym_sec,
10593 input_bfd, sym_name);
10594
10595 if (stub_type != arm_stub_none)
10596 {
10597 /* The target is out of reach, so redirect the
10598 branch to the local stub for this function. */
10599 stub_entry = elf32_arm_get_stub_entry (input_section,
10600 sym_sec, h,
10601 rel, globals,
10602 stub_type);
10603 {
10604 if (stub_entry != NULL)
10605 value = (stub_entry->stub_offset
10606 + stub_entry->stub_sec->output_offset
10607 + stub_entry->stub_sec->output_section->vma);
10608
10609 if (plt_offset != (bfd_vma) -1)
10610 *unresolved_reloc_p = FALSE;
10611 }
10612 }
10613 else
10614 {
10615 /* If the call goes through a PLT entry, make sure to
10616 check distance to the right destination address. */
10617 if (plt_offset != (bfd_vma) -1)
10618 {
10619 value = (splt->output_section->vma
10620 + splt->output_offset
10621 + plt_offset);
10622 *unresolved_reloc_p = FALSE;
10623 /* The PLT entry is in ARM mode, regardless of the
10624 target function. */
10625 branch_type = ST_BRANCH_TO_ARM;
10626 }
10627 }
10628 }
10629
10630 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10631 where:
10632 S is the address of the symbol in the relocation.
10633 P is address of the instruction being relocated.
10634 A is the addend (extracted from the instruction) in bytes.
10635
10636 S is held in 'value'.
10637 P is the base address of the section containing the
10638 instruction plus the offset of the reloc into that
10639 section, ie:
10640 (input_section->output_section->vma +
10641 input_section->output_offset +
10642 rel->r_offset).
10643 A is the addend, converted into bytes, ie:
10644 (signed_addend * 4)
10645
10646 Note: None of these operations have knowledge of the pipeline
10647 size of the processor, thus it is up to the assembler to
10648 encode this information into the addend. */
10649 value -= (input_section->output_section->vma
10650 + input_section->output_offset);
10651 value -= rel->r_offset;
10652 if (globals->use_rel)
10653 value += (signed_addend << howto->size);
10654 else
10655 /* RELA addends do not have to be adjusted by howto->size. */
10656 value += signed_addend;
10657
10658 signed_addend = value;
10659 signed_addend >>= howto->rightshift;
10660
10661 /* A branch to an undefined weak symbol is turned into a jump to
10662 the next instruction unless a PLT entry will be created.
10663 Do the same for local undefined symbols (but not for STN_UNDEF).
10664 The jump to the next instruction is optimized as a NOP depending
10665 on the architecture. */
10666 if (h ? (h->root.type == bfd_link_hash_undefweak
10667 && plt_offset == (bfd_vma) -1)
10668 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10669 {
10670 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10671
10672 if (arch_has_arm_nop (globals))
10673 value |= 0x0320f000;
10674 else
10675 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10676 }
10677 else
10678 {
10679 /* Perform a signed range check. */
10680 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10681 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10682 return bfd_reloc_overflow;
10683
10684 addend = (value & 2);
10685
10686 value = (signed_addend & howto->dst_mask)
10687 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10688
10689 if (r_type == R_ARM_CALL)
10690 {
10691 /* Set the H bit in the BLX instruction. */
10692 if (branch_type == ST_BRANCH_TO_THUMB)
10693 {
10694 if (addend)
10695 value |= (1 << 24);
10696 else
10697 value &= ~(bfd_vma)(1 << 24);
10698 }
10699
10700 /* Select the correct instruction (BL or BLX). */
10701 /* Only if we are not handling a BL to a stub. In this
10702 case, mode switching is performed by the stub. */
10703 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10704 value |= (1 << 28);
10705 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10706 {
10707 value &= ~(bfd_vma)(1 << 28);
10708 value |= (1 << 24);
10709 }
10710 }
10711 }
10712 }
10713 break;
10714
10715 case R_ARM_ABS32:
10716 value += addend;
10717 if (branch_type == ST_BRANCH_TO_THUMB)
10718 value |= 1;
10719 break;
10720
10721 case R_ARM_ABS32_NOI:
10722 value += addend;
10723 break;
10724
10725 case R_ARM_REL32:
10726 value += addend;
10727 if (branch_type == ST_BRANCH_TO_THUMB)
10728 value |= 1;
10729 value -= (input_section->output_section->vma
10730 + input_section->output_offset + rel->r_offset);
10731 break;
10732
10733 case R_ARM_REL32_NOI:
10734 value += addend;
10735 value -= (input_section->output_section->vma
10736 + input_section->output_offset + rel->r_offset);
10737 break;
10738
10739 case R_ARM_PREL31:
10740 value -= (input_section->output_section->vma
10741 + input_section->output_offset + rel->r_offset);
10742 value += signed_addend;
10743 if (! h || h->root.type != bfd_link_hash_undefweak)
10744 {
10745 /* Check for overflow. */
10746 if ((value ^ (value >> 1)) & (1 << 30))
10747 return bfd_reloc_overflow;
10748 }
10749 value &= 0x7fffffff;
10750 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10751 if (branch_type == ST_BRANCH_TO_THUMB)
10752 value |= 1;
10753 break;
10754 }
10755
10756 bfd_put_32 (input_bfd, value, hit_data);
10757 return bfd_reloc_ok;
10758
10759 case R_ARM_ABS8:
10760 /* PR 16202: Refectch the addend using the correct size. */
10761 if (globals->use_rel)
10762 addend = bfd_get_8 (input_bfd, hit_data);
10763 value += addend;
10764
10765 /* There is no way to tell whether the user intended to use a signed or
10766 unsigned addend. When checking for overflow we accept either,
10767 as specified by the AAELF. */
10768 if ((long) value > 0xff || (long) value < -0x80)
10769 return bfd_reloc_overflow;
10770
10771 bfd_put_8 (input_bfd, value, hit_data);
10772 return bfd_reloc_ok;
10773
10774 case R_ARM_ABS16:
10775 /* PR 16202: Refectch the addend using the correct size. */
10776 if (globals->use_rel)
10777 addend = bfd_get_16 (input_bfd, hit_data);
10778 value += addend;
10779
10780 /* See comment for R_ARM_ABS8. */
10781 if ((long) value > 0xffff || (long) value < -0x8000)
10782 return bfd_reloc_overflow;
10783
10784 bfd_put_16 (input_bfd, value, hit_data);
10785 return bfd_reloc_ok;
10786
10787 case R_ARM_THM_ABS5:
10788 /* Support ldr and str instructions for the thumb. */
10789 if (globals->use_rel)
10790 {
10791 /* Need to refetch addend. */
10792 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10793 /* ??? Need to determine shift amount from operand size. */
10794 addend >>= howto->rightshift;
10795 }
10796 value += addend;
10797
10798 /* ??? Isn't value unsigned? */
10799 if ((long) value > 0x1f || (long) value < -0x10)
10800 return bfd_reloc_overflow;
10801
10802 /* ??? Value needs to be properly shifted into place first. */
10803 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10804 bfd_put_16 (input_bfd, value, hit_data);
10805 return bfd_reloc_ok;
10806
10807 case R_ARM_THM_ALU_PREL_11_0:
10808 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10809 {
10810 bfd_vma insn;
10811 bfd_signed_vma relocation;
10812
10813 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10814 | bfd_get_16 (input_bfd, hit_data + 2);
10815
10816 if (globals->use_rel)
10817 {
10818 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10819 | ((insn & (1 << 26)) >> 15);
10820 if (insn & 0xf00000)
10821 signed_addend = -signed_addend;
10822 }
10823
10824 relocation = value + signed_addend;
10825 relocation -= Pa (input_section->output_section->vma
10826 + input_section->output_offset
10827 + rel->r_offset);
10828
10829 /* PR 21523: Use an absolute value. The user of this reloc will
10830 have already selected an ADD or SUB insn appropriately. */
10831 value = labs (relocation);
10832
10833 if (value >= 0x1000)
10834 return bfd_reloc_overflow;
10835
10836 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10837 if (branch_type == ST_BRANCH_TO_THUMB)
10838 value |= 1;
10839
10840 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10841 | ((value & 0x700) << 4)
10842 | ((value & 0x800) << 15);
10843 if (relocation < 0)
10844 insn |= 0xa00000;
10845
10846 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10847 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10848
10849 return bfd_reloc_ok;
10850 }
10851
10852 case R_ARM_THM_PC8:
10853 /* PR 10073: This reloc is not generated by the GNU toolchain,
10854 but it is supported for compatibility with third party libraries
10855 generated by other compilers, specifically the ARM/IAR. */
10856 {
10857 bfd_vma insn;
10858 bfd_signed_vma relocation;
10859
10860 insn = bfd_get_16 (input_bfd, hit_data);
10861
10862 if (globals->use_rel)
10863 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
10864
10865 relocation = value + addend;
10866 relocation -= Pa (input_section->output_section->vma
10867 + input_section->output_offset
10868 + rel->r_offset);
10869
10870 value = relocation;
10871
10872 /* We do not check for overflow of this reloc. Although strictly
10873 speaking this is incorrect, it appears to be necessary in order
10874 to work with IAR generated relocs. Since GCC and GAS do not
10875 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10876 a problem for them. */
10877 value &= 0x3fc;
10878
10879 insn = (insn & 0xff00) | (value >> 2);
10880
10881 bfd_put_16 (input_bfd, insn, hit_data);
10882
10883 return bfd_reloc_ok;
10884 }
10885
10886 case R_ARM_THM_PC12:
10887 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10888 {
10889 bfd_vma insn;
10890 bfd_signed_vma relocation;
10891
10892 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10893 | bfd_get_16 (input_bfd, hit_data + 2);
10894
10895 if (globals->use_rel)
10896 {
10897 signed_addend = insn & 0xfff;
10898 if (!(insn & (1 << 23)))
10899 signed_addend = -signed_addend;
10900 }
10901
10902 relocation = value + signed_addend;
10903 relocation -= Pa (input_section->output_section->vma
10904 + input_section->output_offset
10905 + rel->r_offset);
10906
10907 value = relocation;
10908
10909 if (value >= 0x1000)
10910 return bfd_reloc_overflow;
10911
10912 insn = (insn & 0xff7ff000) | value;
10913 if (relocation >= 0)
10914 insn |= (1 << 23);
10915
10916 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10917 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10918
10919 return bfd_reloc_ok;
10920 }
10921
10922 case R_ARM_THM_XPC22:
10923 case R_ARM_THM_CALL:
10924 case R_ARM_THM_JUMP24:
10925 /* Thumb BL (branch long instruction). */
10926 {
10927 bfd_vma relocation;
10928 bfd_vma reloc_sign;
10929 bfd_boolean overflow = FALSE;
10930 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10931 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10932 bfd_signed_vma reloc_signed_max;
10933 bfd_signed_vma reloc_signed_min;
10934 bfd_vma check;
10935 bfd_signed_vma signed_check;
10936 int bitsize;
10937 const int thumb2 = using_thumb2 (globals);
10938 const int thumb2_bl = using_thumb2_bl (globals);
10939
10940 /* A branch to an undefined weak symbol is turned into a jump to
10941 the next instruction unless a PLT entry will be created.
10942 The jump to the next instruction is optimized as a NOP.W for
10943 Thumb-2 enabled architectures. */
10944 if (h && h->root.type == bfd_link_hash_undefweak
10945 && plt_offset == (bfd_vma) -1)
10946 {
10947 if (thumb2)
10948 {
10949 bfd_put_16 (input_bfd, 0xf3af, hit_data);
10950 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
10951 }
10952 else
10953 {
10954 bfd_put_16 (input_bfd, 0xe000, hit_data);
10955 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
10956 }
10957 return bfd_reloc_ok;
10958 }
10959
10960 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
10961 with Thumb-1) involving the J1 and J2 bits. */
10962 if (globals->use_rel)
10963 {
	    /* Tail of the R_ARM_THM_CALL / R_ARM_THM_XPC22 /
	       R_ARM_THM_JUMP24 handling: reconstruct the addend from the
	       Thumb-2 BL/BLX encoding (REL links), then resolve the
	       branch, possibly via a stub or the PLT.
	       Per the Thumb-2 encoding: I1 = NOT(J1 XOR S) and
	       I2 = NOT(J2 XOR S).  */
	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
	    bfd_vma upper = upper_insn & 0x3ff;
	    bfd_vma lower = lower_insn & 0x7ff;
	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
	    bfd_vma i1 = j1 ^ s ? 0 : 1;
	    bfd_vma i2 = j2 ^ s ? 0 : 1;

	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
	    /* Sign extend.  */
	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

	    signed_addend = addend;
	  }

	if (r_type == R_ARM_THM_XPC22)
	  {
	    /* Check for Thumb to Thumb call.  */
	    /* FIXME: Should we translate the instruction into a BL
	       instruction instead ?  */
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      _bfd_error_handler
		(_("%pB: warning: %s BLX instruction targets"
		   " %s function '%s'"),
		 input_bfd, "Thumb",
		 "Thumb", h ? h->root.root.string : "(local)");
	  }
	else
	  {
	    /* If it is not a call to Thumb, assume call to Arm.
	       If it is a call relative to a section name, then it is not a
	       function call at all, but rather a long jump.  Calls through
	       the PLT do not require stubs.  */
	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
	      {
		if (globals->use_blx && r_type == R_ARM_THM_CALL)
		  {
		    /* Convert BL to BLX: clear bit 12 (the J1/"BL" bit of
		       the second halfword) and set bit 11.  */
		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
		else if (( r_type != R_ARM_THM_CALL)
			 && (r_type != R_ARM_THM_JUMP24))
		  {
		    /* No BLX available: fall back to an interworking
		       glue stub for the Thumb-to-ARM transition.  */
		    if (elf32_thumb_to_arm_stub
			(info, sym_name, input_bfd, output_bfd, input_section,
			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
			 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }
	    else if (branch_type == ST_BRANCH_TO_THUMB
		     && globals->use_blx
		     && r_type == R_ARM_THM_CALL)
	      {
		/* Make sure this is a BL.  */
		lower_insn |= 0x1800;
	      }
	  }

	enum elf32_arm_stub_type stub_type = arm_stub_none;
	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
	  {
	    /* Check if a stub has to be inserted because the destination
	       is too far.  */
	    struct elf32_arm_stub_hash_entry *stub_entry;
	    struct elf32_arm_link_hash_entry *hash;

	    hash = (struct elf32_arm_link_hash_entry *) h;

	    stub_type = arm_type_of_stub (info, input_section, rel,
					  st_type, &branch_type,
					  hash, value, sym_sec,
					  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		/* The target is out of reach or we are changing modes, so
		   redirect the branch to the local stub for this
		   function.  */
		stub_entry = elf32_arm_get_stub_entry (input_section,
						       sym_sec, h,
						       rel, globals,
						       stub_type);
		if (stub_entry != NULL)
		  {
		    value = (stub_entry->stub_offset
			     + stub_entry->stub_sec->output_offset
			     + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }

		/* If this call becomes a call to Arm, force BLX.  */
		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		  {
		    if ((stub_entry
			 && !arm_stub_is_thumb (stub_entry->stub_type))
			|| branch_type != ST_BRANCH_TO_THUMB)
		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
	      }
	  }

	/* Handle calls via the PLT.  */
	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);

	    if (globals->use_blx
		&& r_type == R_ARM_THM_CALL
		&& ! using_thumb_only (globals))
	      {
		/* If the Thumb BLX instruction is available, convert
		   the BL to a BLX instruction to call the ARM-mode
		   PLT entry.  */
		lower_insn = (lower_insn & ~0x1000) | 0x0800;
		branch_type = ST_BRANCH_TO_ARM;
	      }
	    else
	      {
		if (! using_thumb_only (globals))
		  /* Target the Thumb stub before the ARM PLT entry.  */
		  value -= PLT_THUMB_STUB_SIZE;
		branch_type = ST_BRANCH_TO_THUMB;
	      }
	    *unresolved_reloc_p = FALSE;
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	check = relocation >> howto->rightshift;

	/* If this is a signed value, the rightshift just dropped
	   leading 1 bits (assuming twos complement).  */
	if ((bfd_signed_vma) relocation >= 0)
	  signed_check = check;
	else
	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	/* Calculate the permissable maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  Thumb-1 BL has a 22-bit range rather than
	   the 24-bit range of Thumb-2, hence the two-bit reduction.  */
	bitsize = howto->bitsize;
	if (!thumb2_bl)
	  bitsize -= 2;
	reloc_signed_max = (1 << (bitsize - 1)) - 1;
	reloc_signed_min = ~reloc_signed_max;

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	if ((lower_insn & 0x5000) == 0x4000)
	  /* For a BLX instruction, make sure that the relocation is rounded up
	     to a word boundary.  This follows the semantics of the instruction
	     which specifies that bit 1 of the target address will come from bit
	     1 of the base address.  */
	  relocation = (relocation + 2) & ~ 3;

	/* Put RELOCATION back into the insn.  Assumes two's complement.
	   We use the Thumb-2 encoding, which is safe even if dealing with
	   a Thumb-1 instruction by virtue of our overflow check above.  */
	reloc_sign = (signed_check < 0) ? 1 : 0;
	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		     | ((relocation >> 12) & 0x3ff)
		     | (reloc_sign << 10);
	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		     | ((relocation >> 1) & 0x7ff);

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
      break;
11151
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction (B<cond>.W): a 21-bit
	 signed, halfword-aligned PC-relative offset split across two
	 halfwords as S:J2:J1:imm6:imm11:'0'.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	/* If the destination is out of range (or needs a mode change),
	   redirect the branch through a stub.  */
	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S  = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >>  1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
11243
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  These are 16-bit branches whose
	 offset field width is given by HOWTO->bitsize; JUMP6 is the
	 CBZ/CBNZ form.  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    /* Manual sign-extension using the top bit of src_mask.  */
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  /* CBZ/CBNZ split the offset as i:imm5 (bits 9 and 7:3).  */
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
11298
11299 case R_ARM_ALU_PCREL7_0:
11300 case R_ARM_ALU_PCREL15_8:
11301 case R_ARM_ALU_PCREL23_15:
11302 {
11303 bfd_vma insn;
11304 bfd_vma relocation;
11305
11306 insn = bfd_get_32 (input_bfd, hit_data);
11307 if (globals->use_rel)
11308 {
11309 /* Extract the addend. */
11310 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11311 signed_addend = addend;
11312 }
11313 relocation = value + signed_addend;
11314
11315 relocation -= (input_section->output_section->vma
11316 + input_section->output_offset
11317 + rel->r_offset);
11318 insn = (insn & ~0xfff)
11319 | ((howto->bitpos << 7) & 0xf00)
11320 | ((relocation >> howto->bitpos) & 0xff);
11321 bfd_put_32 (input_bfd, value, hit_data);
11322 }
11323 return bfd_reloc_ok;
11324
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These are only used by the garbage-collection machinery; they
	 carry no bits to patch at final link time.  */
      return bfd_reloc_ok;
11328
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11352
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  The PC-relative
	 adjustment is applied by _bfd_final_link_relocate via HOWTO.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11365
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  Allocate/initialize the GOT slot (once,
	 tracked via the low bit of the recorded offset), emitting a
	 dynamic relocation or an FDPIC rofixup for it as needed.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: use (and possibly initialize) its GOT entry.  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  /* Symbol binds locally: emit IRELATIVE/RELATIVE or an
		     FDPIC rofixup instead of a symbolic relocation.  */
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			       || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else if (globals->fdpic_p)
		    isrofixup = 1;
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (outrel.r_info == 0 || globals->use_rel || isrofixup))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (outrel.r_info != 0 && !isrofixup)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      else if (isrofixup)
		{
		  arm_elf_add_rofixup(output_bfd,
				      elf32_arm_hash_table(info)->srofixup,
				      sgot->output_section->vma
				      + sgot->output_offset + off);
		}
	      /* Mark the entry as initialized.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: use the per-input-bfd GOT offset table.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      if (globals->use_rel)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
		{
		  Elf_Internal_Rela outrel;

		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      else if (globals->fdpic_p)
		{
		  /* For FDPIC executables, we use rofixup to fix
		     address at runtime.  */
		  arm_elf_add_rofixup(output_bfd, globals->srofixup,
				      sgot->output_section->vma + sgot->output_offset
				      + off);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      if (r_type != R_ARM_GOT32)
	/* R_ARM_GOT_PREL wants the absolute GOT-entry address; HOWTO
	   applies the PC-relative subtraction.  */
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11517
    case R_ARM_TLS_LDO32:
      /* Offset of the symbol within the TLS block (dtpoff).  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11524
    case R_ARM_TLS_LDM32:
      /* Local-dynamic TLS: resolve to the (single, shared) module-ID
	 GOT entry, creating it on first use.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	/* Low bit of the offset records whether the entry has already
	   been initialized.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static link: the executable is always module 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (globals->fdpic_p)
	  {
	    /* FDPIC: resolve to the GOT-relative offset of the entry
	       rather than a PC-relative address.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    value = sgot->output_section->vma + sgot->output_offset + off
	      - (input_section->output_section->vma
		 + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
11583
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic / initial-exec / TLS-descriptor handling.
	 Initializes the GOT (and .got.plt, for descriptors) entries on
	 first use, emitting dynamic relocations where required, then
	 resolves the relocation — including rewriting the call
	 instruction for the (relaxable) TLS_CALL forms.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    /* Global symbol: dynamic index and GOT offsets come from
	       the hash entry.  */
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    /* Local symbol: offsets come from the per-bfd tables.  */
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_pic (info) || indx != 0)
		&& (h == NULL
		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			&& !resolved_to_zero)
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		/* TLS descriptor: a two-word entry in .got.plt plus an
		   R_ARM_TLS_DESC relocation in .rel(a).plt.  */
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_pic (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		/* General dynamic: a two-word GOT entry holding
		   (module ID, dtp-relative offset).  */
		if (need_relocs)
		  {
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		/* Initial exec: a one-word GOT entry holding the
		   tp-relative offset (or a TPOFF32 dynamic reloc).  */
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* Pick the GOT offset appropriate for the (possibly relaxed)
	   relocation being resolved.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    /* Rewrite the call into a branch to the TLS trampoline
	       (possibly via a long-branch stub).  */
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* ARM BL/BLX: 24-bit word offset, PC bias of 8.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    /* The word at HIT_DATA encodes (distance back to the
	       consuming instruction) | (1 if that instruction is
	       Thumb).  Inspect the instruction to pick the right PC
	       bias.  */
	    unsigned long data, insn;
	    unsigned thumb;

	    data = bfd_get_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1u;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "Thumb", insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "ARM", insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32 ||
				 r_type == R_ARM_TLS_IE32))
	  {
	    /* For FDPIC relocations, resolve to the offset of the GOT
	       entry from the start of GOT.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
11952
    case R_ARM_TLS_LE32:
      /* Local exec: tp-relative offset, only meaningful in (static or
	 dynamic) executables — reject it when building a DLL.  */
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11969
    case R_ARM_V4BX:
      /* Rewrite ARMv4T "BX Rm" for ARMv4 targets: either redirect to a
	 per-register interworking veneer (fix_v4bx == 2) or replace the
	 instruction with an equivalent "MOV PC, Rm".  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* PC-relative branch offset with the usual 8-byte bias.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
11999
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      /* ARM MOVW/MOVT: patch a 16-bit immediate split as imm4:imm12.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from imm4:imm12 and
	       sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the checked (non-_NC) MOVW form can overflow.  */
	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12042
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      /* Thumb-2 MOVW/MOVT: the 16-bit immediate is split as
	 imm4:i:imm3:imm8 across the two halfwords.  */
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Reassemble the addend from imm4:i:imm3:imm8 and
	       sign-extend it.  */
	    addend = ((insn >> 4)  & 0xf000)
		   | ((insn >> 15) & 0x0800)
		   | ((insn >> 4)  & 0x0700)
		   | (insn	   & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the checked (non-_NC) MOVW form can overflow.  */
	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
12096
12097 case R_ARM_ALU_PC_G0_NC:
12098 case R_ARM_ALU_PC_G1_NC:
12099 case R_ARM_ALU_PC_G0:
12100 case R_ARM_ALU_PC_G1:
12101 case R_ARM_ALU_PC_G2:
12102 case R_ARM_ALU_SB_G0_NC:
12103 case R_ARM_ALU_SB_G1_NC:
12104 case R_ARM_ALU_SB_G0:
12105 case R_ARM_ALU_SB_G1:
12106 case R_ARM_ALU_SB_G2:
12107 {
12108 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12109 bfd_vma pc = input_section->output_section->vma
12110 + input_section->output_offset + rel->r_offset;
12111 /* sb is the origin of the *segment* containing the symbol. */
12112 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12113 bfd_vma residual;
12114 bfd_vma g_n;
12115 bfd_signed_vma signed_value;
12116 int group = 0;
12117
12118 /* Determine which group of bits to select. */
12119 switch (r_type)
12120 {
12121 case R_ARM_ALU_PC_G0_NC:
12122 case R_ARM_ALU_PC_G0:
12123 case R_ARM_ALU_SB_G0_NC:
12124 case R_ARM_ALU_SB_G0:
12125 group = 0;
12126 break;
12127
12128 case R_ARM_ALU_PC_G1_NC:
12129 case R_ARM_ALU_PC_G1:
12130 case R_ARM_ALU_SB_G1_NC:
12131 case R_ARM_ALU_SB_G1:
12132 group = 1;
12133 break;
12134
12135 case R_ARM_ALU_PC_G2:
12136 case R_ARM_ALU_SB_G2:
12137 group = 2;
12138 break;
12139
12140 default:
12141 abort ();
12142 }
12143
12144 /* If REL, extract the addend from the insn. If RELA, it will
12145 have already been fetched for us. */
12146 if (globals->use_rel)
12147 {
12148 int negative;
12149 bfd_vma constant = insn & 0xff;
12150 bfd_vma rotation = (insn & 0xf00) >> 8;
12151
12152 if (rotation == 0)
12153 signed_addend = constant;
12154 else
12155 {
12156 /* Compensate for the fact that in the instruction, the
12157 rotation is stored in multiples of 2 bits. */
12158 rotation *= 2;
12159
12160 /* Rotate "constant" right by "rotation" bits. */
12161 signed_addend = (constant >> rotation) |
12162 (constant << (8 * sizeof (bfd_vma) - rotation));
12163 }
12164
12165 /* Determine if the instruction is an ADD or a SUB.
12166 (For REL, this determines the sign of the addend.) */
12167 negative = identify_add_or_sub (insn);
12168 if (negative == 0)
12169 {
12170 _bfd_error_handler
12171 /* xgettext:c-format */
12172 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12173 "are allowed for ALU group relocations"),
12174 input_bfd, input_section, (uint64_t) rel->r_offset);
12175 return bfd_reloc_overflow;
12176 }
12177
12178 signed_addend *= negative;
12179 }
12180
12181 /* Compute the value (X) to go in the place. */
12182 if (r_type == R_ARM_ALU_PC_G0_NC
12183 || r_type == R_ARM_ALU_PC_G1_NC
12184 || r_type == R_ARM_ALU_PC_G0
12185 || r_type == R_ARM_ALU_PC_G1
12186 || r_type == R_ARM_ALU_PC_G2)
12187 /* PC relative. */
12188 signed_value = value - pc + signed_addend;
12189 else
12190 /* Section base relative. */
12191 signed_value = value - sb + signed_addend;
12192
12193 /* If the target symbol is a Thumb function, then set the
12194 Thumb bit in the address. */
12195 if (branch_type == ST_BRANCH_TO_THUMB)
12196 signed_value |= 1;
12197
12198 /* Calculate the value of the relevant G_n, in encoded
12199 constant-with-rotation format. */
12200 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12201 group, &residual);
12202
12203 /* Check for overflow if required. */
12204 if ((r_type == R_ARM_ALU_PC_G0
12205 || r_type == R_ARM_ALU_PC_G1
12206 || r_type == R_ARM_ALU_PC_G2
12207 || r_type == R_ARM_ALU_SB_G0
12208 || r_type == R_ARM_ALU_SB_G1
12209 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12210 {
12211 _bfd_error_handler
12212 /* xgettext:c-format */
12213 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12214 "splitting %#" PRIx64 " for group relocation %s"),
12215 input_bfd, input_section, (uint64_t) rel->r_offset,
12216 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12217 howto->name);
12218 return bfd_reloc_overflow;
12219 }
12220
12221 /* Mask out the value and the ADD/SUB part of the opcode; take care
12222 not to destroy the S bit. */
12223 insn &= 0xff1ff000;
12224
12225 /* Set the opcode according to whether the value to go in the
12226 place is negative. */
12227 if (signed_value < 0)
12228 insn |= 1 << 22;
12229 else
12230 insn |= 1 << 23;
12231
12232 /* Encode the offset. */
12233 insn |= g_n;
12234
12235 bfd_put_32 (input_bfd, insn, hit_data);
12236 }
12237 return bfd_reloc_ok;
12238
12239 case R_ARM_LDR_PC_G0:
12240 case R_ARM_LDR_PC_G1:
12241 case R_ARM_LDR_PC_G2:
12242 case R_ARM_LDR_SB_G0:
12243 case R_ARM_LDR_SB_G1:
12244 case R_ARM_LDR_SB_G2:
12245 {
12246 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12247 bfd_vma pc = input_section->output_section->vma
12248 + input_section->output_offset + rel->r_offset;
12249 /* sb is the origin of the *segment* containing the symbol. */
12250 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12251 bfd_vma residual;
12252 bfd_signed_vma signed_value;
12253 int group = 0;
12254
12255 /* Determine which groups of bits to calculate. */
12256 switch (r_type)
12257 {
12258 case R_ARM_LDR_PC_G0:
12259 case R_ARM_LDR_SB_G0:
12260 group = 0;
12261 break;
12262
12263 case R_ARM_LDR_PC_G1:
12264 case R_ARM_LDR_SB_G1:
12265 group = 1;
12266 break;
12267
12268 case R_ARM_LDR_PC_G2:
12269 case R_ARM_LDR_SB_G2:
12270 group = 2;
12271 break;
12272
12273 default:
12274 abort ();
12275 }
12276
12277 /* If REL, extract the addend from the insn. If RELA, it will
12278 have already been fetched for us. */
12279 if (globals->use_rel)
12280 {
12281 int negative = (insn & (1 << 23)) ? 1 : -1;
12282 signed_addend = negative * (insn & 0xfff);
12283 }
12284
12285 /* Compute the value (X) to go in the place. */
12286 if (r_type == R_ARM_LDR_PC_G0
12287 || r_type == R_ARM_LDR_PC_G1
12288 || r_type == R_ARM_LDR_PC_G2)
12289 /* PC relative. */
12290 signed_value = value - pc + signed_addend;
12291 else
12292 /* Section base relative. */
12293 signed_value = value - sb + signed_addend;
12294
12295 /* Calculate the value of the relevant G_{n-1} to obtain
12296 the residual at that stage. */
12297 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12298 group - 1, &residual);
12299
12300 /* Check for overflow. */
12301 if (residual >= 0x1000)
12302 {
12303 _bfd_error_handler
12304 /* xgettext:c-format */
12305 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12306 "splitting %#" PRIx64 " for group relocation %s"),
12307 input_bfd, input_section, (uint64_t) rel->r_offset,
12308 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12309 howto->name);
12310 return bfd_reloc_overflow;
12311 }
12312
12313 /* Mask out the value and U bit. */
12314 insn &= 0xff7ff000;
12315
12316 /* Set the U bit if the value to go in the place is non-negative. */
12317 if (signed_value >= 0)
12318 insn |= 1 << 23;
12319
12320 /* Encode the offset. */
12321 insn |= residual;
12322
12323 bfd_put_32 (input_bfd, insn, hit_data);
12324 }
12325 return bfd_reloc_ok;
12326
12327 case R_ARM_LDRS_PC_G0:
12328 case R_ARM_LDRS_PC_G1:
12329 case R_ARM_LDRS_PC_G2:
12330 case R_ARM_LDRS_SB_G0:
12331 case R_ARM_LDRS_SB_G1:
12332 case R_ARM_LDRS_SB_G2:
12333 {
12334 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12335 bfd_vma pc = input_section->output_section->vma
12336 + input_section->output_offset + rel->r_offset;
12337 /* sb is the origin of the *segment* containing the symbol. */
12338 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12339 bfd_vma residual;
12340 bfd_signed_vma signed_value;
12341 int group = 0;
12342
12343 /* Determine which groups of bits to calculate. */
12344 switch (r_type)
12345 {
12346 case R_ARM_LDRS_PC_G0:
12347 case R_ARM_LDRS_SB_G0:
12348 group = 0;
12349 break;
12350
12351 case R_ARM_LDRS_PC_G1:
12352 case R_ARM_LDRS_SB_G1:
12353 group = 1;
12354 break;
12355
12356 case R_ARM_LDRS_PC_G2:
12357 case R_ARM_LDRS_SB_G2:
12358 group = 2;
12359 break;
12360
12361 default:
12362 abort ();
12363 }
12364
12365 /* If REL, extract the addend from the insn. If RELA, it will
12366 have already been fetched for us. */
12367 if (globals->use_rel)
12368 {
12369 int negative = (insn & (1 << 23)) ? 1 : -1;
12370 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12371 }
12372
12373 /* Compute the value (X) to go in the place. */
12374 if (r_type == R_ARM_LDRS_PC_G0
12375 || r_type == R_ARM_LDRS_PC_G1
12376 || r_type == R_ARM_LDRS_PC_G2)
12377 /* PC relative. */
12378 signed_value = value - pc + signed_addend;
12379 else
12380 /* Section base relative. */
12381 signed_value = value - sb + signed_addend;
12382
12383 /* Calculate the value of the relevant G_{n-1} to obtain
12384 the residual at that stage. */
12385 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12386 group - 1, &residual);
12387
12388 /* Check for overflow. */
12389 if (residual >= 0x100)
12390 {
12391 _bfd_error_handler
12392 /* xgettext:c-format */
12393 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12394 "splitting %#" PRIx64 " for group relocation %s"),
12395 input_bfd, input_section, (uint64_t) rel->r_offset,
12396 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12397 howto->name);
12398 return bfd_reloc_overflow;
12399 }
12400
12401 /* Mask out the value and U bit. */
12402 insn &= 0xff7ff0f0;
12403
12404 /* Set the U bit if the value to go in the place is non-negative. */
12405 if (signed_value >= 0)
12406 insn |= 1 << 23;
12407
12408 /* Encode the offset. */
12409 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12410
12411 bfd_put_32 (input_bfd, insn, hit_data);
12412 }
12413 return bfd_reloc_ok;
12414
12415 case R_ARM_LDC_PC_G0:
12416 case R_ARM_LDC_PC_G1:
12417 case R_ARM_LDC_PC_G2:
12418 case R_ARM_LDC_SB_G0:
12419 case R_ARM_LDC_SB_G1:
12420 case R_ARM_LDC_SB_G2:
12421 {
12422 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12423 bfd_vma pc = input_section->output_section->vma
12424 + input_section->output_offset + rel->r_offset;
12425 /* sb is the origin of the *segment* containing the symbol. */
12426 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12427 bfd_vma residual;
12428 bfd_signed_vma signed_value;
12429 int group = 0;
12430
12431 /* Determine which groups of bits to calculate. */
12432 switch (r_type)
12433 {
12434 case R_ARM_LDC_PC_G0:
12435 case R_ARM_LDC_SB_G0:
12436 group = 0;
12437 break;
12438
12439 case R_ARM_LDC_PC_G1:
12440 case R_ARM_LDC_SB_G1:
12441 group = 1;
12442 break;
12443
12444 case R_ARM_LDC_PC_G2:
12445 case R_ARM_LDC_SB_G2:
12446 group = 2;
12447 break;
12448
12449 default:
12450 abort ();
12451 }
12452
12453 /* If REL, extract the addend from the insn. If RELA, it will
12454 have already been fetched for us. */
12455 if (globals->use_rel)
12456 {
12457 int negative = (insn & (1 << 23)) ? 1 : -1;
12458 signed_addend = negative * ((insn & 0xff) << 2);
12459 }
12460
12461 /* Compute the value (X) to go in the place. */
12462 if (r_type == R_ARM_LDC_PC_G0
12463 || r_type == R_ARM_LDC_PC_G1
12464 || r_type == R_ARM_LDC_PC_G2)
12465 /* PC relative. */
12466 signed_value = value - pc + signed_addend;
12467 else
12468 /* Section base relative. */
12469 signed_value = value - sb + signed_addend;
12470
12471 /* Calculate the value of the relevant G_{n-1} to obtain
12472 the residual at that stage. */
12473 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12474 group - 1, &residual);
12475
12476 /* Check for overflow. (The absolute value to go in the place must be
12477 divisible by four and, after having been divided by four, must
12478 fit in eight bits.) */
12479 if ((residual & 0x3) != 0 || residual >= 0x400)
12480 {
12481 _bfd_error_handler
12482 /* xgettext:c-format */
12483 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12484 "splitting %#" PRIx64 " for group relocation %s"),
12485 input_bfd, input_section, (uint64_t) rel->r_offset,
12486 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12487 howto->name);
12488 return bfd_reloc_overflow;
12489 }
12490
12491 /* Mask out the value and U bit. */
12492 insn &= 0xff7fff00;
12493
12494 /* Set the U bit if the value to go in the place is non-negative. */
12495 if (signed_value >= 0)
12496 insn |= 1 << 23;
12497
12498 /* Encode the offset. */
12499 insn |= residual >> 2;
12500
12501 bfd_put_32 (input_bfd, insn, hit_data);
12502 }
12503 return bfd_reloc_ok;
12504
12505 case R_ARM_THM_ALU_ABS_G0_NC:
12506 case R_ARM_THM_ALU_ABS_G1_NC:
12507 case R_ARM_THM_ALU_ABS_G2_NC:
12508 case R_ARM_THM_ALU_ABS_G3_NC:
12509 {
12510 const int shift_array[4] = {0, 8, 16, 24};
12511 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12512 bfd_vma addr = value;
12513 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12514
12515 /* Compute address. */
12516 if (globals->use_rel)
12517 signed_addend = insn & 0xff;
12518 addr += signed_addend;
12519 if (branch_type == ST_BRANCH_TO_THUMB)
12520 addr |= 1;
12521 /* Clean imm8 insn. */
12522 insn &= 0xff00;
12523 /* And update with correct part of address. */
12524 insn |= (addr >> shift) & 0xff;
12525 /* Update insn. */
12526 bfd_put_16 (input_bfd, insn, hit_data);
12527 }
12528
12529 *unresolved_reloc_p = FALSE;
12530 return bfd_reloc_ok;
12531
12532 case R_ARM_GOTOFFFUNCDESC:
12533 {
12534 if (h == NULL)
12535 {
12536 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12537 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12538 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12539 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12540 bfd_vma seg = -1;
12541
12542 if (bfd_link_pic(info) && dynindx == 0)
12543 abort();
12544
12545 /* Resolve relocation. */
12546 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12547 , contents + rel->r_offset);
12548 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12549 not done yet. */
12550 arm_elf_fill_funcdesc(output_bfd, info,
12551 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12552 dynindx, offset, addr, dynreloc_value, seg);
12553 }
12554 else
12555 {
12556 int dynindx;
12557 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12558 bfd_vma addr;
12559 bfd_vma seg = -1;
12560
12561 /* For static binaries, sym_sec can be null. */
12562 if (sym_sec)
12563 {
12564 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12565 addr = dynreloc_value - sym_sec->output_section->vma;
12566 }
12567 else
12568 {
12569 dynindx = 0;
12570 addr = 0;
12571 }
12572
12573 if (bfd_link_pic(info) && dynindx == 0)
12574 abort();
12575
12576 /* This case cannot occur since funcdesc is allocated by
12577 the dynamic loader so we cannot resolve the relocation. */
12578 if (h->dynindx != -1)
12579 abort();
12580
12581 /* Resolve relocation. */
12582 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12583 contents + rel->r_offset);
12584 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12585 arm_elf_fill_funcdesc(output_bfd, info,
12586 &eh->fdpic_cnts.funcdesc_offset,
12587 dynindx, offset, addr, dynreloc_value, seg);
12588 }
12589 }
12590 *unresolved_reloc_p = FALSE;
12591 return bfd_reloc_ok;
12592
12593 case R_ARM_GOTFUNCDESC:
12594 {
12595 if (h != NULL)
12596 {
12597 Elf_Internal_Rela outrel;
12598
12599 /* Resolve relocation. */
12600 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12601 + sgot->output_offset),
12602 contents + rel->r_offset);
12603 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12604 if(h->dynindx == -1)
12605 {
12606 int dynindx;
12607 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12608 bfd_vma addr;
12609 bfd_vma seg = -1;
12610
12611 /* For static binaries sym_sec can be null. */
12612 if (sym_sec)
12613 {
12614 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12615 addr = dynreloc_value - sym_sec->output_section->vma;
12616 }
12617 else
12618 {
12619 dynindx = 0;
12620 addr = 0;
12621 }
12622
12623 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12624 arm_elf_fill_funcdesc(output_bfd, info,
12625 &eh->fdpic_cnts.funcdesc_offset,
12626 dynindx, offset, addr, dynreloc_value, seg);
12627 }
12628
12629 /* Add a dynamic relocation on GOT entry if not already done. */
12630 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12631 {
12632 if (h->dynindx == -1)
12633 {
12634 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12635 if (h->root.type == bfd_link_hash_undefweak)
12636 bfd_put_32(output_bfd, 0, sgot->contents
12637 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12638 else
12639 bfd_put_32(output_bfd, sgot->output_section->vma
12640 + sgot->output_offset
12641 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12642 sgot->contents
12643 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12644 }
12645 else
12646 {
12647 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12648 }
12649 outrel.r_offset = sgot->output_section->vma
12650 + sgot->output_offset
12651 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12652 outrel.r_addend = 0;
12653 if (h->dynindx == -1 && !bfd_link_pic(info))
12654 if (h->root.type == bfd_link_hash_undefweak)
12655 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12656 else
12657 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12658 else
12659 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12660 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12661 }
12662 }
12663 else
12664 {
12665 /* Such relocation on static function should not have been
12666 emitted by the compiler. */
12667 abort();
12668 }
12669 }
12670 *unresolved_reloc_p = FALSE;
12671 return bfd_reloc_ok;
12672
12673 case R_ARM_FUNCDESC:
12674 {
12675 if (h == NULL)
12676 {
12677 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12678 Elf_Internal_Rela outrel;
12679 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12680 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12681 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12682 bfd_vma seg = -1;
12683
12684 if (bfd_link_pic(info) && dynindx == 0)
12685 abort();
12686
12687 /* Replace static FUNCDESC relocation with a
12688 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12689 executable. */
12690 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12691 outrel.r_offset = input_section->output_section->vma
12692 + input_section->output_offset + rel->r_offset;
12693 outrel.r_addend = 0;
12694 if (bfd_link_pic(info))
12695 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12696 else
12697 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12698
12699 bfd_put_32 (input_bfd, sgot->output_section->vma
12700 + sgot->output_offset + offset, hit_data);
12701
12702 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12703 arm_elf_fill_funcdesc(output_bfd, info,
12704 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12705 dynindx, offset, addr, dynreloc_value, seg);
12706 }
12707 else
12708 {
12709 if (h->dynindx == -1)
12710 {
12711 int dynindx;
12712 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12713 bfd_vma addr;
12714 bfd_vma seg = -1;
12715 Elf_Internal_Rela outrel;
12716
12717 /* For static binaries sym_sec can be null. */
12718 if (sym_sec)
12719 {
12720 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12721 addr = dynreloc_value - sym_sec->output_section->vma;
12722 }
12723 else
12724 {
12725 dynindx = 0;
12726 addr = 0;
12727 }
12728
12729 if (bfd_link_pic(info) && dynindx == 0)
12730 abort();
12731
12732 /* Replace static FUNCDESC relocation with a
12733 R_ARM_RELATIVE dynamic relocation. */
12734 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12735 outrel.r_offset = input_section->output_section->vma
12736 + input_section->output_offset + rel->r_offset;
12737 outrel.r_addend = 0;
12738 if (bfd_link_pic(info))
12739 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12740 else
12741 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12742
12743 bfd_put_32 (input_bfd, sgot->output_section->vma
12744 + sgot->output_offset + offset, hit_data);
12745
12746 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12747 arm_elf_fill_funcdesc(output_bfd, info,
12748 &eh->fdpic_cnts.funcdesc_offset,
12749 dynindx, offset, addr, dynreloc_value, seg);
12750 }
12751 else
12752 {
12753 Elf_Internal_Rela outrel;
12754
12755 /* Add a dynamic relocation. */
12756 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12757 outrel.r_offset = input_section->output_section->vma
12758 + input_section->output_offset + rel->r_offset;
12759 outrel.r_addend = 0;
12760 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12761 }
12762 }
12763 }
12764 *unresolved_reloc_p = FALSE;
12765 return bfd_reloc_ok;
12766
12767 default:
12768 return bfd_reloc_notsupported;
12769 }
12770 }
12771
12772 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12773 static void
12774 arm_add_to_rel (bfd * abfd,
12775 bfd_byte * address,
12776 reloc_howto_type * howto,
12777 bfd_signed_vma increment)
12778 {
12779 bfd_signed_vma addend;
12780
12781 if (howto->type == R_ARM_THM_CALL
12782 || howto->type == R_ARM_THM_JUMP24)
12783 {
12784 int upper_insn, lower_insn;
12785 int upper, lower;
12786
12787 upper_insn = bfd_get_16 (abfd, address);
12788 lower_insn = bfd_get_16 (abfd, address + 2);
12789 upper = upper_insn & 0x7ff;
12790 lower = lower_insn & 0x7ff;
12791
12792 addend = (upper << 12) | (lower << 1);
12793 addend += increment;
12794 addend >>= 1;
12795
12796 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
12797 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
12798
12799 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
12800 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
12801 }
12802 else
12803 {
12804 bfd_vma contents;
12805
12806 contents = bfd_get_32 (abfd, address);
12807
12808 /* Get the (signed) value from the instruction. */
12809 addend = contents & howto->src_mask;
12810 if (addend & ((howto->src_mask + 1) >> 1))
12811 {
12812 bfd_signed_vma mask;
12813
12814 mask = -1;
12815 mask &= ~ howto->src_mask;
12816 addend |= mask;
12817 }
12818
12819 /* Add in the increment, (which is a byte value). */
12820 switch (howto->type)
12821 {
12822 default:
12823 addend += increment;
12824 break;
12825
12826 case R_ARM_PC24:
12827 case R_ARM_PLT32:
12828 case R_ARM_CALL:
12829 case R_ARM_JUMP24:
12830 addend <<= howto->size;
12831 addend += increment;
12832
12833 /* Should we check for overflow here ? */
12834
12835 /* Drop any undesired bits. */
12836 addend >>= howto->rightshift;
12837 break;
12838 }
12839
12840 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
12841
12842 bfd_put_32 (abfd, contents, address);
12843 }
12844 }
12845
/* Nonzero if R_TYPE is any ARM TLS relocation: the traditional
   GD/LD/IE/LE forms and their associated dynamic relocations, plus
   the GNU descriptor-based dialect tested for below.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect: the TLS
   descriptor relocation and the call/sequence markers that allow the
   linker to relax descriptor sequences.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
12864
12865 /* Relocate an ARM ELF section. */
12866
12867 static bfd_boolean
12868 elf32_arm_relocate_section (bfd * output_bfd,
12869 struct bfd_link_info * info,
12870 bfd * input_bfd,
12871 asection * input_section,
12872 bfd_byte * contents,
12873 Elf_Internal_Rela * relocs,
12874 Elf_Internal_Sym * local_syms,
12875 asection ** local_sections)
12876 {
12877 Elf_Internal_Shdr *symtab_hdr;
12878 struct elf_link_hash_entry **sym_hashes;
12879 Elf_Internal_Rela *rel;
12880 Elf_Internal_Rela *relend;
12881 const char *name;
12882 struct elf32_arm_link_hash_table * globals;
12883
12884 globals = elf32_arm_hash_table (info);
12885 if (globals == NULL)
12886 return FALSE;
12887
12888 symtab_hdr = & elf_symtab_hdr (input_bfd);
12889 sym_hashes = elf_sym_hashes (input_bfd);
12890
12891 rel = relocs;
12892 relend = relocs + input_section->reloc_count;
12893 for (; rel < relend; rel++)
12894 {
12895 int r_type;
12896 reloc_howto_type * howto;
12897 unsigned long r_symndx;
12898 Elf_Internal_Sym * sym;
12899 asection * sec;
12900 struct elf_link_hash_entry * h;
12901 bfd_vma relocation;
12902 bfd_reloc_status_type r;
12903 arelent bfd_reloc;
12904 char sym_type;
12905 bfd_boolean unresolved_reloc = FALSE;
12906 char *error_message = NULL;
12907
12908 r_symndx = ELF32_R_SYM (rel->r_info);
12909 r_type = ELF32_R_TYPE (rel->r_info);
12910 r_type = arm_real_reloc_type (globals, r_type);
12911
12912 if ( r_type == R_ARM_GNU_VTENTRY
12913 || r_type == R_ARM_GNU_VTINHERIT)
12914 continue;
12915
12916 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
12917
12918 if (howto == NULL)
12919 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
12920
12921 h = NULL;
12922 sym = NULL;
12923 sec = NULL;
12924
12925 if (r_symndx < symtab_hdr->sh_info)
12926 {
12927 sym = local_syms + r_symndx;
12928 sym_type = ELF32_ST_TYPE (sym->st_info);
12929 sec = local_sections[r_symndx];
12930
12931 /* An object file might have a reference to a local
12932 undefined symbol. This is a daft object file, but we
12933 should at least do something about it. V4BX & NONE
12934 relocations do not use the symbol and are explicitly
12935 allowed to use the undefined symbol, so allow those.
12936 Likewise for relocations against STN_UNDEF. */
12937 if (r_type != R_ARM_V4BX
12938 && r_type != R_ARM_NONE
12939 && r_symndx != STN_UNDEF
12940 && bfd_is_und_section (sec)
12941 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
12942 (*info->callbacks->undefined_symbol)
12943 (info, bfd_elf_string_from_elf_section
12944 (input_bfd, symtab_hdr->sh_link, sym->st_name),
12945 input_bfd, input_section,
12946 rel->r_offset, TRUE);
12947
12948 if (globals->use_rel)
12949 {
12950 relocation = (sec->output_section->vma
12951 + sec->output_offset
12952 + sym->st_value);
12953 if (!bfd_link_relocatable (info)
12954 && (sec->flags & SEC_MERGE)
12955 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
12956 {
12957 asection *msec;
12958 bfd_vma addend, value;
12959
12960 switch (r_type)
12961 {
12962 case R_ARM_MOVW_ABS_NC:
12963 case R_ARM_MOVT_ABS:
12964 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
12965 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
12966 addend = (addend ^ 0x8000) - 0x8000;
12967 break;
12968
12969 case R_ARM_THM_MOVW_ABS_NC:
12970 case R_ARM_THM_MOVT_ABS:
12971 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
12972 << 16;
12973 value |= bfd_get_16 (input_bfd,
12974 contents + rel->r_offset + 2);
12975 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
12976 | ((value & 0x04000000) >> 15);
12977 addend = (addend ^ 0x8000) - 0x8000;
12978 break;
12979
12980 default:
12981 if (howto->rightshift
12982 || (howto->src_mask & (howto->src_mask + 1)))
12983 {
12984 _bfd_error_handler
12985 /* xgettext:c-format */
12986 (_("%pB(%pA+%#" PRIx64 "): "
12987 "%s relocation against SEC_MERGE section"),
12988 input_bfd, input_section,
12989 (uint64_t) rel->r_offset, howto->name);
12990 return FALSE;
12991 }
12992
12993 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
12994
12995 /* Get the (signed) value from the instruction. */
12996 addend = value & howto->src_mask;
12997 if (addend & ((howto->src_mask + 1) >> 1))
12998 {
12999 bfd_signed_vma mask;
13000
13001 mask = -1;
13002 mask &= ~ howto->src_mask;
13003 addend |= mask;
13004 }
13005 break;
13006 }
13007
13008 msec = sec;
13009 addend =
13010 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13011 - relocation;
13012 addend += msec->output_section->vma + msec->output_offset;
13013
13014 /* Cases here must match those in the preceding
13015 switch statement. */
13016 switch (r_type)
13017 {
13018 case R_ARM_MOVW_ABS_NC:
13019 case R_ARM_MOVT_ABS:
13020 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13021 | (addend & 0xfff);
13022 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13023 break;
13024
13025 case R_ARM_THM_MOVW_ABS_NC:
13026 case R_ARM_THM_MOVT_ABS:
13027 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13028 | (addend & 0xff) | ((addend & 0x0800) << 15);
13029 bfd_put_16 (input_bfd, value >> 16,
13030 contents + rel->r_offset);
13031 bfd_put_16 (input_bfd, value,
13032 contents + rel->r_offset + 2);
13033 break;
13034
13035 default:
13036 value = (value & ~ howto->dst_mask)
13037 | (addend & howto->dst_mask);
13038 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13039 break;
13040 }
13041 }
13042 }
13043 else
13044 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13045 }
13046 else
13047 {
13048 bfd_boolean warned, ignored;
13049
13050 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13051 r_symndx, symtab_hdr, sym_hashes,
13052 h, sec, relocation,
13053 unresolved_reloc, warned, ignored);
13054
13055 sym_type = h->type;
13056 }
13057
13058 if (sec != NULL && discarded_section (sec))
13059 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13060 rel, 1, relend, howto, 0, contents);
13061
13062 if (bfd_link_relocatable (info))
13063 {
13064 /* This is a relocatable link. We don't have to change
13065 anything, unless the reloc is against a section symbol,
13066 in which case we have to adjust according to where the
13067 section symbol winds up in the output section. */
13068 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13069 {
13070 if (globals->use_rel)
13071 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13072 howto, (bfd_signed_vma) sec->output_offset);
13073 else
13074 rel->r_addend += sec->output_offset;
13075 }
13076 continue;
13077 }
13078
13079 if (h != NULL)
13080 name = h->root.root.string;
13081 else
13082 {
13083 name = (bfd_elf_string_from_elf_section
13084 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13085 if (name == NULL || *name == '\0')
13086 name = bfd_section_name (input_bfd, sec);
13087 }
13088
13089 if (r_symndx != STN_UNDEF
13090 && r_type != R_ARM_NONE
13091 && (h == NULL
13092 || h->root.type == bfd_link_hash_defined
13093 || h->root.type == bfd_link_hash_defweak)
13094 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13095 {
13096 _bfd_error_handler
13097 ((sym_type == STT_TLS
13098 /* xgettext:c-format */
13099 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13100 /* xgettext:c-format */
13101 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13102 input_bfd,
13103 input_section,
13104 (uint64_t) rel->r_offset,
13105 howto->name,
13106 name);
13107 }
13108
13109 /* We call elf32_arm_final_link_relocate unless we're completely
13110 done, i.e., the relaxation produced the final output we want,
13111 and we won't let anybody mess with it. Also, we have to do
13112 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13113 both in relaxed and non-relaxed cases. */
13114 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13115 || (IS_ARM_TLS_GNU_RELOC (r_type)
13116 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13117 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13118 & GOT_TLS_GDESC)))
13119 {
13120 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13121 contents, rel, h == NULL);
13122 /* This may have been marked unresolved because it came from
13123 a shared library. But we've just dealt with that. */
13124 unresolved_reloc = 0;
13125 }
13126 else
13127 r = bfd_reloc_continue;
13128
13129 if (r == bfd_reloc_continue)
13130 {
13131 unsigned char branch_type =
13132 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13133 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13134
13135 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13136 input_section, contents, rel,
13137 relocation, info, sec, name,
13138 sym_type, branch_type, h,
13139 &unresolved_reloc,
13140 &error_message);
13141 }
13142
13143 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13144 because such sections are not SEC_ALLOC and thus ld.so will
13145 not process them. */
13146 if (unresolved_reloc
13147 && !((input_section->flags & SEC_DEBUGGING) != 0
13148 && h->def_dynamic)
13149 && _bfd_elf_section_offset (output_bfd, info, input_section,
13150 rel->r_offset) != (bfd_vma) -1)
13151 {
13152 _bfd_error_handler
13153 /* xgettext:c-format */
13154 (_("%pB(%pA+%#" PRIx64 "): "
13155 "unresolvable %s relocation against symbol `%s'"),
13156 input_bfd,
13157 input_section,
13158 (uint64_t) rel->r_offset,
13159 howto->name,
13160 h->root.root.string);
13161 return FALSE;
13162 }
13163
13164 if (r != bfd_reloc_ok)
13165 {
13166 switch (r)
13167 {
13168 case bfd_reloc_overflow:
13169 /* If the overflowing reloc was to an undefined symbol,
13170 we have already printed one error message and there
13171 is no point complaining again. */
13172 if (!h || h->root.type != bfd_link_hash_undefined)
13173 (*info->callbacks->reloc_overflow)
13174 (info, (h ? &h->root : NULL), name, howto->name,
13175 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13176 break;
13177
13178 case bfd_reloc_undefined:
13179 (*info->callbacks->undefined_symbol)
13180 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
13181 break;
13182
13183 case bfd_reloc_outofrange:
13184 error_message = _("out of range");
13185 goto common_error;
13186
13187 case bfd_reloc_notsupported:
13188 error_message = _("unsupported relocation");
13189 goto common_error;
13190
13191 case bfd_reloc_dangerous:
13192 /* error_message should already be set. */
13193 goto common_error;
13194
13195 default:
13196 error_message = _("unknown error");
13197 /* Fall through. */
13198
13199 common_error:
13200 BFD_ASSERT (error_message != NULL);
13201 (*info->callbacks->reloc_dangerous)
13202 (info, error_message, input_bfd, input_section, rel->r_offset);
13203 break;
13204 }
13205 }
13206 }
13207
13208 return TRUE;
13209 }
13210
13211 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13212 adds the edit to the start of the list. (The list must be built in order of
13213 ascending TINDEX: the function's callers are primarily responsible for
13214 maintaining that condition). */
13215
13216 static void
13217 add_unwind_table_edit (arm_unwind_table_edit **head,
13218 arm_unwind_table_edit **tail,
13219 arm_unwind_edit_type type,
13220 asection *linked_section,
13221 unsigned int tindex)
13222 {
13223 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13224 xmalloc (sizeof (arm_unwind_table_edit));
13225
13226 new_edit->type = type;
13227 new_edit->linked_section = linked_section;
13228 new_edit->index = tindex;
13229
13230 if (tindex > 0)
13231 {
13232 new_edit->next = NULL;
13233
13234 if (*tail)
13235 (*tail)->next = new_edit;
13236
13237 (*tail) = new_edit;
13238
13239 if (!*head)
13240 (*head) = new_edit;
13241 }
13242 else
13243 {
13244 new_edit->next = *head;
13245
13246 if (!*tail)
13247 *tail = new_edit;
13248
13249 *head = new_edit;
13250 }
13251 }
13252
13253 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13254
13255 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13256 static void
13257 adjust_exidx_size(asection *exidx_sec, int adjust)
13258 {
13259 asection *out_sec;
13260
13261 if (!exidx_sec->rawsize)
13262 exidx_sec->rawsize = exidx_sec->size;
13263
13264 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
13265 out_sec = exidx_sec->output_section;
13266 /* Adjust size of output section. */
13267 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
13268 }
13269
13270 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13271 static void
13272 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13273 {
13274 struct _arm_elf_section_data *exidx_arm_data;
13275
13276 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13277 add_unwind_table_edit (
13278 &exidx_arm_data->u.exidx.unwind_edit_list,
13279 &exidx_arm_data->u.exidx.unwind_edit_tail,
13280 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13281
13282 exidx_arm_data->additional_reloc_count++;
13283
13284 adjust_exidx_size(exidx_sec, 8);
13285 }
13286
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).

   TEXT_SECTION_ORDER points to NUM_TEXT_SECTIONS text sections; the loop
   below relies on them being sorted by ascending output address.  Always
   returns TRUE: sections that cannot be read are simply skipped.  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* Classification of the previous entry seen: -1 = none yet,
     0 = EXIDX_CANTUNWIND, 1 = inlined unwind data, 2 = normal table
     entry (see the loop over table entries below).  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  /* Mark the gap with a CANTUNWIND entry appended to the previous
	     EXIDX section.  */
	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table entries (8 bytes each); the second word of each
	 entry determines its kind.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
13471
13472 static bfd_boolean
13473 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13474 bfd *ibfd, const char *name)
13475 {
13476 asection *sec, *osec;
13477
13478 sec = bfd_get_linker_section (ibfd, name);
13479 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13480 return TRUE;
13481
13482 osec = sec->output_section;
13483 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13484 return TRUE;
13485
13486 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13487 sec->output_offset, sec->size))
13488 return FALSE;
13489
13490 return TRUE;
13491 }
13492
13493 static bfd_boolean
13494 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13495 {
13496 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13497 asection *sec, *osec;
13498
13499 if (globals == NULL)
13500 return FALSE;
13501
13502 /* Invoke the regular ELF backend linker to do all the work. */
13503 if (!bfd_elf_final_link (abfd, info))
13504 return FALSE;
13505
13506 /* Process stub sections (eg BE8 encoding, ...). */
13507 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13508 unsigned int i;
13509 for (i=0; i<htab->top_id; i++)
13510 {
13511 sec = htab->stub_group[i].stub_sec;
13512 /* Only process it once, in its link_sec slot. */
13513 if (sec && i == htab->stub_group[i].link_sec->id)
13514 {
13515 osec = sec->output_section;
13516 elf32_arm_write_section (abfd, info, sec, sec->contents);
13517 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13518 sec->output_offset, sec->size))
13519 return FALSE;
13520 }
13521 }
13522
13523 /* Write out any glue sections now that we have created all the
13524 stubs. */
13525 if (globals->bfd_of_glue_owner != NULL)
13526 {
13527 if (! elf32_arm_output_glue_section (info, abfd,
13528 globals->bfd_of_glue_owner,
13529 ARM2THUMB_GLUE_SECTION_NAME))
13530 return FALSE;
13531
13532 if (! elf32_arm_output_glue_section (info, abfd,
13533 globals->bfd_of_glue_owner,
13534 THUMB2ARM_GLUE_SECTION_NAME))
13535 return FALSE;
13536
13537 if (! elf32_arm_output_glue_section (info, abfd,
13538 globals->bfd_of_glue_owner,
13539 VFP11_ERRATUM_VENEER_SECTION_NAME))
13540 return FALSE;
13541
13542 if (! elf32_arm_output_glue_section (info, abfd,
13543 globals->bfd_of_glue_owner,
13544 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13545 return FALSE;
13546
13547 if (! elf32_arm_output_glue_section (info, abfd,
13548 globals->bfd_of_glue_owner,
13549 ARM_BX_GLUE_SECTION_NAME))
13550 return FALSE;
13551 }
13552
13553 return TRUE;
13554 }
13555
13556 /* Return a best guess for the machine number based on the attributes. */
13557
13558 static unsigned int
13559 bfd_arm_get_mach_from_attributes (bfd * abfd)
13560 {
13561 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13562
13563 switch (arch)
13564 {
13565 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13566 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13567 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13568
13569 case TAG_CPU_ARCH_V5TE:
13570 {
13571 char * name;
13572
13573 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13574 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13575
13576 if (name)
13577 {
13578 if (strcmp (name, "IWMMXT2") == 0)
13579 return bfd_mach_arm_iWMMXt2;
13580
13581 if (strcmp (name, "IWMMXT") == 0)
13582 return bfd_mach_arm_iWMMXt;
13583
13584 if (strcmp (name, "XSCALE") == 0)
13585 {
13586 int wmmx;
13587
13588 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13589 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13590 switch (wmmx)
13591 {
13592 case 1: return bfd_mach_arm_iWMMXt;
13593 case 2: return bfd_mach_arm_iWMMXt2;
13594 default: return bfd_mach_arm_XScale;
13595 }
13596 }
13597 }
13598
13599 return bfd_mach_arm_5TE;
13600 }
13601
13602 default:
13603 return bfd_mach_arm_unknown;
13604 }
13605 }
13606
13607 /* Set the right machine number. */
13608
13609 static bfd_boolean
13610 elf32_arm_object_p (bfd *abfd)
13611 {
13612 unsigned int mach;
13613
13614 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13615
13616 if (mach == bfd_mach_arm_unknown)
13617 {
13618 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13619 mach = bfd_mach_arm_ep9312;
13620 else
13621 mach = bfd_arm_get_mach_from_attributes (abfd);
13622 }
13623
13624 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13625 return TRUE;
13626 }
13627
/* Function to keep ARM specific flags in the ELF header.  Sets the
   header flags to FLAGS if they have not been set yet; if they are
   already set to a different value, only warnings are emitted (for
   unknown-EABI objects) and the existing flags are kept.  Always
   returns TRUE.  */

static bfd_boolean
elf32_arm_set_private_flags (bfd *abfd, flagword flags)
{
  if (elf_flags_init (abfd)
      && elf_elfheader (abfd)->e_flags != flags)
    {
      /* Flags already initialized and different from the requested
	 value.  NOTE(review): this branch intentionally leaves
	 e_flags unchanged; it only diagnoses the interworking-bit
	 mismatch for objects with an unknown EABI version.  */
      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
	{
	  if (flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
	       abfd);
	  else
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB due to outside request"),
	       abfd);
	}
    }
  else
    {
      /* First time through (or flags identical): record FLAGS.  */
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = TRUE;
    }

  return TRUE;
}
13656
13657 /* Copy backend specific data from one object module to another. */
13658
13659 static bfd_boolean
13660 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13661 {
13662 flagword in_flags;
13663 flagword out_flags;
13664
13665 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13666 return TRUE;
13667
13668 in_flags = elf_elfheader (ibfd)->e_flags;
13669 out_flags = elf_elfheader (obfd)->e_flags;
13670
13671 if (elf_flags_init (obfd)
13672 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13673 && in_flags != out_flags)
13674 {
13675 /* Cannot mix APCS26 and APCS32 code. */
13676 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13677 return FALSE;
13678
13679 /* Cannot mix float APCS and non-float APCS code. */
13680 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13681 return FALSE;
13682
13683 /* If the src and dest have different interworking flags
13684 then turn off the interworking bit. */
13685 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13686 {
13687 if (out_flags & EF_ARM_INTERWORK)
13688 _bfd_error_handler
13689 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13690 obfd, ibfd);
13691
13692 in_flags &= ~EF_ARM_INTERWORK;
13693 }
13694
13695 /* Likewise for PIC, though don't warn for this case. */
13696 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13697 in_flags &= ~EF_ARM_PIC;
13698 }
13699
13700 elf_elfheader (obfd)->e_flags = in_flags;
13701 elf_flags_init (obfd) = TRUE;
13702
13703 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13704 }
13705
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary callee-saved register.  */
  AEABI_R9_SB,		/* R9 used as the static base register (SB).  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used by the code at all.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW static data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW static data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW static data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW static data used.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* Enums not used.  */
  AEABI_enum_short,		/* Enums packed into smallest container.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits wide.  */
  AEABI_enum_forced_wide	/* Enums forced to 32 bits.  */
};
13732
13733 /* Determine whether an object attribute tag takes an integer, a
13734 string or both. */
13735
13736 static int
13737 elf32_arm_obj_attrs_arg_type (int tag)
13738 {
13739 if (tag == Tag_compatibility)
13740 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13741 else if (tag == Tag_nodefaults)
13742 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13743 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13744 return ATTR_TYPE_FLAG_STR_VAL;
13745 else if (tag < 32)
13746 return ATTR_TYPE_FLAG_INT_VAL;
13747 else
13748 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13749 }
13750
13751 /* The ABI defines that Tag_conformance should be emitted first, and that
13752 Tag_nodefaults should be second (if either is defined). This sets those
13753 two positions, and bumps up the position of all the remaining tags to
13754 compensate. */
13755 static int
13756 elf32_arm_obj_attrs_order (int num)
13757 {
13758 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13759 return Tag_conformance;
13760 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13761 return Tag_nodefaults;
13762 if ((num - 2) < Tag_nodefaults)
13763 return num - 2;
13764 if ((num - 1) < Tag_conformance)
13765 return num - 1;
13766 return num;
13767 }
13768
13769 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
13770 static bfd_boolean
13771 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13772 {
13773 if ((tag & 127) < 64)
13774 {
13775 _bfd_error_handler
13776 (_("%pB: unknown mandatory EABI object attribute %d"),
13777 abfd, tag);
13778 bfd_set_error (bfd_error_bad_value);
13779 return FALSE;
13780 }
13781 else
13782 {
13783 _bfd_error_handler
13784 (_("warning: %pB: unknown EABI object attribute %d"),
13785 abfd, tag);
13786 return TRUE;
13787 }
13788 }
13789
13790 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13791 Returns -1 if no architecture could be read. */
13792
13793 static int
13794 get_secondary_compatible_arch (bfd *abfd)
13795 {
13796 obj_attribute *attr =
13797 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13798
13799 /* Note: the tag and its argument below are uleb128 values, though
13800 currently-defined values fit in one byte for each. */
13801 if (attr->s
13802 && attr->s[0] == Tag_CPU_arch
13803 && (attr->s[1] & 128) != 128
13804 && attr->s[2] == 0)
13805 return attr->s[1];
13806
13807 /* This tag is "safely ignorable", so don't complain if it looks funny. */
13808 return -1;
13809 }
13810
13811 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13812 The tag is removed if ARCH is -1. */
13813
13814 static void
13815 set_secondary_compatible_arch (bfd *abfd, int arch)
13816 {
13817 obj_attribute *attr =
13818 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13819
13820 if (arch == -1)
13821 {
13822 attr->s = NULL;
13823 return;
13824 }
13825
13826 /* Note: the tag and its argument below are uleb128 values, though
13827 currently-defined values fit in one byte for each. */
13828 if (!attr->s)
13829 attr->s = (char *) bfd_alloc (abfd, 3);
13830 attr->s[0] = Tag_CPU_arch;
13831 attr->s[1] = arch;
13832 attr->s[2] = '\0';
13833 }
13834
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  OLDTAG/NEWTAG are the output's and input's Tag_CPU_arch;
   *SECONDARY_COMPAT_OUT and SECONDARY_COMPAT are the corresponding
   Tag_also_compatible_with architectures (-1 if absent).  Returns the
   merged tag, updating *SECONDARY_COMPAT_OUT, or -1 (with an error
   reported against IBFD) if the architectures conflict.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each table below gives, for one architecture (the one >= V6T2 of the
     pair being merged), the merge result for every lower-or-equal
     architecture; -1 marks an incompatible combination.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      -1,        /* V7.  */
      T(V8M_BASE), /* V6_M.  */
      T(V8M_BASE), /* V6S_M.  */
      -1,        /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8M_BASE) /* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      T(V8M_MAIN), /* V7.  */
      T(V8M_MAIN), /* V6_M.  */
      T(V8M_MAIN), /* V6S_M.  */
      T(V8M_MAIN), /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8M_MAIN), /* V8-M BASELINE.  */
      T(V8M_MAIN) /* V8-M MAINLINE.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V4T),    /* V4T.  */
      T(V5T),    /* V5T.  */
      T(V5TE),   /* V5TE.  */
      T(V5TEJ),  /* V5TEJ.  */
      T(V6),     /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V6T2),   /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M),   /* V6_M.  */
      T(V6S_M),  /* V6S_M.  */
      T(V7E_M),  /* V7E_M.  */
      T(V8),     /* V8.  */
      -1,        /* V8R.  */
      T(V8M_BASE), /* V8-M BASELINE.  */
      T(V8M_MAIN), /* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M) /* V4T plus V6_M.  */
    };
  /* Merge tables above, indexed by TAGH - T(V6T2).  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  /* Otherwise consult the merge table for the higher of the two tags.  */
  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
14097
14098 /* Query attributes object to see if integer divide instructions may be
14099 present in an object. */
14100 static bfd_boolean
14101 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14102 {
14103 int arch = attr[Tag_CPU_arch].i;
14104 int profile = attr[Tag_CPU_arch_profile].i;
14105
14106 switch (attr[Tag_DIV_use].i)
14107 {
14108 case 0:
14109 /* Integer divide allowed if instruction contained in archetecture. */
14110 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14111 return TRUE;
14112 else if (arch >= TAG_CPU_ARCH_V7E_M)
14113 return TRUE;
14114 else
14115 return FALSE;
14116
14117 case 1:
14118 /* Integer divide explicitly prohibited. */
14119 return FALSE;
14120
14121 default:
14122 /* Unrecognised case - treat as allowing divide everywhere. */
14123 case 2:
14124 /* Integer divide allowed in ARM state. */
14125 return TRUE;
14126 }
14127 }
14128
14129 /* Query attributes object to see if integer divide instructions are
14130 forbidden to be in the object. This is not the inverse of
14131 elf32_arm_attributes_accept_div. */
14132 static bfd_boolean
14133 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14134 {
14135 return attr[Tag_DIV_use].i == 1;
14136 }
14137
14138 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14139 are conflicting attributes. */
14140
static bfd_boolean
elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  obj_attribute *in_attr;
  obj_attribute *out_attr;
  /* Some tags have 0 = don't care, 1 = strong requirement,
     2 = weak requirement.  */
  static const int order_021[3] = {0, 2, 1};
  int i;
  bfd_boolean result = TRUE;
  const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;

  /* Skip the linker stubs file.  This preserves previous behavior
     of accepting unknown attributes in the first input file - but
     is that a bug?  */
  if (ibfd->flags & BFD_LINKER_CREATED)
    return TRUE;

  /* Skip any input that doesn't have an attribute section.
     This enables linking object files without an attribute section with
     any others.  */
  if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
    return TRUE;

  if (!elf_known_obj_attributes_proc (obfd)[0].i)
    {
      /* This is the first object.  Copy the attributes.  */
      _bfd_elf_copy_obj_attributes (ibfd, obfd);

      out_attr = elf_known_obj_attributes_proc (obfd);

      /* Use the Tag_null value to indicate the attributes have been
	 initialized.  */
      out_attr[0].i = 1;

      /* We do not output objects with Tag_MPextension_use_legacy - we move
	 the attribute's value to Tag_MPextension_use.  */
      if (out_attr[Tag_MPextension_use_legacy].i != 0)
	{
	  if (out_attr[Tag_MPextension_use].i != 0
	      && out_attr[Tag_MPextension_use_legacy].i
		!= out_attr[Tag_MPextension_use].i)
	    {
	      _bfd_error_handler
		(_("Error: %pB has both the current and legacy "
		   "Tag_MPextension_use attributes"), ibfd);
	      result = FALSE;
	    }

	  out_attr[Tag_MPextension_use] =
	    out_attr[Tag_MPextension_use_legacy];
	  out_attr[Tag_MPextension_use_legacy].type = 0;
	  out_attr[Tag_MPextension_use_legacy].i = 0;
	}

      return result;
    }

  in_attr = elf_known_obj_attributes_proc (ibfd);
  out_attr = elf_known_obj_attributes_proc (obfd);
  /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
  if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
    {
      /* Ignore mismatches if the object doesn't use floating point or is
	 floating point ABI independent.  */
      if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
      else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
	{
	  _bfd_error_handler
	    (_("error: %pB uses VFP register arguments, %pB does not"),
	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
	  result = FALSE;
	}
    }

  /* Walk every known ARM build attribute and merge IBFD's value into
     OBFD's in place.  Hard conflicts report an error and force the
     final return value to FALSE; soft conflicts only warn.  */
  for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
    {
      /* Merge this attribute with existing attributes.  */
      switch (i)
	{
	case Tag_CPU_raw_name:
	case Tag_CPU_name:
	  /* These are merged after Tag_CPU_arch.  */
	  break;

	case Tag_ABI_optimization_goals:
	case Tag_ABI_FP_optimization_goals:
	  /* Use the first value seen.  */
	  break;

	case Tag_CPU_arch:
	  {
	    int secondary_compat = -1, secondary_compat_out = -1;
	    unsigned int saved_out_attr = out_attr[i].i;
	    int arch_attr;
	    static const char *name_table[] =
	      {
		/* These aren't real CPU names, but we can't guess
		   that from the architecture version alone.  */
		"Pre v4",
		"ARM v4",
		"ARM v4T",
		"ARM v5T",
		"ARM v5TE",
		"ARM v5TEJ",
		"ARM v6",
		"ARM v6KZ",
		"ARM v6T2",
		"ARM v6K",
		"ARM v7",
		"ARM v6-M",
		"ARM v6S-M",
		"ARM v8",
		"",
		"ARM v8-M.baseline",
		"ARM v8-M.mainline",
	      };

	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
	    secondary_compat = get_secondary_compatible_arch (ibfd);
	    secondary_compat_out = get_secondary_compatible_arch (obfd);
	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
					      &secondary_compat_out,
					      in_attr[i].i,
					      secondary_compat);

	    /* Return with error if failed to merge.  */
	    if (arch_attr == -1)
	      return FALSE;

	    out_attr[i].i = arch_attr;

	    set_secondary_compatible_arch (obfd, secondary_compat_out);

	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
	    if (out_attr[i].i == saved_out_attr)
	      ; /* Leave the names alone.  */
	    else if (out_attr[i].i == in_attr[i].i)
	      {
		/* The output architecture has been changed to match the
		   input architecture.  Use the input names.  */
		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
		  : NULL;
		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
		  : NULL;
	      }
	    else
	      {
		/* Merged architecture matches neither side exactly;
		   no single CPU name is accurate any more.  */
		out_attr[Tag_CPU_name].s = NULL;
		out_attr[Tag_CPU_raw_name].s = NULL;
	      }

	    /* If we still don't have a value for Tag_CPU_name,
	       make one up now.  Tag_CPU_raw_name remains blank.  */
	    if (out_attr[Tag_CPU_name].s == NULL
		&& out_attr[i].i < ARRAY_SIZE (name_table))
	      out_attr[Tag_CPU_name].s =
		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
	  }
	  break;

	case Tag_ARM_ISA_use:
	case Tag_THUMB_ISA_use:
	case Tag_WMMX_arch:
	case Tag_Advanced_SIMD_arch:
	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
	case Tag_ABI_FP_rounding:
	case Tag_ABI_FP_exceptions:
	case Tag_ABI_FP_user_exceptions:
	case Tag_ABI_FP_number_model:
	case Tag_FP_HP_extension:
	case Tag_CPU_unaligned_access:
	case Tag_T2EE_use:
	case Tag_MPextension_use:
	  /* Use the largest value specified.  */
	  if (in_attr[i].i > out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_preserved:
	case Tag_ABI_PCS_RO_data:
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_needed:
	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
	      && (in_attr[Tag_ABI_align_preserved].i == 0
		  || out_attr[Tag_ABI_align_preserved].i == 0))
	    {
	      /* This error message should be enabled once all non-conformant
		 binaries in the toolchain have had the attributes set
		 properly.
	      _bfd_error_handler
		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
		 obfd, ibfd);
	      result = FALSE; */
	    }
	  /* Fall through.  */
	case Tag_ABI_FP_denormal:
	case Tag_ABI_PCS_GOT_use:
	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
	     value if greater than 2 (for future-proofing).  */
	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_Virtualization_use:
	  /* The virtualization tag effectively stores two bits of
	     information: the intended use of TrustZone (in bit 0), and the
	     intended use of Virtualization (in bit 1).  */
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0
		   && in_attr[i].i != out_attr[i].i)
	    {
	      /* Both sides in 0..3: OR the two bits together (3 is the
		 union of TrustZone and Virtualization use).  */
	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
		out_attr[i].i = 3;
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: unable to merge virtualization attributes "
		       "with %pB"),
		     obfd, ibfd);
		  result = FALSE;
		}
	    }
	  break;

	case Tag_CPU_arch_profile:
	  if (out_attr[i].i != in_attr[i].i)
	    {
	      /* 0 will merge with anything.
		 'A' and 'S' merge to 'A'.
		 'R' and 'S' merge to 'R'.
		 'M' and 'A|R|S' is an error.  */
	      if (out_attr[i].i == 0
		  || (out_attr[i].i == 'S'
		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
		out_attr[i].i = in_attr[i].i;
	      else if (in_attr[i].i == 0
		       || (in_attr[i].i == 'S'
			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
		; /* Do nothing.  */
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: conflicting architecture profiles %c/%c"),
		     ibfd,
		     in_attr[i].i ? in_attr[i].i : '0',
		     out_attr[i].i ? out_attr[i].i : '0');
		  result = FALSE;
		}
	    }
	  break;

	case Tag_DSP_extension:
	  /* No need to change output value if any of:
	     - pre (<=) ARMv5T input architecture (do not have DSP)
	     - M input profile not ARMv7E-M and do not have DSP.  */
	  if (in_attr[Tag_CPU_arch].i <= 3
	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
		  && in_attr[Tag_CPU_arch].i != 13
		  && in_attr[i].i == 0))
	    ; /* Do nothing.  */
	  /* Output value should be 0 if DSP part of architecture, ie.
	     - post (>=) ARMv5te architecture output
	     - A, R or S profile output or ARMv7E-M output architecture.  */
	  else if (out_attr[Tag_CPU_arch].i >= 4
		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
		       || out_attr[Tag_CPU_arch_profile].i == 'R'
		       || out_attr[Tag_CPU_arch_profile].i == 'S'
		       || out_attr[Tag_CPU_arch].i == 13))
	    out_attr[i].i = 0;
	  /* Otherwise, DSP instructions are added and not part of output
	     architecture.  */
	  else
	    out_attr[i].i = 1;
	  break;

	case Tag_FP_arch:
	    {
	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
		 when it's 0.  It might mean absence of FP hardware if
		 Tag_FP_arch is zero.  */

	      /* Table mapping each Tag_FP_arch value to the VFP ISA
		 version and D-register count it implies; index is the
		 attribute value itself.  */
#define VFP_VERSION_COUNT 9
	      static const struct
		{
		  int ver;
		  int regs;
		} vfp_versions[VFP_VERSION_COUNT] =
		{
		  {0, 0},
		  {1, 16},
		  {2, 16},
		  {3, 32},
		  {3, 16},
		  {4, 32},
		  {4, 16},
		  {8, 32},
		  {8, 16}
		};
	      int ver;
	      int regs;
	      int newval;

	      /* If the output has no requirement about FP hardware,
		 follow the requirement of the input.  */
	      if (out_attr[i].i == 0)
		{
		  /* This assert is still reasonable, we shouldn't
		     produce the suspicious build attribute
		     combination (See below for in_attr).  */
		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
		  out_attr[i].i = in_attr[i].i;
		  out_attr[Tag_ABI_HardFP_use].i
		    = in_attr[Tag_ABI_HardFP_use].i;
		  break;
		}
	      /* If the input has no requirement about FP hardware, do
		 nothing.  */
	      else if (in_attr[i].i == 0)
		{
		  /* We used to assert that Tag_ABI_HardFP_use was
		     zero here, but we should never assert when
		     consuming an object file that has suspicious
		     build attributes.  The single precision variant
		     of 'no FP architecture' is still 'no FP
		     architecture', so we just ignore the tag in this
		     case.  */
		  break;
		}

	      /* Both the input and the output have nonzero Tag_FP_arch.
		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */

	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
		 do nothing.  */
	      if (in_attr[Tag_ABI_HardFP_use].i == 0
		  && out_attr[Tag_ABI_HardFP_use].i == 0)
		;
	      /* If the input and the output have different Tag_ABI_HardFP_use,
		 the combination of them is 0 (implied by Tag_FP_arch).  */
	      else if (in_attr[Tag_ABI_HardFP_use].i
		       != out_attr[Tag_ABI_HardFP_use].i)
		out_attr[Tag_ABI_HardFP_use].i = 0;

	      /* Now we can handle Tag_FP_arch.  */

	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
		 pick the biggest.  */
	      if (in_attr[i].i >= VFP_VERSION_COUNT
		  && in_attr[i].i > out_attr[i].i)
		{
		  out_attr[i] = in_attr[i];
		  break;
		}
	      /* The output uses the superset of input features
		 (ISA version) and registers.  */
	      ver = vfp_versions[in_attr[i].i].ver;
	      if (ver < vfp_versions[out_attr[i].i].ver)
		ver = vfp_versions[out_attr[i].i].ver;
	      regs = vfp_versions[in_attr[i].i].regs;
	      if (regs < vfp_versions[out_attr[i].i].regs)
		regs = vfp_versions[out_attr[i].i].regs;
	      /* This assumes all possible supersets are also a valid
		 options.  */
	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
		{
		  if (regs == vfp_versions[newval].regs
		      && ver == vfp_versions[newval].ver)
		    break;
		}
	      out_attr[i].i = newval;
	    }
	  break;
	case Tag_PCS_config:
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
	    {
	      /* It's sometimes ok to mix different configs, so this is only
		 a warning.  */
	      _bfd_error_handler
		(_("warning: %pB: conflicting platform configuration"), ibfd);
	    }
	  break;
	case Tag_ABI_PCS_R9_use:
	  if (in_attr[i].i != out_attr[i].i
	      && out_attr[i].i != AEABI_R9_unused
	      && in_attr[i].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: conflicting use of R9"), ibfd);
	      result = FALSE;
	    }
	  if (out_attr[i].i == AEABI_R9_unused)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_RW_data:
	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: SB relative addressing conflicts with use of R9"),
		 ibfd);
	      result = FALSE;
	    }
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_wchar_t:
	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
	    {
	      _bfd_error_handler
		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
		 ibfd, in_attr[i].i, out_attr[i].i);
	    }
	  else if (in_attr[i].i && !out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		    : "<unknown>";
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		    : "<unknown>";
		  _bfd_error_handler
		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;
	case Tag_ABI_VFP_args:
	  /* Already done, before the per-tag loop.  */
	  break;
	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	    {
	      _bfd_error_handler
		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
		 ibfd, obfd);
	      result = FALSE;
	    }
	  break;
	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;
	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;
	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		{
		  _bfd_error_handler
		    (_("error: fp16 format mismatch between %pB and %pB"),
		     ibfd, obfd);
		  result = FALSE;
		}
	    }
	  if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	    out_attr[i].i = 1;
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		{
		  _bfd_error_handler
		    (_("%pB has both the current and legacy "
		       "Tag_MPextension_use attributes"),
		     ibfd);
		  result = FALSE;
		}
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];

	  break;

	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged
	     below.  */
	  break;
	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;
	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;

	default:
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, info))
    return FALSE;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);

  return result;
}
14716
14717
/* Return TRUE if the two EABI versions are compatible.  */

14720 static bfd_boolean
14721 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14722 {
14723 /* v4 and v5 are the same spec before and after it was released,
14724 so allow mixing them. */
14725 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14726 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14727 return TRUE;
14728
14729 return (iver == over);
14730 }
14731
14732 /* Merge backend specific data from an object file to the output
14733 object file when linking. */
14734
14735 static bfd_boolean
14736 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14737
14738 /* Display the flags field. */
14739
/* Display the ARM-specific e_flags field for ABFD on FILE (passed via
   PTR), after the generic ELF private data.  Each recognised bit is
   printed and then cleared from the local FLAGS copy, so any bits left
   at the end can be reported as unrecognised.  Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Interpretation of the flag bits depends on which EABI version the
     object declares in the EF_ARM_EABIMASK bits.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      /* Clear everything decoded above so leftover bits are caught
	 by the final "<Unrecognised flag bits set>" check.  */
      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* v4 shares the BE8/LE8 decoding with v5 below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

      /* The v5 case falls into the label below; v4 jumps here.  */
    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  /* FDPIC is signalled through the OSABI byte, not e_flags.  */
  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
14882
14883 static int
14884 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
14885 {
14886 switch (ELF_ST_TYPE (elf_sym->st_info))
14887 {
14888 case STT_ARM_TFUNC:
14889 return ELF_ST_TYPE (elf_sym->st_info);
14890
14891 case STT_ARM_16BIT:
14892 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
14893 This allows us to distinguish between data used by Thumb instructions
14894 and non-data (which is probably code) inside Thumb regions of an
14895 executable. */
14896 if (type != STT_OBJECT && type != STT_TLS)
14897 return ELF_ST_TYPE (elf_sym->st_info);
14898 break;
14899
14900 default:
14901 break;
14902 }
14903
14904 return type;
14905 }
14906
14907 static asection *
14908 elf32_arm_gc_mark_hook (asection *sec,
14909 struct bfd_link_info *info,
14910 Elf_Internal_Rela *rel,
14911 struct elf_link_hash_entry *h,
14912 Elf_Internal_Sym *sym)
14913 {
14914 if (h != NULL)
14915 switch (ELF32_R_TYPE (rel->r_info))
14916 {
14917 case R_ARM_GNU_VTINHERIT:
14918 case R_ARM_GNU_VTENTRY:
14919 return NULL;
14920 }
14921
14922 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
14923 }
14924
14925 /* Look through the relocs for a section during the first phase. */
14926
14927 static bfd_boolean
14928 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
14929 asection *sec, const Elf_Internal_Rela *relocs)
14930 {
14931 Elf_Internal_Shdr *symtab_hdr;
14932 struct elf_link_hash_entry **sym_hashes;
14933 const Elf_Internal_Rela *rel;
14934 const Elf_Internal_Rela *rel_end;
14935 bfd *dynobj;
14936 asection *sreloc;
14937 struct elf32_arm_link_hash_table *htab;
14938 bfd_boolean call_reloc_p;
14939 bfd_boolean may_become_dynamic_p;
14940 bfd_boolean may_need_local_target_p;
14941 unsigned long nsyms;
14942
14943 if (bfd_link_relocatable (info))
14944 return TRUE;
14945
14946 BFD_ASSERT (is_arm_elf (abfd));
14947
14948 htab = elf32_arm_hash_table (info);
14949 if (htab == NULL)
14950 return FALSE;
14951
14952 sreloc = NULL;
14953
14954 /* Create dynamic sections for relocatable executables so that we can
14955 copy relocations. */
14956 if (htab->root.is_relocatable_executable
14957 && ! htab->root.dynamic_sections_created)
14958 {
14959 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
14960 return FALSE;
14961 }
14962
14963 if (htab->root.dynobj == NULL)
14964 htab->root.dynobj = abfd;
14965 if (!create_ifunc_sections (info))
14966 return FALSE;
14967
14968 dynobj = htab->root.dynobj;
14969
14970 symtab_hdr = & elf_symtab_hdr (abfd);
14971 sym_hashes = elf_sym_hashes (abfd);
14972 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
14973
14974 rel_end = relocs + sec->reloc_count;
14975 for (rel = relocs; rel < rel_end; rel++)
14976 {
14977 Elf_Internal_Sym *isym;
14978 struct elf_link_hash_entry *h;
14979 struct elf32_arm_link_hash_entry *eh;
14980 unsigned int r_symndx;
14981 int r_type;
14982
14983 r_symndx = ELF32_R_SYM (rel->r_info);
14984 r_type = ELF32_R_TYPE (rel->r_info);
14985 r_type = arm_real_reloc_type (htab, r_type);
14986
14987 if (r_symndx >= nsyms
14988 /* PR 9934: It is possible to have relocations that do not
14989 refer to symbols, thus it is also possible to have an
14990 object file containing relocations but no symbol table. */
14991 && (r_symndx > STN_UNDEF || nsyms > 0))
14992 {
14993 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
14994 r_symndx);
14995 return FALSE;
14996 }
14997
14998 h = NULL;
14999 isym = NULL;
15000 if (nsyms > 0)
15001 {
15002 if (r_symndx < symtab_hdr->sh_info)
15003 {
15004 /* A local symbol. */
15005 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15006 abfd, r_symndx);
15007 if (isym == NULL)
15008 return FALSE;
15009 }
15010 else
15011 {
15012 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15013 while (h->root.type == bfd_link_hash_indirect
15014 || h->root.type == bfd_link_hash_warning)
15015 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15016 }
15017 }
15018
15019 eh = (struct elf32_arm_link_hash_entry *) h;
15020
15021 call_reloc_p = FALSE;
15022 may_become_dynamic_p = FALSE;
15023 may_need_local_target_p = FALSE;
15024
15025 /* Could be done earlier, if h were already available. */
15026 r_type = elf32_arm_tls_transition (info, r_type, h);
15027 switch (r_type)
15028 {
15029 case R_ARM_GOTOFFFUNCDESC:
15030 {
15031 if (h == NULL)
15032 {
15033 if (!elf32_arm_allocate_local_sym_info (abfd))
15034 return FALSE;
15035 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15036 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15037 }
15038 else
15039 {
15040 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15041 }
15042 }
15043 break;
15044
15045 case R_ARM_GOTFUNCDESC:
15046 {
15047 if (h == NULL)
15048 {
15049 /* Such a relocation is not supposed to be generated
15050 by gcc on a static function. */
15051 /* Anyway if needed it could be handled. */
15052 abort();
15053 }
15054 else
15055 {
15056 eh->fdpic_cnts.gotfuncdesc_cnt++;
15057 }
15058 }
15059 break;
15060
15061 case R_ARM_FUNCDESC:
15062 {
15063 if (h == NULL)
15064 {
15065 if (!elf32_arm_allocate_local_sym_info (abfd))
15066 return FALSE;
15067 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15068 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15069 }
15070 else
15071 {
15072 eh->fdpic_cnts.funcdesc_cnt++;
15073 }
15074 }
15075 break;
15076
15077 case R_ARM_GOT32:
15078 case R_ARM_GOT_PREL:
15079 case R_ARM_TLS_GD32:
15080 case R_ARM_TLS_IE32:
15081 case R_ARM_TLS_GOTDESC:
15082 case R_ARM_TLS_DESCSEQ:
15083 case R_ARM_THM_TLS_DESCSEQ:
15084 case R_ARM_TLS_CALL:
15085 case R_ARM_THM_TLS_CALL:
15086 /* This symbol requires a global offset table entry. */
15087 {
15088 int tls_type, old_tls_type;
15089
15090 switch (r_type)
15091 {
15092 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15093
15094 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15095
15096 case R_ARM_TLS_GOTDESC:
15097 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15098 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15099 tls_type = GOT_TLS_GDESC; break;
15100
15101 default: tls_type = GOT_NORMAL; break;
15102 }
15103
15104 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15105 info->flags |= DF_STATIC_TLS;
15106
15107 if (h != NULL)
15108 {
15109 h->got.refcount++;
15110 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15111 }
15112 else
15113 {
15114 /* This is a global offset table entry for a local symbol. */
15115 if (!elf32_arm_allocate_local_sym_info (abfd))
15116 return FALSE;
15117 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15118 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15119 }
15120
15121 /* If a variable is accessed with both tls methods, two
15122 slots may be created. */
15123 if (GOT_TLS_GD_ANY_P (old_tls_type)
15124 && GOT_TLS_GD_ANY_P (tls_type))
15125 tls_type |= old_tls_type;
15126
15127 /* We will already have issued an error message if there
15128 is a TLS/non-TLS mismatch, based on the symbol
15129 type. So just combine any TLS types needed. */
15130 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15131 && tls_type != GOT_NORMAL)
15132 tls_type |= old_tls_type;
15133
15134 /* If the symbol is accessed in both IE and GDESC
15135 method, we're able to relax. Turn off the GDESC flag,
15136 without messing up with any other kind of tls types
15137 that may be involved. */
15138 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15139 tls_type &= ~GOT_TLS_GDESC;
15140
15141 if (old_tls_type != tls_type)
15142 {
15143 if (h != NULL)
15144 elf32_arm_hash_entry (h)->tls_type = tls_type;
15145 else
15146 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15147 }
15148 }
15149 /* Fall through. */
15150
15151 case R_ARM_TLS_LDM32:
15152 if (r_type == R_ARM_TLS_LDM32)
15153 htab->tls_ldm_got.refcount++;
15154 /* Fall through. */
15155
15156 case R_ARM_GOTOFF32:
15157 case R_ARM_GOTPC:
15158 if (htab->root.sgot == NULL
15159 && !create_got_section (htab->root.dynobj, info))
15160 return FALSE;
15161 break;
15162
15163 case R_ARM_PC24:
15164 case R_ARM_PLT32:
15165 case R_ARM_CALL:
15166 case R_ARM_JUMP24:
15167 case R_ARM_PREL31:
15168 case R_ARM_THM_CALL:
15169 case R_ARM_THM_JUMP24:
15170 case R_ARM_THM_JUMP19:
15171 call_reloc_p = TRUE;
15172 may_need_local_target_p = TRUE;
15173 break;
15174
15175 case R_ARM_ABS12:
15176 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15177 ldr __GOTT_INDEX__ offsets. */
15178 if (!htab->vxworks_p)
15179 {
15180 may_need_local_target_p = TRUE;
15181 break;
15182 }
15183 else goto jump_over;
15184
15185 /* Fall through. */
15186
15187 case R_ARM_MOVW_ABS_NC:
15188 case R_ARM_MOVT_ABS:
15189 case R_ARM_THM_MOVW_ABS_NC:
15190 case R_ARM_THM_MOVT_ABS:
15191 if (bfd_link_pic (info))
15192 {
15193 _bfd_error_handler
15194 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15195 abfd, elf32_arm_howto_table_1[r_type].name,
15196 (h) ? h->root.root.string : "a local symbol");
15197 bfd_set_error (bfd_error_bad_value);
15198 return FALSE;
15199 }
15200
15201 /* Fall through. */
15202 case R_ARM_ABS32:
15203 case R_ARM_ABS32_NOI:
15204 jump_over:
15205 if (h != NULL && bfd_link_executable (info))
15206 {
15207 h->pointer_equality_needed = 1;
15208 }
15209 /* Fall through. */
15210 case R_ARM_REL32:
15211 case R_ARM_REL32_NOI:
15212 case R_ARM_MOVW_PREL_NC:
15213 case R_ARM_MOVT_PREL:
15214 case R_ARM_THM_MOVW_PREL_NC:
15215 case R_ARM_THM_MOVT_PREL:
15216
15217 /* Should the interworking branches be listed here? */
15218 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15219 || htab->fdpic_p)
15220 && (sec->flags & SEC_ALLOC) != 0)
15221 {
15222 if (h == NULL
15223 && elf32_arm_howto_from_type (r_type)->pc_relative)
15224 {
15225 /* In shared libraries and relocatable executables,
15226 we treat local relative references as calls;
15227 see the related SYMBOL_CALLS_LOCAL code in
15228 allocate_dynrelocs. */
15229 call_reloc_p = TRUE;
15230 may_need_local_target_p = TRUE;
15231 }
15232 else
15233 /* We are creating a shared library or relocatable
15234 executable, and this is a reloc against a global symbol,
15235 or a non-PC-relative reloc against a local symbol.
15236 We may need to copy the reloc into the output. */
15237 may_become_dynamic_p = TRUE;
15238 }
15239 else
15240 may_need_local_target_p = TRUE;
15241 break;
15242
15243 /* This relocation describes the C++ object vtable hierarchy.
15244 Reconstruct it for later use during GC. */
15245 case R_ARM_GNU_VTINHERIT:
15246 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15247 return FALSE;
15248 break;
15249
15250 /* This relocation describes which C++ vtable entries are actually
15251 used. Record for later use during GC. */
15252 case R_ARM_GNU_VTENTRY:
15253 BFD_ASSERT (h != NULL);
15254 if (h != NULL
15255 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15256 return FALSE;
15257 break;
15258 }
15259
15260 if (h != NULL)
15261 {
15262 if (call_reloc_p)
15263 /* We may need a .plt entry if the function this reloc
15264 refers to is in a different object, regardless of the
15265 symbol's type. We can't tell for sure yet, because
15266 something later might force the symbol local. */
15267 h->needs_plt = 1;
15268 else if (may_need_local_target_p)
15269 /* If this reloc is in a read-only section, we might
15270 need a copy reloc. We can't check reliably at this
15271 stage whether the section is read-only, as input
15272 sections have not yet been mapped to output sections.
15273 Tentatively set the flag for now, and correct in
15274 adjust_dynamic_symbol. */
15275 h->non_got_ref = 1;
15276 }
15277
15278 if (may_need_local_target_p
15279 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15280 {
15281 union gotplt_union *root_plt;
15282 struct arm_plt_info *arm_plt;
15283 struct arm_local_iplt_info *local_iplt;
15284
15285 if (h != NULL)
15286 {
15287 root_plt = &h->plt;
15288 arm_plt = &eh->plt;
15289 }
15290 else
15291 {
15292 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15293 if (local_iplt == NULL)
15294 return FALSE;
15295 root_plt = &local_iplt->root;
15296 arm_plt = &local_iplt->arm;
15297 }
15298
15299 /* If the symbol is a function that doesn't bind locally,
15300 this relocation will need a PLT entry. */
15301 if (root_plt->refcount != -1)
15302 root_plt->refcount += 1;
15303
15304 if (!call_reloc_p)
15305 arm_plt->noncall_refcount++;
15306
15307 /* It's too early to use htab->use_blx here, so we have to
15308 record possible blx references separately from
15309 relocs that definitely need a thumb stub. */
15310
15311 if (r_type == R_ARM_THM_CALL)
15312 arm_plt->maybe_thumb_refcount += 1;
15313
15314 if (r_type == R_ARM_THM_JUMP24
15315 || r_type == R_ARM_THM_JUMP19)
15316 arm_plt->thumb_refcount += 1;
15317 }
15318
15319 if (may_become_dynamic_p)
15320 {
15321 struct elf_dyn_relocs *p, **head;
15322
15323 /* Create a reloc section in dynobj. */
15324 if (sreloc == NULL)
15325 {
15326 sreloc = _bfd_elf_make_dynamic_reloc_section
15327 (sec, dynobj, 2, abfd, ! htab->use_rel);
15328
15329 if (sreloc == NULL)
15330 return FALSE;
15331
15332 /* BPABI objects never have dynamic relocations mapped. */
15333 if (htab->symbian_p)
15334 {
15335 flagword flags;
15336
15337 flags = bfd_get_section_flags (dynobj, sreloc);
15338 flags &= ~(SEC_LOAD | SEC_ALLOC);
15339 bfd_set_section_flags (dynobj, sreloc, flags);
15340 }
15341 }
15342
15343 /* If this is a global symbol, count the number of
15344 relocations we need for this symbol. */
15345 if (h != NULL)
15346 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15347 else
15348 {
15349 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15350 if (head == NULL)
15351 return FALSE;
15352 }
15353
15354 p = *head;
15355 if (p == NULL || p->sec != sec)
15356 {
15357 bfd_size_type amt = sizeof *p;
15358
15359 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15360 if (p == NULL)
15361 return FALSE;
15362 p->next = *head;
15363 *head = p;
15364 p->sec = sec;
15365 p->count = 0;
15366 p->pc_count = 0;
15367 }
15368
15369 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15370 p->pc_count += 1;
15371 p->count += 1;
15372 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15373 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15374 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15375 that will become rofixup. */
15376 /* This is due to the fact that we suppose all will become rofixup. */
15377 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15378 _bfd_error_handler
15379 (_("FDPIC does not yet support %s relocation"
15380 " to become dynamic for executable"),
15381 elf32_arm_howto_table_1[r_type].name);
15382 abort();
15383 }
15384 }
15385 }
15386
15387 return TRUE;
15388 }
15389
/* Regenerate the relocations of the SHT_ARM_EXIDX output section O after
   unwind-table editing has deleted entries from (or appended a CANTUNWIND
   entry to) the input .ARM.exidx sections.  RELDATA describes O's output
   relocation section.  The external relocs in RELDATA->hdr->contents are
   re-read, filtered/rebased according to each input section's unwind edit
   list, and written back, and RELDATA->count / sh_size are updated.  */

static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  /* Only exception-index sections are subject to unwind editing.  */
  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  /* Pick REL or RELA swappers based on the entry size of the output
     relocation section.  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  /* Scratch array of internal relocs; "+ 1" leaves room for one extra
     R_ARM_PREL31 should a CANTUNWIND terminator be appended below.
     NOTE(review): the bfd_zmalloc result is not checked for NULL, so an
     out-of-memory condition would crash in the first (*swap_in) call —
     worth confirming against upstream whether a check belongs here.  */
  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  /* First pass: walk the output section's link orders, reading external
     relocs (ERELA) into internal form (IRELA), dropping relocs for
     deleted exidx entries and rebasing offsets of the survivors.  */
  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* Linker-generated reloc: copy through unchanged.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = o->vma + i->output_offset;

	  /* Find the input reloc data whose entry size matches the
	     output's (REL vs RELA must agree).  */
	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  (*swap_in) (abfd, erela, irela);
		  /* Each exidx entry is 8 bytes, so the entry index is
		     the section-relative offset divided by 8.  */
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* BIAS counts the edit-list nodes (deleted entries)
		     at or before RELOC_INDEX; EDIT_NODE ends up as the
		     last such node.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  /* Keep the reloc unless it targets a deleted entry;
		     shift it back by 8 bytes per deletion before it.  */
		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  bfd_vma exidx_offset = offset + i->size - 8;

		  /* Synthesize an R_ARM_PREL31 for the CANTUNWIND entry
		     appended at the end of this input section.  */
		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section: copy all its relocs
		 through verbatim.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  /* Record the surviving reloc count and shrink the section size to
     match.  */
  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  /* Second pass: write the filtered internal relocs back out over the
     original external image.  */
  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
15550
15551 /* Unwinding tables are not referenced directly. This pass marks them as
15552 required if the corresponding code section is marked. Similarly, ARMv8-M
15553 secure entry functions can only be referenced by SG veneers which are
15554 created after the GC process. They need to be marked in case they reside in
15555 their own section (as would be the case if code was compiled with
15556 -ffunction-sections). */
15557
static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;

  /* Generic extra-section marking first (e.g. debug sections).
     NOTE(review): its bfd_boolean result is discarded here — confirm
     whether a failure should propagate as FALSE.  */
  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* CMSE secure entry functions only exist on v8-M mainline/baseline
     M-profile outputs; compute that once up front.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  /* Mark any unmarked .ARM.exidx section whose linked (sh_link)
	     code section has been marked; doing so may in turn mark more
	     code, hence AGAIN.  */
	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      /* sh_info is the index of the first non-local symbol;
	         only globals can be secure entry symbols.  */
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return FALSE;
		    }
		}
	    }
	}
      first_bfd_browse = FALSE;
    }

  return TRUE;
}
15642
15643 /* Treat mapping symbols as special target symbols. */
15644
15645 static bfd_boolean
15646 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15647 {
15648 return bfd_is_arm_special_symbol_name (sym->name,
15649 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15650 }
15651
15652 /* This is a copy of elf_find_function() from elf.c except that
15653 ARM mapping symbols are ignored when looking for function names
15654 and STT_ARM_TFUNC is considered to a function type. */
15655
15656 static bfd_boolean
15657 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
15658 asymbol ** symbols,
15659 asection * section,
15660 bfd_vma offset,
15661 const char ** filename_ptr,
15662 const char ** functionname_ptr)
15663 {
15664 const char * filename = NULL;
15665 asymbol * func = NULL;
15666 bfd_vma low_func = 0;
15667 asymbol ** p;
15668
15669 for (p = symbols; *p != NULL; p++)
15670 {
15671 elf_symbol_type *q;
15672
15673 q = (elf_symbol_type *) *p;
15674
15675 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
15676 {
15677 default:
15678 break;
15679 case STT_FILE:
15680 filename = bfd_asymbol_name (&q->symbol);
15681 break;
15682 case STT_FUNC:
15683 case STT_ARM_TFUNC:
15684 case STT_NOTYPE:
15685 /* Skip mapping symbols. */
15686 if ((q->symbol.flags & BSF_LOCAL)
15687 && bfd_is_arm_special_symbol_name (q->symbol.name,
15688 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15689 continue;
15690 /* Fall through. */
15691 if (bfd_get_section (&q->symbol) == section
15692 && q->symbol.value >= low_func
15693 && q->symbol.value <= offset)
15694 {
15695 func = (asymbol *) q;
15696 low_func = q->symbol.value;
15697 }
15698 break;
15699 }
15700 }
15701
15702 if (func == NULL)
15703 return FALSE;
15704
15705 if (filename_ptr)
15706 *filename_ptr = filename;
15707 if (functionname_ptr)
15708 *functionname_ptr = bfd_asymbol_name (func);
15709
15710 return TRUE;
15711 }
15712
15713
15714 /* Find the nearest line to a particular section and offset, for error
15715 reporting. This code is a duplicate of the code in elf.c, except
15716 that it uses arm_elf_find_function. */
15717
static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* Try DWARF2 first; if it found a line but no function name, fall back
     to the symbol-table scan for the function only.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  /* Next try stabs debug info.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  /* Last resort: scan the symbol table for the enclosing function.
     No line information is available from this path.  */
  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
15766
15767 static bfd_boolean
15768 elf32_arm_find_inliner_info (bfd * abfd,
15769 const char ** filename_ptr,
15770 const char ** functionname_ptr,
15771 unsigned int * line_ptr)
15772 {
15773 bfd_boolean found;
15774 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15775 functionname_ptr, line_ptr,
15776 & elf_tdata (abfd)->dwarf2_find_line_info);
15777 return found;
15778 }
15779
15780 /* Find dynamic relocs for H that apply to read-only sections. */
15781
15782 static asection *
15783 readonly_dynrelocs (struct elf_link_hash_entry *h)
15784 {
15785 struct elf_dyn_relocs *p;
15786
15787 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
15788 {
15789 asection *s = p->sec->output_section;
15790
15791 if (s != NULL && (s->flags & SEC_READONLY) != 0)
15792 return p->sec;
15793 }
15794 return NULL;
15795 }
15796
15797 /* Adjust a symbol defined by a dynamic object and referenced by a
15798 regular object. The current definition is in some section of the
15799 dynamic object, but we're not including those sections. We have to
15800 change the definition to something the rest of the link can
15801 understand. */
15802
static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      /* Function symbols never need a copy reloc; we are done.  */
      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  /* Symbols defined in a read-only section get their copy in the
     relro data section (.data.rel.ro) instead of .dynbss, with the
     matching relocation section.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  /* Only reserve the copy reloc when copy relocs are permitted and the
     symbol actually occupies allocated space.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
15931
15932 /* Allocate space in .plt, .got and associated reloc sections for
15933 dynamic relocs. */
15934
15935 static bfd_boolean
15936 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
15937 {
15938 struct bfd_link_info *info;
15939 struct elf32_arm_link_hash_table *htab;
15940 struct elf32_arm_link_hash_entry *eh;
15941 struct elf_dyn_relocs *p;
15942
15943 if (h->root.type == bfd_link_hash_indirect)
15944 return TRUE;
15945
15946 eh = (struct elf32_arm_link_hash_entry *) h;
15947
15948 info = (struct bfd_link_info *) inf;
15949 htab = elf32_arm_hash_table (info);
15950 if (htab == NULL)
15951 return FALSE;
15952
15953 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
15954 && h->plt.refcount > 0)
15955 {
15956 /* Make sure this symbol is output as a dynamic symbol.
15957 Undefined weak syms won't yet be marked as dynamic. */
15958 if (h->dynindx == -1 && !h->forced_local
15959 && h->root.type == bfd_link_hash_undefweak)
15960 {
15961 if (! bfd_elf_link_record_dynamic_symbol (info, h))
15962 return FALSE;
15963 }
15964
15965 /* If the call in the PLT entry binds locally, the associated
15966 GOT entry should use an R_ARM_IRELATIVE relocation instead of
15967 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
15968 than the .plt section. */
15969 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
15970 {
15971 eh->is_iplt = 1;
15972 if (eh->plt.noncall_refcount == 0
15973 && SYMBOL_REFERENCES_LOCAL (info, h))
15974 /* All non-call references can be resolved directly.
15975 This means that they can (and in some cases, must)
15976 resolve directly to the run-time target, rather than
15977 to the PLT. That in turns means that any .got entry
15978 would be equal to the .igot.plt entry, so there's
15979 no point having both. */
15980 h->got.refcount = 0;
15981 }
15982
15983 if (bfd_link_pic (info)
15984 || eh->is_iplt
15985 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
15986 {
15987 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
15988
15989 /* If this symbol is not defined in a regular file, and we are
15990 not generating a shared library, then set the symbol to this
15991 location in the .plt. This is required to make function
15992 pointers compare as equal between the normal executable and
15993 the shared library. */
15994 if (! bfd_link_pic (info)
15995 && !h->def_regular)
15996 {
15997 h->root.u.def.section = htab->root.splt;
15998 h->root.u.def.value = h->plt.offset;
15999
16000 /* Make sure the function is not marked as Thumb, in case
16001 it is the target of an ABS32 relocation, which will
16002 point to the PLT entry. */
16003 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16004 }
16005
16006 /* VxWorks executables have a second set of relocations for
16007 each PLT entry. They go in a separate relocation section,
16008 which is processed by the kernel loader. */
16009 if (htab->vxworks_p && !bfd_link_pic (info))
16010 {
16011 /* There is a relocation for the initial PLT entry:
16012 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16013 if (h->plt.offset == htab->plt_header_size)
16014 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16015
16016 /* There are two extra relocations for each subsequent
16017 PLT entry: an R_ARM_32 relocation for the GOT entry,
16018 and an R_ARM_32 relocation for the PLT entry. */
16019 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16020 }
16021 }
16022 else
16023 {
16024 h->plt.offset = (bfd_vma) -1;
16025 h->needs_plt = 0;
16026 }
16027 }
16028 else
16029 {
16030 h->plt.offset = (bfd_vma) -1;
16031 h->needs_plt = 0;
16032 }
16033
16034 eh = (struct elf32_arm_link_hash_entry *) h;
16035 eh->tlsdesc_got = (bfd_vma) -1;
16036
16037 if (h->got.refcount > 0)
16038 {
16039 asection *s;
16040 bfd_boolean dyn;
16041 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16042 int indx;
16043
16044 /* Make sure this symbol is output as a dynamic symbol.
16045 Undefined weak syms won't yet be marked as dynamic. */
16046 if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
16047 && h->root.type == bfd_link_hash_undefweak)
16048 {
16049 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16050 return FALSE;
16051 }
16052
16053 if (!htab->symbian_p)
16054 {
16055 s = htab->root.sgot;
16056 h->got.offset = s->size;
16057
16058 if (tls_type == GOT_UNKNOWN)
16059 abort ();
16060
16061 if (tls_type == GOT_NORMAL)
16062 /* Non-TLS symbols need one GOT slot. */
16063 s->size += 4;
16064 else
16065 {
16066 if (tls_type & GOT_TLS_GDESC)
16067 {
16068 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16069 eh->tlsdesc_got
16070 = (htab->root.sgotplt->size
16071 - elf32_arm_compute_jump_table_size (htab));
16072 htab->root.sgotplt->size += 8;
16073 h->got.offset = (bfd_vma) -2;
16074 /* plt.got_offset needs to know there's a TLS_DESC
16075 reloc in the middle of .got.plt. */
16076 htab->num_tls_desc++;
16077 }
16078
16079 if (tls_type & GOT_TLS_GD)
16080 {
16081 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
16082 the symbol is both GD and GDESC, got.offset may
16083 have been overwritten. */
16084 h->got.offset = s->size;
16085 s->size += 8;
16086 }
16087
16088 if (tls_type & GOT_TLS_IE)
16089 /* R_ARM_TLS_IE32 needs one GOT slot. */
16090 s->size += 4;
16091 }
16092
16093 dyn = htab->root.dynamic_sections_created;
16094
16095 indx = 0;
16096 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
16097 bfd_link_pic (info),
16098 h)
16099 && (!bfd_link_pic (info)
16100 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16101 indx = h->dynindx;
16102
16103 if (tls_type != GOT_NORMAL
16104 && (bfd_link_pic (info) || indx != 0)
16105 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16106 || h->root.type != bfd_link_hash_undefweak))
16107 {
16108 if (tls_type & GOT_TLS_IE)
16109 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16110
16111 if (tls_type & GOT_TLS_GD)
16112 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16113
16114 if (tls_type & GOT_TLS_GDESC)
16115 {
16116 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16117 /* GDESC needs a trampoline to jump to. */
16118 htab->tls_trampoline = -1;
16119 }
16120
16121 /* Only GD needs it. GDESC just emits one relocation per
16122 2 entries. */
16123 if ((tls_type & GOT_TLS_GD) && indx != 0)
16124 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16125 }
16126 else if (((indx != -1) || htab->fdpic_p)
16127 && !SYMBOL_REFERENCES_LOCAL (info, h))
16128 {
16129 if (htab->root.dynamic_sections_created)
16130 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16131 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16132 }
16133 else if (h->type == STT_GNU_IFUNC
16134 && eh->plt.noncall_refcount == 0)
16135 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16136 they all resolve dynamically instead. Reserve room for the
16137 GOT entry's R_ARM_IRELATIVE relocation. */
16138 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16139 else if (bfd_link_pic (info)
16140 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16141 || h->root.type != bfd_link_hash_undefweak))
16142 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16143 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16144 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16145 /* Reserve room for rofixup for FDPIC executable. */
16146 /* TLS relocs do not need space since they are completely
16147 resolved. */
16148 htab->srofixup->size += 4;
16149 }
16150 }
16151 else
16152 h->got.offset = (bfd_vma) -1;
16153
16154 /* FDPIC support. */
16155 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16156 {
16157 /* Symbol musn't be exported. */
16158 if (h->dynindx != -1)
16159 abort();
16160
16161 /* We only allocate one function descriptor with its associated relocation. */
16162 if (eh->fdpic_cnts.funcdesc_offset == -1)
16163 {
16164 asection *s = htab->root.sgot;
16165
16166 eh->fdpic_cnts.funcdesc_offset = s->size;
16167 s->size += 8;
16168 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16169 if (bfd_link_pic(info))
16170 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16171 else
16172 htab->srofixup->size += 8;
16173 }
16174 }
16175
16176 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16177 {
16178 asection *s = htab->root.sgot;
16179
16180 if (htab->root.dynamic_sections_created && h->dynindx == -1
16181 && !h->forced_local)
16182 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16183 return FALSE;
16184
16185 if (h->dynindx == -1)
16186 {
16187 /* We only allocate one function descriptor with its associated relocation. q */
16188 if (eh->fdpic_cnts.funcdesc_offset == -1)
16189 {
16190
16191 eh->fdpic_cnts.funcdesc_offset = s->size;
16192 s->size += 8;
16193 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16194 if (bfd_link_pic(info))
16195 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16196 else
16197 htab->srofixup->size += 8;
16198 }
16199 }
16200
16201 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16202 R_ARM_RELATIVE/rofixup relocation on it. */
16203 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16204 s->size += 4;
16205 if (h->dynindx == -1 && !bfd_link_pic(info))
16206 htab->srofixup->size += 4;
16207 else
16208 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16209 }
16210
16211 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16212 {
16213 if (htab->root.dynamic_sections_created && h->dynindx == -1
16214 && !h->forced_local)
16215 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16216 return FALSE;
16217
16218 if (h->dynindx == -1)
16219 {
16220 /* We only allocate one function descriptor with its associated relocation. */
16221 if (eh->fdpic_cnts.funcdesc_offset == -1)
16222 {
16223 asection *s = htab->root.sgot;
16224
16225 eh->fdpic_cnts.funcdesc_offset = s->size;
16226 s->size += 8;
16227 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16228 if (bfd_link_pic(info))
16229 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16230 else
16231 htab->srofixup->size += 8;
16232 }
16233 }
16234 if (h->dynindx == -1 && !bfd_link_pic(info))
16235 {
16236 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16237 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16238 }
16239 else
16240 {
	  /* We will need one dynamic reloc per reference.  It will be either
	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
16243 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16244 eh->fdpic_cnts.funcdesc_cnt);
16245 }
16246 }
16247
16248 /* Allocate stubs for exported Thumb functions on v4t. */
16249 if (!htab->use_blx && h->dynindx != -1
16250 && h->def_regular
16251 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16252 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16253 {
16254 struct elf_link_hash_entry * th;
16255 struct bfd_link_hash_entry * bh;
16256 struct elf_link_hash_entry * myh;
16257 char name[1024];
16258 asection *s;
16259 bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
16261 s = h->root.u.def.section;
16262 sprintf (name, "__real_%s", h->root.root.string);
16263 _bfd_generic_link_add_one_symbol (info, s->owner,
16264 name, BSF_GLOBAL, s,
16265 h->root.u.def.value,
16266 NULL, TRUE, FALSE, &bh);
16267
16268 myh = (struct elf_link_hash_entry *) bh;
16269 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16270 myh->forced_local = 1;
16271 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16272 eh->export_glue = myh;
16273 th = record_arm_to_thumb_glue (info, h);
16274 /* Point the symbol at the stub. */
16275 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16276 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16277 h->root.u.def.section = th->root.u.def.section;
16278 h->root.u.def.value = th->root.u.def.value & ~1;
16279 }
16280
16281 if (eh->dyn_relocs == NULL)
16282 return TRUE;
16283
16284 /* In the shared -Bsymbolic case, discard space allocated for
16285 dynamic pc-relative relocs against symbols which turn out to be
16286 defined in regular objects. For the normal shared case, discard
16287 space for pc-relative relocs that have become local due to symbol
16288 visibility changes. */
16289
16290 if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
16291 {
16292 /* Relocs that use pc_count are PC-relative forms, which will appear
16293 on something like ".long foo - ." or "movw REG, foo - .". We want
16294 calls to protected symbols to resolve directly to the function
16295 rather than going via the plt. If people want function pointer
16296 comparisons to work as expected then they should avoid writing
16297 assembly like ".long foo - .". */
16298 if (SYMBOL_CALLS_LOCAL (info, h))
16299 {
16300 struct elf_dyn_relocs **pp;
16301
16302 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16303 {
16304 p->count -= p->pc_count;
16305 p->pc_count = 0;
16306 if (p->count == 0)
16307 *pp = p->next;
16308 else
16309 pp = &p->next;
16310 }
16311 }
16312
16313 if (htab->vxworks_p)
16314 {
16315 struct elf_dyn_relocs **pp;
16316
16317 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16318 {
16319 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16320 *pp = p->next;
16321 else
16322 pp = &p->next;
16323 }
16324 }
16325
16326 /* Also discard relocs on undefined weak syms with non-default
16327 visibility. */
16328 if (eh->dyn_relocs != NULL
16329 && h->root.type == bfd_link_hash_undefweak)
16330 {
16331 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16332 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16333 eh->dyn_relocs = NULL;
16334
16335 /* Make sure undefined weak symbols are output as a dynamic
16336 symbol in PIEs. */
16337 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16338 && !h->forced_local)
16339 {
16340 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16341 return FALSE;
16342 }
16343 }
16344
16345 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16346 && h->root.type == bfd_link_hash_new)
16347 {
16348 /* Output absolute symbols so that we can create relocations
16349 against them. For normal symbols we output a relocation
16350 against the section that contains them. */
16351 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16352 return FALSE;
16353 }
16354
16355 }
16356 else
16357 {
16358 /* For the non-shared case, discard space for relocs against
16359 symbols which turn out to need copy relocs or are not
16360 dynamic. */
16361
16362 if (!h->non_got_ref
16363 && ((h->def_dynamic
16364 && !h->def_regular)
16365 || (htab->root.dynamic_sections_created
16366 && (h->root.type == bfd_link_hash_undefweak
16367 || h->root.type == bfd_link_hash_undefined))))
16368 {
16369 /* Make sure this symbol is output as a dynamic symbol.
16370 Undefined weak syms won't yet be marked as dynamic. */
16371 if (h->dynindx == -1 && !h->forced_local
16372 && h->root.type == bfd_link_hash_undefweak)
16373 {
16374 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16375 return FALSE;
16376 }
16377
16378 /* If that succeeded, we know we'll be keeping all the
16379 relocs. */
16380 if (h->dynindx != -1)
16381 goto keep;
16382 }
16383
16384 eh->dyn_relocs = NULL;
16385
16386 keep: ;
16387 }
16388
16389 /* Finally, allocate space. */
16390 for (p = eh->dyn_relocs; p != NULL; p = p->next)
16391 {
16392 asection *sreloc = elf_section_data (p->sec)->sreloc;
16393
16394 if (h->type == STT_GNU_IFUNC
16395 && eh->plt.noncall_refcount == 0
16396 && SYMBOL_REFERENCES_LOCAL (info, h))
16397 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16398 else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16399 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16400 else if (htab->fdpic_p && !bfd_link_pic(info))
16401 htab->srofixup->size += 4 * p->count;
16402 else
16403 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16404 }
16405
16406 return TRUE;
16407 }
16408
16409 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16410 read-only sections. */
16411
16412 static bfd_boolean
16413 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16414 {
16415 asection *sec;
16416
16417 if (h->root.type == bfd_link_hash_indirect)
16418 return TRUE;
16419
16420 sec = readonly_dynrelocs (h);
16421 if (sec != NULL)
16422 {
16423 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16424
16425 info->flags |= DF_TEXTREL;
16426 info->callbacks->minfo
16427 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16428 sec->owner, h->root.root.string, sec);
16429
16430 /* Not an error, just cut short the traversal. */
16431 return FALSE;
16432 }
16433 return TRUE;
16434 }
16435
16436 void
16437 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16438 int byteswap_code)
16439 {
16440 struct elf32_arm_link_hash_table *globals;
16441
16442 globals = elf32_arm_hash_table (info);
16443 if (globals == NULL)
16444 return;
16445
16446 globals->byteswap_code = byteswap_code;
16447 }
16448
16449 /* Set the sizes of the dynamic sections. */
16450
16451 static bfd_boolean
16452 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16453 struct bfd_link_info * info)
16454 {
16455 bfd * dynobj;
16456 asection * s;
16457 bfd_boolean plt;
16458 bfd_boolean relocs;
16459 bfd *ibfd;
16460 struct elf32_arm_link_hash_table *htab;
16461
16462 htab = elf32_arm_hash_table (info);
16463 if (htab == NULL)
16464 return FALSE;
16465
16466 dynobj = elf_hash_table (info)->dynobj;
16467 BFD_ASSERT (dynobj != NULL);
16468 check_use_blx (htab);
16469
16470 if (elf_hash_table (info)->dynamic_sections_created)
16471 {
16472 /* Set the contents of the .interp section to the interpreter. */
16473 if (bfd_link_executable (info) && !info->nointerp)
16474 {
16475 s = bfd_get_linker_section (dynobj, ".interp");
16476 BFD_ASSERT (s != NULL);
16477 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16478 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16479 }
16480 }
16481
16482 /* Set up .got offsets for local syms, and space for local dynamic
16483 relocs. */
16484 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16485 {
16486 bfd_signed_vma *local_got;
16487 bfd_signed_vma *end_local_got;
16488 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16489 char *local_tls_type;
16490 bfd_vma *local_tlsdesc_gotent;
16491 bfd_size_type locsymcount;
16492 Elf_Internal_Shdr *symtab_hdr;
16493 asection *srel;
16494 bfd_boolean is_vxworks = htab->vxworks_p;
16495 unsigned int symndx;
16496 struct fdpic_local *local_fdpic_cnts;
16497
16498 if (! is_arm_elf (ibfd))
16499 continue;
16500
16501 for (s = ibfd->sections; s != NULL; s = s->next)
16502 {
16503 struct elf_dyn_relocs *p;
16504
16505 for (p = (struct elf_dyn_relocs *)
16506 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16507 {
16508 if (!bfd_is_abs_section (p->sec)
16509 && bfd_is_abs_section (p->sec->output_section))
16510 {
16511 /* Input section has been discarded, either because
16512 it is a copy of a linkonce section or due to
16513 linker script /DISCARD/, so we'll be discarding
16514 the relocs too. */
16515 }
16516 else if (is_vxworks
16517 && strcmp (p->sec->output_section->name,
16518 ".tls_vars") == 0)
16519 {
16520 /* Relocations in vxworks .tls_vars sections are
16521 handled specially by the loader. */
16522 }
16523 else if (p->count != 0)
16524 {
16525 srel = elf_section_data (p->sec)->sreloc;
16526 if (htab->fdpic_p && !bfd_link_pic(info))
16527 htab->srofixup->size += 4 * p->count;
16528 else
16529 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16530 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16531 info->flags |= DF_TEXTREL;
16532 }
16533 }
16534 }
16535
16536 local_got = elf_local_got_refcounts (ibfd);
16537 if (!local_got)
16538 continue;
16539
16540 symtab_hdr = & elf_symtab_hdr (ibfd);
16541 locsymcount = symtab_hdr->sh_info;
16542 end_local_got = local_got + locsymcount;
16543 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16544 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16545 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16546 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16547 symndx = 0;
16548 s = htab->root.sgot;
16549 srel = htab->root.srelgot;
16550 for (; local_got < end_local_got;
16551 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16552 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16553 {
16554 *local_tlsdesc_gotent = (bfd_vma) -1;
16555 local_iplt = *local_iplt_ptr;
16556
16557 /* FDPIC support. */
16558 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16559 {
16560 if (local_fdpic_cnts->funcdesc_offset == -1)
16561 {
16562 local_fdpic_cnts->funcdesc_offset = s->size;
16563 s->size += 8;
16564
16565 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16566 if (bfd_link_pic(info))
16567 elf32_arm_allocate_dynrelocs (info, srel, 1);
16568 else
16569 htab->srofixup->size += 8;
16570 }
16571 }
16572
16573 if (local_fdpic_cnts->funcdesc_cnt > 0)
16574 {
16575 if (local_fdpic_cnts->funcdesc_offset == -1)
16576 {
16577 local_fdpic_cnts->funcdesc_offset = s->size;
16578 s->size += 8;
16579
16580 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16581 if (bfd_link_pic(info))
16582 elf32_arm_allocate_dynrelocs (info, srel, 1);
16583 else
16584 htab->srofixup->size += 8;
16585 }
16586
16587 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16588 if (bfd_link_pic(info))
16589 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16590 else
16591 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16592 }
16593
16594 if (local_iplt != NULL)
16595 {
16596 struct elf_dyn_relocs *p;
16597
16598 if (local_iplt->root.refcount > 0)
16599 {
16600 elf32_arm_allocate_plt_entry (info, TRUE,
16601 &local_iplt->root,
16602 &local_iplt->arm);
16603 if (local_iplt->arm.noncall_refcount == 0)
16604 /* All references to the PLT are calls, so all
16605 non-call references can resolve directly to the
16606 run-time target. This means that the .got entry
16607 would be the same as the .igot.plt entry, so there's
16608 no point creating both. */
16609 *local_got = 0;
16610 }
16611 else
16612 {
16613 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16614 local_iplt->root.offset = (bfd_vma) -1;
16615 }
16616
16617 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16618 {
16619 asection *psrel;
16620
16621 psrel = elf_section_data (p->sec)->sreloc;
16622 if (local_iplt->arm.noncall_refcount == 0)
16623 elf32_arm_allocate_irelocs (info, psrel, p->count);
16624 else
16625 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16626 }
16627 }
16628 if (*local_got > 0)
16629 {
16630 Elf_Internal_Sym *isym;
16631
16632 *local_got = s->size;
16633 if (*local_tls_type & GOT_TLS_GD)
16634 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16635 s->size += 8;
16636 if (*local_tls_type & GOT_TLS_GDESC)
16637 {
16638 *local_tlsdesc_gotent = htab->root.sgotplt->size
16639 - elf32_arm_compute_jump_table_size (htab);
16640 htab->root.sgotplt->size += 8;
16641 *local_got = (bfd_vma) -2;
16642 /* plt.got_offset needs to know there's a TLS_DESC
16643 reloc in the middle of .got.plt. */
16644 htab->num_tls_desc++;
16645 }
16646 if (*local_tls_type & GOT_TLS_IE)
16647 s->size += 4;
16648
16649 if (*local_tls_type & GOT_NORMAL)
16650 {
16651 /* If the symbol is both GD and GDESC, *local_got
16652 may have been overwritten. */
16653 *local_got = s->size;
16654 s->size += 4;
16655 }
16656
16657 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
16658 if (isym == NULL)
16659 return FALSE;
16660
16661 /* If all references to an STT_GNU_IFUNC PLT are calls,
16662 then all non-call references, including this GOT entry,
16663 resolve directly to the run-time target. */
16664 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16665 && (local_iplt == NULL
16666 || local_iplt->arm.noncall_refcount == 0))
16667 elf32_arm_allocate_irelocs (info, srel, 1);
16668 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16669 {
16670 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16671 elf32_arm_allocate_dynrelocs (info, srel, 1);
16672 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16673 htab->srofixup->size += 4;
16674
16675 if ((bfd_link_pic (info) || htab->fdpic_p)
16676 && *local_tls_type & GOT_TLS_GDESC)
16677 {
16678 elf32_arm_allocate_dynrelocs (info,
16679 htab->root.srelplt, 1);
16680 htab->tls_trampoline = -1;
16681 }
16682 }
16683 }
16684 else
16685 *local_got = (bfd_vma) -1;
16686 }
16687 }
16688
16689 if (htab->tls_ldm_got.refcount > 0)
16690 {
16691 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16692 for R_ARM_TLS_LDM32 relocations. */
16693 htab->tls_ldm_got.offset = htab->root.sgot->size;
16694 htab->root.sgot->size += 8;
16695 if (bfd_link_pic (info))
16696 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16697 }
16698 else
16699 htab->tls_ldm_got.offset = -1;
16700
16701 /* At the very end of the .rofixup section is a pointer to the GOT,
16702 reserve space for it. */
16703 if (htab->fdpic_p && htab->srofixup != NULL)
16704 htab->srofixup->size += 4;
16705
16706 /* Allocate global sym .plt and .got entries, and space for global
16707 sym dynamic relocs. */
16708 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16709
16710 /* Here we rummage through the found bfds to collect glue information. */
16711 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16712 {
16713 if (! is_arm_elf (ibfd))
16714 continue;
16715
16716 /* Initialise mapping tables for code/data. */
16717 bfd_elf32_arm_init_maps (ibfd);
16718
16719 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
16720 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
16721 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
16722 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
16723 }
16724
16725 /* Allocate space for the glue sections now that we've sized them. */
16726 bfd_elf32_arm_allocate_interworking_sections (info);
16727
16728 /* For every jump slot reserved in the sgotplt, reloc_count is
16729 incremented. However, when we reserve space for TLS descriptors,
16730 it's not incremented, so in order to compute the space reserved
16731 for them, it suffices to multiply the reloc count by the jump
16732 slot size. */
16733 if (htab->root.srelplt)
16734 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
16735
16736 if (htab->tls_trampoline)
16737 {
16738 if (htab->root.splt->size == 0)
16739 htab->root.splt->size += htab->plt_header_size;
16740
16741 htab->tls_trampoline = htab->root.splt->size;
16742 htab->root.splt->size += htab->plt_entry_size;
16743
16744 /* If we're not using lazy TLS relocations, don't generate the
16745 PLT and GOT entries they require. */
16746 if (!(info->flags & DF_BIND_NOW))
16747 {
16748 htab->dt_tlsdesc_got = htab->root.sgot->size;
16749 htab->root.sgot->size += 4;
16750
16751 htab->dt_tlsdesc_plt = htab->root.splt->size;
16752 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
16753 }
16754 }
16755
16756 /* The check_relocs and adjust_dynamic_symbol entry points have
16757 determined the sizes of the various dynamic sections. Allocate
16758 memory for them. */
16759 plt = FALSE;
16760 relocs = FALSE;
16761 for (s = dynobj->sections; s != NULL; s = s->next)
16762 {
16763 const char * name;
16764
16765 if ((s->flags & SEC_LINKER_CREATED) == 0)
16766 continue;
16767
16768 /* It's OK to base decisions on the section name, because none
16769 of the dynobj section names depend upon the input files. */
16770 name = bfd_get_section_name (dynobj, s);
16771
16772 if (s == htab->root.splt)
16773 {
16774 /* Remember whether there is a PLT. */
16775 plt = s->size != 0;
16776 }
16777 else if (CONST_STRNEQ (name, ".rel"))
16778 {
16779 if (s->size != 0)
16780 {
16781 /* Remember whether there are any reloc sections other
16782 than .rel(a).plt and .rela.plt.unloaded. */
16783 if (s != htab->root.srelplt && s != htab->srelplt2)
16784 relocs = TRUE;
16785
16786 /* We use the reloc_count field as a counter if we need
16787 to copy relocs into the output file. */
16788 s->reloc_count = 0;
16789 }
16790 }
16791 else if (s != htab->root.sgot
16792 && s != htab->root.sgotplt
16793 && s != htab->root.iplt
16794 && s != htab->root.igotplt
16795 && s != htab->root.sdynbss
16796 && s != htab->root.sdynrelro
16797 && s != htab->srofixup)
16798 {
16799 /* It's not one of our sections, so don't allocate space. */
16800 continue;
16801 }
16802
16803 if (s->size == 0)
16804 {
16805 /* If we don't need this section, strip it from the
16806 output file. This is mostly to handle .rel(a).bss and
16807 .rel(a).plt. We must create both sections in
16808 create_dynamic_sections, because they must be created
16809 before the linker maps input sections to output
16810 sections. The linker does that before
16811 adjust_dynamic_symbol is called, and it is that
16812 function which decides whether anything needs to go
16813 into these sections. */
16814 s->flags |= SEC_EXCLUDE;
16815 continue;
16816 }
16817
16818 if ((s->flags & SEC_HAS_CONTENTS) == 0)
16819 continue;
16820
16821 /* Allocate memory for the section contents. */
16822 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
16823 if (s->contents == NULL)
16824 return FALSE;
16825 }
16826
16827 if (elf_hash_table (info)->dynamic_sections_created)
16828 {
16829 /* Add some entries to the .dynamic section. We fill in the
16830 values later, in elf32_arm_finish_dynamic_sections, but we
16831 must add the entries now so that we get the correct size for
16832 the .dynamic section. The DT_DEBUG entry is filled in by the
16833 dynamic linker and used by the debugger. */
16834 #define add_dynamic_entry(TAG, VAL) \
16835 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
16836
16837 if (bfd_link_executable (info))
16838 {
16839 if (!add_dynamic_entry (DT_DEBUG, 0))
16840 return FALSE;
16841 }
16842
16843 if (plt)
16844 {
16845 if ( !add_dynamic_entry (DT_PLTGOT, 0)
16846 || !add_dynamic_entry (DT_PLTRELSZ, 0)
16847 || !add_dynamic_entry (DT_PLTREL,
16848 htab->use_rel ? DT_REL : DT_RELA)
16849 || !add_dynamic_entry (DT_JMPREL, 0))
16850 return FALSE;
16851
16852 if (htab->dt_tlsdesc_plt
16853 && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
16854 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
16855 return FALSE;
16856 }
16857
16858 if (relocs)
16859 {
16860 if (htab->use_rel)
16861 {
16862 if (!add_dynamic_entry (DT_REL, 0)
16863 || !add_dynamic_entry (DT_RELSZ, 0)
16864 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
16865 return FALSE;
16866 }
16867 else
16868 {
16869 if (!add_dynamic_entry (DT_RELA, 0)
16870 || !add_dynamic_entry (DT_RELASZ, 0)
16871 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
16872 return FALSE;
16873 }
16874 }
16875
16876 /* If any dynamic relocs apply to a read-only section,
16877 then we need a DT_TEXTREL entry. */
16878 if ((info->flags & DF_TEXTREL) == 0)
16879 elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
16880
16881 if ((info->flags & DF_TEXTREL) != 0)
16882 {
16883 if (!add_dynamic_entry (DT_TEXTREL, 0))
16884 return FALSE;
16885 }
16886 if (htab->vxworks_p
16887 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
16888 return FALSE;
16889 }
16890 #undef add_dynamic_entry
16891
16892 return TRUE;
16893 }
16894
16895 /* Size sections even though they're not dynamic. We use it to setup
16896 _TLS_MODULE_BASE_, if needed. */
16897
16898 static bfd_boolean
16899 elf32_arm_always_size_sections (bfd *output_bfd,
16900 struct bfd_link_info *info)
16901 {
16902 asection *tls_sec;
16903
16904 if (bfd_link_relocatable (info))
16905 return TRUE;
16906
16907 tls_sec = elf_hash_table (info)->tls_sec;
16908
16909 if (tls_sec)
16910 {
16911 struct elf_link_hash_entry *tlsbase;
16912
16913 tlsbase = elf_link_hash_lookup
16914 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
16915
16916 if (tlsbase)
16917 {
16918 struct bfd_link_hash_entry *bh = NULL;
16919 const struct elf_backend_data *bed
16920 = get_elf_backend_data (output_bfd);
16921
16922 if (!(_bfd_generic_link_add_one_symbol
16923 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
16924 tls_sec, 0, NULL, FALSE,
16925 bed->collect, &bh)))
16926 return FALSE;
16927
16928 tlsbase->type = STT_TLS;
16929 tlsbase = (struct elf_link_hash_entry *)bh;
16930 tlsbase->def_regular = 1;
16931 tlsbase->other = STV_HIDDEN;
16932 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
16933 }
16934 }
16935 return TRUE;
16936 }
16937
16938 /* Finish up dynamic symbol handling. We set the contents of various
16939 dynamic sections here. */
16940
16941 static bfd_boolean
16942 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
16943 struct bfd_link_info * info,
16944 struct elf_link_hash_entry * h,
16945 Elf_Internal_Sym * sym)
16946 {
16947 struct elf32_arm_link_hash_table *htab;
16948 struct elf32_arm_link_hash_entry *eh;
16949
16950 htab = elf32_arm_hash_table (info);
16951 if (htab == NULL)
16952 return FALSE;
16953
16954 eh = (struct elf32_arm_link_hash_entry *) h;
16955
16956 if (h->plt.offset != (bfd_vma) -1)
16957 {
16958 if (!eh->is_iplt)
16959 {
16960 BFD_ASSERT (h->dynindx != -1);
16961 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
16962 h->dynindx, 0))
16963 return FALSE;
16964 }
16965
16966 if (!h->def_regular)
16967 {
16968 /* Mark the symbol as undefined, rather than as defined in
16969 the .plt section. */
16970 sym->st_shndx = SHN_UNDEF;
16971 /* If the symbol is weak we need to clear the value.
16972 Otherwise, the PLT entry would provide a definition for
16973 the symbol even if the symbol wasn't defined anywhere,
16974 and so the symbol would never be NULL. Leave the value if
16975 there were any relocations where pointer equality matters
16976 (this is a clue for the dynamic linker, to make function
16977 pointer comparisons work between an application and shared
16978 library). */
16979 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
16980 sym->st_value = 0;
16981 }
16982 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
16983 {
16984 /* At least one non-call relocation references this .iplt entry,
16985 so the .iplt entry is the function's canonical address. */
16986 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
16987 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
16988 sym->st_shndx = (_bfd_elf_section_from_bfd_section
16989 (output_bfd, htab->root.iplt->output_section));
16990 sym->st_value = (h->plt.offset
16991 + htab->root.iplt->output_section->vma
16992 + htab->root.iplt->output_offset);
16993 }
16994 }
16995
16996 if (h->needs_copy)
16997 {
16998 asection * s;
16999 Elf_Internal_Rela rel;
17000
17001 /* This symbol needs a copy reloc. Set it up. */
17002 BFD_ASSERT (h->dynindx != -1
17003 && (h->root.type == bfd_link_hash_defined
17004 || h->root.type == bfd_link_hash_defweak));
17005
17006 rel.r_addend = 0;
17007 rel.r_offset = (h->root.u.def.value
17008 + h->root.u.def.section->output_section->vma
17009 + h->root.u.def.section->output_offset);
17010 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17011 if (h->root.u.def.section == htab->root.sdynrelro)
17012 s = htab->root.sreldynrelro;
17013 else
17014 s = htab->root.srelbss;
17015 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17016 }
17017
17018 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17019 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
17020 to the ".got" section. */
17021 if (h == htab->root.hdynamic
17022 || (!htab->vxworks_p && h == htab->root.hgot))
17023 sym->st_shndx = SHN_ABS;
17024
17025 return TRUE;
17026 }
17027
17028 static void
17029 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17030 void *contents,
17031 const unsigned long *template, unsigned count)
17032 {
17033 unsigned ix;
17034
17035 for (ix = 0; ix != count; ix++)
17036 {
17037 unsigned long insn = template[ix];
17038
17039 /* Emit mov pc,rx if bx is not permitted. */
17040 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17041 insn = (insn & 0xf000000f) | 0x01a0f000;
17042 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17043 }
17044 }
17045
17046 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17047 other variants, NaCl needs this entry in a static executable's
17048 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17049 zero. For .iplt really only the last bundle is useful, and .iplt
17050 could have a shorter first entry, with each individual PLT entry's
17051 relative branch calculated differently so it targets the last
17052 bundle instead of the instruction before it (labelled .Lplt_tail
17053 above). But it's simpler to keep the size and layout of PLT0
17054 consistent with the dynamic case, at the cost of some dead code at
17055 the start of .iplt and the one dead store to the stack at the start
17056 of .Lplt_tail. */
17057 static void
17058 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17059 asection *plt, bfd_vma got_displacement)
17060 {
17061 unsigned int i;
17062
17063 put_arm_insn (htab, output_bfd,
17064 elf32_arm_nacl_plt0_entry[0]
17065 | arm_movw_immediate (got_displacement),
17066 plt->contents + 0);
17067 put_arm_insn (htab, output_bfd,
17068 elf32_arm_nacl_plt0_entry[1]
17069 | arm_movt_immediate (got_displacement),
17070 plt->contents + 4);
17071
17072 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17073 put_arm_insn (htab, output_bfd,
17074 elf32_arm_nacl_plt0_entry[i],
17075 plt->contents + (i * 4));
17076 }
17077
17078 /* Finish up the dynamic sections. */
17079
17080 static bfd_boolean
17081 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17082 {
17083 bfd * dynobj;
17084 asection * sgot;
17085 asection * sdyn;
17086 struct elf32_arm_link_hash_table *htab;
17087
17088 htab = elf32_arm_hash_table (info);
17089 if (htab == NULL)
17090 return FALSE;
17091
17092 dynobj = elf_hash_table (info)->dynobj;
17093
17094 sgot = htab->root.sgotplt;
17095 /* A broken linker script might have discarded the dynamic sections.
17096 Catch this here so that we do not seg-fault later on. */
17097 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17098 return FALSE;
17099 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17100
17101 if (elf_hash_table (info)->dynamic_sections_created)
17102 {
17103 asection *splt;
17104 Elf32_External_Dyn *dyncon, *dynconend;
17105
17106 splt = htab->root.splt;
17107 BFD_ASSERT (splt != NULL && sdyn != NULL);
17108 BFD_ASSERT (htab->symbian_p || sgot != NULL);
17109
17110 dyncon = (Elf32_External_Dyn *) sdyn->contents;
17111 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17112
17113 for (; dyncon < dynconend; dyncon++)
17114 {
17115 Elf_Internal_Dyn dyn;
17116 const char * name;
17117 asection * s;
17118
17119 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17120
17121 switch (dyn.d_tag)
17122 {
17123 unsigned int type;
17124
17125 default:
17126 if (htab->vxworks_p
17127 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17128 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17129 break;
17130
17131 case DT_HASH:
17132 name = ".hash";
17133 goto get_vma_if_bpabi;
17134 case DT_STRTAB:
17135 name = ".dynstr";
17136 goto get_vma_if_bpabi;
17137 case DT_SYMTAB:
17138 name = ".dynsym";
17139 goto get_vma_if_bpabi;
17140 case DT_VERSYM:
17141 name = ".gnu.version";
17142 goto get_vma_if_bpabi;
17143 case DT_VERDEF:
17144 name = ".gnu.version_d";
17145 goto get_vma_if_bpabi;
17146 case DT_VERNEED:
17147 name = ".gnu.version_r";
17148 goto get_vma_if_bpabi;
17149
17150 case DT_PLTGOT:
17151 name = htab->symbian_p ? ".got" : ".got.plt";
17152 goto get_vma;
17153 case DT_JMPREL:
17154 name = RELOC_SECTION (htab, ".plt");
17155 get_vma:
17156 s = bfd_get_linker_section (dynobj, name);
17157 if (s == NULL)
17158 {
17159 _bfd_error_handler
17160 (_("could not find section %s"), name);
17161 bfd_set_error (bfd_error_invalid_operation);
17162 return FALSE;
17163 }
17164 if (!htab->symbian_p)
17165 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17166 else
17167 /* In the BPABI, tags in the PT_DYNAMIC section point
17168 at the file offset, not the memory address, for the
17169 convenience of the post linker. */
17170 dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
17171 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17172 break;
17173
17174 get_vma_if_bpabi:
17175 if (htab->symbian_p)
17176 goto get_vma;
17177 break;
17178
17179 case DT_PLTRELSZ:
17180 s = htab->root.srelplt;
17181 BFD_ASSERT (s != NULL);
17182 dyn.d_un.d_val = s->size;
17183 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17184 break;
17185
17186 case DT_RELSZ:
17187 case DT_RELASZ:
17188 case DT_REL:
17189 case DT_RELA:
17190 /* In the BPABI, the DT_REL tag must point at the file
17191 offset, not the VMA, of the first relocation
17192 section. So, we use code similar to that in
17193 elflink.c, but do not check for SHF_ALLOC on the
17194 relocation section, since relocation sections are
17195 never allocated under the BPABI. PLT relocs are also
17196 included. */
17197 if (htab->symbian_p)
17198 {
17199 unsigned int i;
17200 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
17201 ? SHT_REL : SHT_RELA);
17202 dyn.d_un.d_val = 0;
17203 for (i = 1; i < elf_numsections (output_bfd); i++)
17204 {
17205 Elf_Internal_Shdr *hdr
17206 = elf_elfsections (output_bfd)[i];
17207 if (hdr->sh_type == type)
17208 {
17209 if (dyn.d_tag == DT_RELSZ
17210 || dyn.d_tag == DT_RELASZ)
17211 dyn.d_un.d_val += hdr->sh_size;
17212 else if ((ufile_ptr) hdr->sh_offset
17213 <= dyn.d_un.d_val - 1)
17214 dyn.d_un.d_val = hdr->sh_offset;
17215 }
17216 }
17217 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17218 }
17219 break;
17220
17221 case DT_TLSDESC_PLT:
17222 s = htab->root.splt;
17223 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17224 + htab->dt_tlsdesc_plt);
17225 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17226 break;
17227
17228 case DT_TLSDESC_GOT:
17229 s = htab->root.sgot;
17230 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17231 + htab->dt_tlsdesc_got);
17232 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17233 break;
17234
17235 /* Set the bottom bit of DT_INIT/FINI if the
17236 corresponding function is Thumb. */
17237 case DT_INIT:
17238 name = info->init_function;
17239 goto get_sym;
17240 case DT_FINI:
17241 name = info->fini_function;
17242 get_sym:
17243 /* If it wasn't set by elf_bfd_final_link
17244 then there is nothing to adjust. */
17245 if (dyn.d_un.d_val != 0)
17246 {
17247 struct elf_link_hash_entry * eh;
17248
17249 eh = elf_link_hash_lookup (elf_hash_table (info), name,
17250 FALSE, FALSE, TRUE);
17251 if (eh != NULL
17252 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17253 == ST_BRANCH_TO_THUMB)
17254 {
17255 dyn.d_un.d_val |= 1;
17256 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17257 }
17258 }
17259 break;
17260 }
17261 }
17262
17263 /* Fill in the first entry in the procedure linkage table. */
17264 if (splt->size > 0 && htab->plt_header_size)
17265 {
17266 const bfd_vma *plt0_entry;
17267 bfd_vma got_address, plt_address, got_displacement;
17268
17269 /* Calculate the addresses of the GOT and PLT. */
17270 got_address = sgot->output_section->vma + sgot->output_offset;
17271 plt_address = splt->output_section->vma + splt->output_offset;
17272
17273 if (htab->vxworks_p)
17274 {
17275 /* The VxWorks GOT is relocated by the dynamic linker.
17276 Therefore, we must emit relocations rather than simply
17277 computing the values now. */
17278 Elf_Internal_Rela rel;
17279
17280 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17281 put_arm_insn (htab, output_bfd, plt0_entry[0],
17282 splt->contents + 0);
17283 put_arm_insn (htab, output_bfd, plt0_entry[1],
17284 splt->contents + 4);
17285 put_arm_insn (htab, output_bfd, plt0_entry[2],
17286 splt->contents + 8);
17287 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17288
17289 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17290 rel.r_offset = plt_address + 12;
17291 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17292 rel.r_addend = 0;
17293 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17294 htab->srelplt2->contents);
17295 }
17296 else if (htab->nacl_p)
17297 arm_nacl_put_plt0 (htab, output_bfd, splt,
17298 got_address + 8 - (plt_address + 16));
17299 else if (using_thumb_only (htab))
17300 {
17301 got_displacement = got_address - (plt_address + 12);
17302
17303 plt0_entry = elf32_thumb2_plt0_entry;
17304 put_arm_insn (htab, output_bfd, plt0_entry[0],
17305 splt->contents + 0);
17306 put_arm_insn (htab, output_bfd, plt0_entry[1],
17307 splt->contents + 4);
17308 put_arm_insn (htab, output_bfd, plt0_entry[2],
17309 splt->contents + 8);
17310
17311 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17312 }
17313 else
17314 {
17315 got_displacement = got_address - (plt_address + 16);
17316
17317 plt0_entry = elf32_arm_plt0_entry;
17318 put_arm_insn (htab, output_bfd, plt0_entry[0],
17319 splt->contents + 0);
17320 put_arm_insn (htab, output_bfd, plt0_entry[1],
17321 splt->contents + 4);
17322 put_arm_insn (htab, output_bfd, plt0_entry[2],
17323 splt->contents + 8);
17324 put_arm_insn (htab, output_bfd, plt0_entry[3],
17325 splt->contents + 12);
17326
17327 #ifdef FOUR_WORD_PLT
17328 /* The displacement value goes in the otherwise-unused
17329 last word of the second entry. */
17330 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17331 #else
17332 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17333 #endif
17334 }
17335 }
17336
17337 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17338 really seem like the right value. */
17339 if (splt->output_section->owner == output_bfd)
17340 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17341
17342 if (htab->dt_tlsdesc_plt)
17343 {
17344 bfd_vma got_address
17345 = sgot->output_section->vma + sgot->output_offset;
17346 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17347 + htab->root.sgot->output_offset);
17348 bfd_vma plt_address
17349 = splt->output_section->vma + splt->output_offset;
17350
17351 arm_put_trampoline (htab, output_bfd,
17352 splt->contents + htab->dt_tlsdesc_plt,
17353 dl_tlsdesc_lazy_trampoline, 6);
17354
17355 bfd_put_32 (output_bfd,
17356 gotplt_address + htab->dt_tlsdesc_got
17357 - (plt_address + htab->dt_tlsdesc_plt)
17358 - dl_tlsdesc_lazy_trampoline[6],
17359 splt->contents + htab->dt_tlsdesc_plt + 24);
17360 bfd_put_32 (output_bfd,
17361 got_address - (plt_address + htab->dt_tlsdesc_plt)
17362 - dl_tlsdesc_lazy_trampoline[7],
17363 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
17364 }
17365
17366 if (htab->tls_trampoline)
17367 {
17368 arm_put_trampoline (htab, output_bfd,
17369 splt->contents + htab->tls_trampoline,
17370 tls_trampoline, 3);
17371 #ifdef FOUR_WORD_PLT
17372 bfd_put_32 (output_bfd, 0x00000000,
17373 splt->contents + htab->tls_trampoline + 12);
17374 #endif
17375 }
17376
17377 if (htab->vxworks_p
17378 && !bfd_link_pic (info)
17379 && htab->root.splt->size > 0)
17380 {
17381 /* Correct the .rel(a).plt.unloaded relocations. They will have
17382 incorrect symbol indexes. */
17383 int num_plts;
17384 unsigned char *p;
17385
17386 num_plts = ((htab->root.splt->size - htab->plt_header_size)
17387 / htab->plt_entry_size);
17388 p = htab->srelplt2->contents + RELOC_SIZE (htab);
17389
17390 for (; num_plts; num_plts--)
17391 {
17392 Elf_Internal_Rela rel;
17393
17394 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17395 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17396 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17397 p += RELOC_SIZE (htab);
17398
17399 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17400 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17401 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17402 p += RELOC_SIZE (htab);
17403 }
17404 }
17405 }
17406
17407 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
17408 /* NaCl uses a special first entry in .iplt too. */
17409 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17410
17411 /* Fill in the first three entries in the global offset table. */
17412 if (sgot)
17413 {
17414 if (sgot->size > 0)
17415 {
17416 if (sdyn == NULL)
17417 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17418 else
17419 bfd_put_32 (output_bfd,
17420 sdyn->output_section->vma + sdyn->output_offset,
17421 sgot->contents);
17422 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17423 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17424 }
17425
17426 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17427 }
17428
17429 /* At the very end of the .rofixup section is a pointer to the GOT. */
17430 if (htab->fdpic_p && htab->srofixup != NULL)
17431 {
17432 struct elf_link_hash_entry *hgot = htab->root.hgot;
17433
17434 bfd_vma got_value = hgot->root.u.def.value
17435 + hgot->root.u.def.section->output_section->vma
17436 + hgot->root.u.def.section->output_offset;
17437
17438 arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);
17439
17440 /* Make sure we allocated and generated the same number of fixups. */
17441 BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17442 }
17443
17444 return TRUE;
17445 }
17446
17447 static void
17448 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
17449 {
17450 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17451 struct elf32_arm_link_hash_table *globals;
17452 struct elf_segment_map *m;
17453
17454 i_ehdrp = elf_elfheader (abfd);
17455
17456 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17457 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17458 else
17459 _bfd_elf_post_process_headers (abfd, link_info);
17460 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17461
17462 if (link_info)
17463 {
17464 globals = elf32_arm_hash_table (link_info);
17465 if (globals != NULL && globals->byteswap_code)
17466 i_ehdrp->e_flags |= EF_ARM_BE8;
17467
17468 if (globals->fdpic_p)
17469 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17470 }
17471
17472 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17473 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17474 {
17475 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17476 if (abi == AEABI_VFP_args_vfp)
17477 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17478 else
17479 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17480 }
17481
17482 /* Scan segment to set p_flags attribute if it contains only sections with
17483 SHF_ARM_PURECODE flag. */
17484 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17485 {
17486 unsigned int j;
17487
17488 if (m->count == 0)
17489 continue;
17490 for (j = 0; j < m->count; j++)
17491 {
17492 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17493 break;
17494 }
17495 if (j == m->count)
17496 {
17497 m->p_flags = PF_X;
17498 m->p_flags_valid = 1;
17499 }
17500 }
17501 }
17502
17503 static enum elf_reloc_type_class
17504 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17505 const asection *rel_sec ATTRIBUTE_UNUSED,
17506 const Elf_Internal_Rela *rela)
17507 {
17508 switch ((int) ELF32_R_TYPE (rela->r_info))
17509 {
17510 case R_ARM_RELATIVE:
17511 return reloc_class_relative;
17512 case R_ARM_JUMP_SLOT:
17513 return reloc_class_plt;
17514 case R_ARM_COPY:
17515 return reloc_class_copy;
17516 case R_ARM_IRELATIVE:
17517 return reloc_class_ifunc;
17518 default:
17519 return reloc_class_normal;
17520 }
17521 }
17522
/* Hook run when ABFD is about to be written out: delegate to
   bfd_arm_update_notes to refresh the ARM note section
   (ARM_NOTE_SECTION), if ABFD has one.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17528
17529 /* Return TRUE if this is an unwinding table entry. */
17530
17531 static bfd_boolean
17532 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17533 {
17534 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17535 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17536 }
17537
17538
17539 /* Set the type and flags for an ARM section. We do this by
17540 the section name, which is a hack, but ought to work. */
17541
17542 static bfd_boolean
17543 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17544 {
17545 const char * name;
17546
17547 name = bfd_get_section_name (abfd, sec);
17548
17549 if (is_arm_elf_unwind_section_name (abfd, name))
17550 {
17551 hdr->sh_type = SHT_ARM_EXIDX;
17552 hdr->sh_flags |= SHF_LINK_ORDER;
17553 }
17554
17555 if (sec->flags & SEC_ELF_PURECODE)
17556 hdr->sh_flags |= SHF_ARM_PURECODE;
17557
17558 return TRUE;
17559 }
17560
17561 /* Handle an ARM specific section when reading an object file. This is
17562 called when bfd_section_from_shdr finds a section with an unknown
17563 type. */
17564
17565 static bfd_boolean
17566 elf32_arm_section_from_shdr (bfd *abfd,
17567 Elf_Internal_Shdr * hdr,
17568 const char *name,
17569 int shindex)
17570 {
17571 /* There ought to be a place to keep ELF backend specific flags, but
17572 at the moment there isn't one. We just keep track of the
17573 sections by their name, instead. Fortunately, the ABI gives
17574 names for all the ARM specific sections, so we will probably get
17575 away with this. */
17576 switch (hdr->sh_type)
17577 {
17578 case SHT_ARM_EXIDX:
17579 case SHT_ARM_PREEMPTMAP:
17580 case SHT_ARM_ATTRIBUTES:
17581 break;
17582
17583 default:
17584 return FALSE;
17585 }
17586
17587 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17588 return FALSE;
17589
17590 return TRUE;
17591 }
17592
17593 static _arm_elf_section_data *
17594 get_arm_elf_section_data (asection * sec)
17595 {
17596 if (sec && sec->owner && is_arm_elf (sec->owner))
17597 return elf32_arm_section_data (sec);
17598 else
17599 return NULL;
17600 }
17601
/* State threaded through the routines that emit linker-generated
   local symbols (mapping symbols and stub symbols).  */
typedef struct
{
  void *flaginfo;		/* Opaque argument forwarded to FUNC.  */
  struct bfd_link_info *info;	/* The link being performed.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback that actually outputs one symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
17611
/* The three kinds of ARM mapping symbol; the enumerators index the
   names table in elf32_arm_output_map_sym ("$a", "$t", "$d").  */
enum map_symbol_type
{
  ARM_MAP_ARM,		/* "$a": start of ARM instructions.  */
  ARM_MAP_THUMB,	/* "$t": start of Thumb instructions.  */
  ARM_MAP_DATA		/* "$d": start of literal/data items.  */
};
17618
17619
17620 /* Output a single mapping symbol. */
17621
17622 static bfd_boolean
17623 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17624 enum map_symbol_type type,
17625 bfd_vma offset)
17626 {
17627 static const char *names[3] = {"$a", "$t", "$d"};
17628 Elf_Internal_Sym sym;
17629
17630 sym.st_value = osi->sec->output_section->vma
17631 + osi->sec->output_offset
17632 + offset;
17633 sym.st_size = 0;
17634 sym.st_other = 0;
17635 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17636 sym.st_shndx = osi->sec_shndx;
17637 sym.st_target_internal = 0;
17638 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17639 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17640 }
17641
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The offsets of the $a/$t/$d symbols emitted below are specific to
   each target's PLT entry layout.  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol; nothing to map.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Select the section holding the entry; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off bit 0 of the recorded offset to get the entry address;
     presumably the low bit carries a Thumb flag — see the users of
     root_plt->offset.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl PLT entries are pure ARM code; a single $a suffices.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (htab->fdpic_p)
    {
      /* The Thumb stub, if any, sits 4 bytes before the ARM entry.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return FALSE;
      /* The lazy variant of the FDPIC PLT entry has a second ARM
	 sequence after the data words.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 24))
	  return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
17745
17746 /* Output mapping symbols for PLT entries associated with H. */
17747
17748 static bfd_boolean
17749 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17750 {
17751 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17752 struct elf32_arm_link_hash_entry *eh;
17753
17754 if (h->root.type == bfd_link_hash_indirect)
17755 return TRUE;
17756
17757 if (h->root.type == bfd_link_hash_warning)
17758 /* When warning symbols are created, they **replace** the "real"
17759 entry in the hash table, thus we never get to see the real
17760 symbol in a hash traversal. So look at it now. */
17761 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17762
17763 eh = (struct elf32_arm_link_hash_entry *) h;
17764 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17765 &h->plt, &eh->plt);
17766 }
17767
17768 /* Bind a veneered symbol to its veneer identified by its hash entry
17769 STUB_ENTRY. The veneered location thus loose its symbol. */
17770
17771 static void
17772 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17773 {
17774 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17775
17776 BFD_ASSERT (hash);
17777 hash->root.root.u.def.section = stub_entry->stub_sec;
17778 hash->root.root.u.def.value = stub_entry->stub_offset;
17779 hash->root.size = stub_entry->stub_size;
17780 }
17781
17782 /* Output a single local symbol for a generated stub. */
17783
17784 static bfd_boolean
17785 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17786 bfd_vma offset, bfd_vma size)
17787 {
17788 Elf_Internal_Sym sym;
17789
17790 sym.st_value = osi->sec->output_section->vma
17791 + osi->sec->output_offset
17792 + offset;
17793 sym.st_size = size;
17794 sym.st_other = 0;
17795 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17796 sym.st_shndx = osi->sec_shndx;
17797 sym.st_target_internal = 0;
17798 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17799 }
17800
/* Stub-hash traversal callback: emit the local symbol naming one stub
   and the mapping symbols describing its instruction/data layout.
   GEN_ENTRY is the stub hash entry; IN_ARG is the output_arch_syminfo
   for the section currently being processed.  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    /* The veneered symbol itself is rebound to the stub; no separate
       stub symbol is emitted.  */
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol naming the stub; Thumb entry points get
	 bit 0 set in the symbol value, as usual.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the stub template, emitting a mapping symbol at every point
     where the instruction/data kind changes.  PREV_TYPE starts as
     DATA_TYPE so the first template entry always gets a symbol.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by the encoded size of this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
17908
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.

   OUTPUT_BFD is the output of the link; FLAGINFO and FUNC are the
   opaque argument and callback used to emit each symbol (see
   output_arch_syminfo).  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      /* Only look at real input files that carry symbols.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* Pick the per-veneer size matching the glue variant in use.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer ends with a 4-byte data word.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each veneer starts with Thumb code and switches to ARM at
	 offset 4.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p && !htab->fdpic_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Per-entry mapping symbols: global symbols via the hash
	 traversal, then local ifunc PLT entries per input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
18143
18144 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18145 the import library. All SYMCOUNT symbols of ABFD can be examined
18146 from their pointers in SYMS. Pointers of symbols to keep should be
18147 stored continuously at the beginning of that array.
18148
18149 Returns the number of symbols to keep. */
18150
18151 static unsigned int
18152 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18153 struct bfd_link_info *info,
18154 asymbol **syms, long symcount)
18155 {
18156 size_t maxnamelen;
18157 char *cmse_name;
18158 long src_count, dst_count = 0;
18159 struct elf32_arm_link_hash_table *htab;
18160
18161 htab = elf32_arm_hash_table (info);
18162 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18163 symcount = 0;
18164
18165 maxnamelen = 128;
18166 cmse_name = (char *) bfd_malloc (maxnamelen);
18167 for (src_count = 0; src_count < symcount; src_count++)
18168 {
18169 struct elf32_arm_link_hash_entry *cmse_hash;
18170 asymbol *sym;
18171 flagword flags;
18172 char *name;
18173 size_t namelen;
18174
18175 sym = syms[src_count];
18176 flags = sym->flags;
18177 name = (char *) bfd_asymbol_name (sym);
18178
18179 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18180 continue;
18181 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18182 continue;
18183
18184 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18185 if (namelen > maxnamelen)
18186 {
18187 cmse_name = (char *)
18188 bfd_realloc (cmse_name, namelen);
18189 maxnamelen = namelen;
18190 }
18191 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18192 cmse_hash = (struct elf32_arm_link_hash_entry *)
18193 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18194
18195 if (!cmse_hash
18196 || (cmse_hash->root.root.type != bfd_link_hash_defined
18197 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18198 || cmse_hash->root.type != STT_FUNC)
18199 continue;
18200
18201 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
18202 continue;
18203
18204 syms[dst_count++] = sym;
18205 }
18206 free (cmse_name);
18207
18208 syms[dst_count] = NULL;
18209
18210 return dst_count;
18211 }
18212
18213 /* Filter symbols of ABFD to include in the import library. All
18214 SYMCOUNT symbols of ABFD can be examined from their pointers in
18215 SYMS. Pointers of symbols to keep should be stored continuously at
18216 the beginning of that array.
18217
18218 Returns the number of symbols to keep. */
18219
18220 static unsigned int
18221 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18222 struct bfd_link_info *info,
18223 asymbol **syms, long symcount)
18224 {
18225 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18226
18227 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18228 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18229 library to be a relocatable object file. */
18230 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18231 if (globals->cmse_implib)
18232 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18233 else
18234 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18235 }
18236
18237 /* Allocate target specific section data. */
18238
18239 static bfd_boolean
18240 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18241 {
18242 if (!sec->used_by_bfd)
18243 {
18244 _arm_elf_section_data *sdata;
18245 bfd_size_type amt = sizeof (*sdata);
18246
18247 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18248 if (sdata == NULL)
18249 return FALSE;
18250 sec->used_by_bfd = sdata;
18251 }
18252
18253 return _bfd_elf_new_section_hook (abfd, sec);
18254 }
18255
18256
18257 /* Used to order a list of mapping symbols by address. */
18258
18259 static int
18260 elf32_arm_compare_mapping (const void * a, const void * b)
18261 {
18262 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18263 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18264
18265 if (amap->vma > bmap->vma)
18266 return 1;
18267 else if (amap->vma < bmap->vma)
18268 return -1;
18269 else if (amap->type > bmap->type)
18270 /* Ensure results do not depend on the host qsort for objects with
18271 multiple mapping symbols at the same address by sorting on type
18272 after vma. */
18273 return 1;
18274 else if (amap->type < bmap->type)
18275 return -1;
18276 else
18277 return 0;
18278 }
18279
18280 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18281
18282 static unsigned long
18283 offset_prel31 (unsigned long addr, bfd_vma offset)
18284 {
18285 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18286 }
18287
18288 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18289 relocations. */
18290
18291 static void
18292 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18293 {
18294 unsigned long first_word = bfd_get_32 (output_bfd, from);
18295 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18296
18297 /* High bit of first word is supposed to be zero. */
18298 if ((first_word & 0x80000000ul) == 0)
18299 first_word = offset_prel31 (first_word, offset);
18300
18301 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18302 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18303 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18304 second_word = offset_prel31 (second_word, offset);
18305
18306 bfd_put_32 (output_bfd, first_word, to);
18307 bfd_put_32 (output_bfd, second_word, to + 4);
18308 }
18309
/* Data passed through the stub hash-table traversal to
   make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being written; stubs whose
     target lies in another section are skipped.  */
  asection *writing_section;
  /* Buffer holding that section's contents, patched in place.  */
  bfd_byte *contents;
};
18317
18318
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called for each entry of the stub
   hash table; GEN_ENTRY is the stub, IN_ARG points to an
   a8_branch_to_stub_data for the section being written.  Returns FALSE
   on error (stopping the traversal), TRUE otherwise.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only Cortex-A8 veneer stubs targeting the section being written are
     handled here; everything else is left alone.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  /* Thumb branches are relative to PC = instruction address + 4.  */
  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Build the 32-bit Thumb-2 branch replacing the veneered instruction.
     The three branch flavours share the offset-encoding code via the
     jump24 label inside the BL case below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Overwrite the veneered instruction: a 32-bit Thumb-2 insn is stored
     most-significant halfword first.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
18426
18427 /* Beginning of stm32l4xx work-around. */
18428
18429 /* Functions encoding instructions necessary for the emission of the
18430 fix-stm32l4xx-629360.
18431 Encoding is extracted from the
18432 ARM (C) Architecture Reference Manual
18433 ARMv7-A and ARMv7-R edition
18434 ARM DDI 0406C.b (ID072512). */
18435
18436 static inline bfd_vma
18437 create_instruction_branch_absolute (int branch_offset)
18438 {
18439 /* A8.8.18 B (A8-334)
18440 B target_address (Encoding T4). */
18441 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18442 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18443 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18444
18445 int s = ((branch_offset & 0x1000000) >> 24);
18446 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18447 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18448
18449 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18450 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18451
18452 bfd_vma patched_inst = 0xf0009000
18453 | s << 26 /* S. */
18454 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18455 | j1 << 13 /* J1. */
18456 | j2 << 11 /* J2. */
18457 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18458
18459 return patched_inst;
18460 }
18461
18462 static inline bfd_vma
18463 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18464 {
18465 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18466 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18467 bfd_vma patched_inst = 0xe8900000
18468 | (/*W=*/wback << 21)
18469 | (base_reg << 16)
18470 | (reg_mask & 0x0000ffff);
18471
18472 return patched_inst;
18473 }
18474
18475 static inline bfd_vma
18476 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18477 {
18478 /* A8.8.60 LDMDB/LDMEA (A8-402)
18479 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18480 bfd_vma patched_inst = 0xe9100000
18481 | (/*W=*/wback << 21)
18482 | (base_reg << 16)
18483 | (reg_mask & 0x0000ffff);
18484
18485 return patched_inst;
18486 }
18487
18488 static inline bfd_vma
18489 create_instruction_mov (int target_reg, int source_reg)
18490 {
18491 /* A8.8.103 MOV (register) (A8-486)
18492 MOV Rd, Rm (Encoding T1). */
18493 bfd_vma patched_inst = 0x4600
18494 | (target_reg & 0x7)
18495 | ((target_reg & 0x8) >> 3) << 7
18496 | (source_reg << 3);
18497
18498 return patched_inst;
18499 }
18500
18501 static inline bfd_vma
18502 create_instruction_sub (int target_reg, int source_reg, int value)
18503 {
18504 /* A8.8.221 SUB (immediate) (A8-708)
18505 SUB Rd, Rn, #value (Encoding T3). */
18506 bfd_vma patched_inst = 0xf1a00000
18507 | (target_reg << 8)
18508 | (source_reg << 16)
18509 | (/*S=*/0 << 20)
18510 | ((value & 0x800) >> 11) << 26
18511 | ((value & 0x700) >> 8) << 12
18512 | (value & 0x0ff);
18513
18514 return patched_inst;
18515 }
18516
18517 static inline bfd_vma
18518 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18519 int first_reg)
18520 {
18521 /* A8.8.332 VLDM (A8-922)
18522 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18523 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18524 | (/*W=*/wback << 21)
18525 | (base_reg << 16)
18526 | (num_words & 0x000000ff)
18527 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18528 | (first_reg & 0x00000001) << 22;
18529
18530 return patched_inst;
18531 }
18532
18533 static inline bfd_vma
18534 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18535 int first_reg)
18536 {
18537 /* A8.8.332 VLDM (A8-922)
18538 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18539 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18540 | (base_reg << 16)
18541 | (num_words & 0x000000ff)
18542 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18543 | (first_reg & 0x00000001) << 22;
18544
18545 return patched_inst;
18546 }
18547
18548 static inline bfd_vma
18549 create_instruction_udf_w (int value)
18550 {
18551 /* A8.8.247 UDF (A8-758)
18552 Undefined (Encoding T2). */
18553 bfd_vma patched_inst = 0xf7f0a000
18554 | (value & 0x00000fff)
18555 | (value & 0x000f0000) << 16;
18556
18557 return patched_inst;
18558 }
18559
18560 static inline bfd_vma
18561 create_instruction_udf (int value)
18562 {
18563 /* A8.8.247 UDF (A8-758)
18564 Undefined (Encoding T1). */
18565 bfd_vma patched_inst = 0xde00
18566 | (value & 0xff);
18567
18568 return patched_inst;
18569 }
18570
18571 /* Functions writing an instruction in memory, returning the next
18572 memory position to write to. */
18573
18574 static inline bfd_byte *
18575 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18576 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18577 {
18578 put_thumb2_insn (htab, output_bfd, insn, pt);
18579 return pt + 4;
18580 }
18581
18582 static inline bfd_byte *
18583 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18584 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18585 {
18586 put_thumb_insn (htab, output_bfd, insn, pt);
18587 return pt + 2;
18588 }
18589
18590 /* Function filling up a region in memory with T1 and T2 UDFs taking
18591 care of alignment. */
18592
18593 static bfd_byte *
18594 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18595 bfd * output_bfd,
18596 const bfd_byte * const base_stub_contents,
18597 bfd_byte * const from_stub_contents,
18598 const bfd_byte * const end_stub_contents)
18599 {
18600 bfd_byte *current_stub_contents = from_stub_contents;
18601
18602 /* Fill the remaining of the stub with deterministic contents : UDF
18603 instructions.
18604 Check if realignment is needed on modulo 4 frontier using T1, to
18605 further use T2. */
18606 if ((current_stub_contents < end_stub_contents)
18607 && !((current_stub_contents - base_stub_contents) % 2)
18608 && ((current_stub_contents - base_stub_contents) % 4))
18609 current_stub_contents =
18610 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18611 create_instruction_udf (0));
18612
18613 for (; current_stub_contents < end_stub_contents;)
18614 current_stub_contents =
18615 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18616 create_instruction_udf_w (0));
18617
18618 return current_stub_contents;
18619 }
18620
18621 /* Functions writing the stream of instructions equivalent to the
18622 derived sequence for ldmia, ldmdb, vldm respectively. */
18623
/* Write into BASE_STUB_CONTENTS the veneer replacing the Thumb-2 LDMIA
   instruction INITIAL_INSN (located at INITIAL_INSN_ADDR) so the
   stm32l4xx erratum is avoided: a wide register list is split into two
   LDMs of at most 7 registers each (low mask 0x007F, high mask 0xDF80),
   followed, unless PC was loaded, by a branch back to the instruction
   after the original one.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  Skipped when the LDM loaded PC, since
	 control would never reach the branch.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
	  This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
	  This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
18767
/* Write into BASE_STUB_CONTENTS the veneer replacing the Thumb-2 LDMDB
   instruction INITIAL_INSN (located at INITIAL_INSN_ADDR) so the
   stm32l4xx erratum is avoided.  As for the LDMIA case, a wide register
   list is split into two LDMs of at most 7 registers (low mask 0x007F,
   high mask 0xDF80), but the decrement-before addressing requires a
   case analysis on writeback, PC-in-list and Rn-in-list.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  (Unreachable at run time if the LDM
	 above loaded PC.)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
	  This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
	  This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
19013
/* Write into BASE_STUB_CONTENTS the veneer replacing the Thumb-2 VLDM
   instruction INITIAL_INSN (located at INITIAL_INSN_ADDR) so the
   stm32l4xx erratum is avoided: a transfer of more than 8 words is
   split into writeback VLDMs of at most 8 words each, with a final SUB
   restoring the base register for the no-writeback IA form.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 field: number of words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (  (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Last chunk carries the remaining words.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19134
19135 static void
19136 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19137 bfd * output_bfd,
19138 const insn32 wrong_insn,
19139 const bfd_byte *const wrong_insn_addr,
19140 bfd_byte *const stub_contents)
19141 {
19142 if (is_thumb2_ldmia (wrong_insn))
19143 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19144 wrong_insn, wrong_insn_addr,
19145 stub_contents);
19146 else if (is_thumb2_ldmdb (wrong_insn))
19147 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19148 wrong_insn, wrong_insn_addr,
19149 stub_contents);
19150 else if (is_thumb2_vldm (wrong_insn))
19151 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19152 wrong_insn, wrong_insn_addr,
19153 stub_contents);
19154 }
19155
19156 /* End of stm32l4xx work-around. */
19157
19158
19159 /* Do code byteswapping. Return FALSE afterwards so that the section is
19160 written out as normal. */
19161
static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  /* Patch erratum veneers (VFP11 and STM32L4XX) into the section image,
     rewrite edited .ARM.exidx tables, redirect branches to Cortex-A8
     erratum stubs, and finally byte-swap code regions when
     byteswap_code is set.  Returning FALSE tells generic code to write
     CONTENTS out as normal; TRUE means we wrote the section ourselves.  */
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Apply any recorded VFP11 denorm erratum fix-ups: patch branches to
     veneers over the scanned instructions, and fill in the veneers.  */
  if (errcount != 0)
    {
      /* In big-endian output, XOR-ing the byte index with 3 stores the
	 bytes of each 32-bit word in little-endian order.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  /* Section-relative position of the spot to patch.  */
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* Branch displacement must fit in the 24-bit field
		   (word-aligned, so +-2^25 bytes).  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Apply any recorded STM32L4XX erratum fix-ups in the same fashion:
     branches out to veneers, and replacement stubs in the veneers.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb-2 B.W has a +-16MB range; diagnose by how much
		   the jump misses it, then skip this fix-up.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		/* Return address: just past the veneer, back in the
		   original instruction stream.  */
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* For an exception index table, replay the recorded edits (deleted
     duplicate entries, inserted EXIDX_CANTUNWIND markers) while copying
     CONTENTS into a freshly built image, then write that image out
     ourselves and return TRUE.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      /* NOTE(review): the result of bfd_malloc is not NULL-checked and
	 EDITED_CONTENTS is never freed on this path — presumably
	 acceptable at the end of a link, but worth confirming.  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      /* Each deletion shifts later code addresses down by 8; each
	 insertion shifts them up.  This accumulates the adjustment to
	 apply to copied entries' prel31 offsets.  */
      bfd_vma add_to_offsets = 0;

      /* Entries are 8 bytes each; walk input entries and edits in step.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  /* Without mapping symbols there is nothing left to do.  */
  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      /* Each mapping symbol governs the bytes up to the next one (or the
	 end of the section), and its type selects the swap unit.  */
      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The mapping information is no longer needed; release it.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
19517
19518 /* Mangle thumb function symbols as we read them in. */
19519
19520 static bfd_boolean
19521 elf32_arm_swap_symbol_in (bfd * abfd,
19522 const void *psrc,
19523 const void *pshn,
19524 Elf_Internal_Sym *dst)
19525 {
19526 Elf_Internal_Shdr *symtab_hdr;
19527 const char *name = NULL;
19528
19529 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19530 return FALSE;
19531 dst->st_target_internal = 0;
19532
19533 /* New EABI objects mark thumb function symbols by setting the low bit of
19534 the address. */
19535 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19536 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19537 {
19538 if (dst->st_value & 1)
19539 {
19540 dst->st_value &= ~(bfd_vma) 1;
19541 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19542 ST_BRANCH_TO_THUMB);
19543 }
19544 else
19545 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19546 }
19547 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19548 {
19549 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19550 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19551 }
19552 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19553 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19554 else
19555 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19556
19557 /* Mark CMSE special symbols. */
19558 symtab_hdr = & elf_symtab_hdr (abfd);
19559 if (symtab_hdr->sh_size)
19560 name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
19561 if (name && CONST_STRNEQ (name, CMSE_PREFIX))
19562 ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
19563
19564 return TRUE;
19565 }
19566
19567
19568 /* Mangle thumb function symbols as we write them out. */
19569
19570 static void
19571 elf32_arm_swap_symbol_out (bfd *abfd,
19572 const Elf_Internal_Sym *src,
19573 void *cdst,
19574 void *shndx)
19575 {
19576 Elf_Internal_Sym newsym;
19577
19578 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19579 of the address set, as per the new EABI. We do this unconditionally
19580 because objcopy does not set the elf header flags until after
19581 it writes out the symbol table. */
19582 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19583 {
19584 newsym = *src;
19585 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19586 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19587 if (newsym.st_shndx != SHN_UNDEF)
19588 {
19589 /* Do this only for defined symbols. At link type, the static
19590 linker will simulate the work of dynamic linker of resolving
19591 symbols and will carry over the thumbness of found symbols to
19592 the output symbol table. It's not clear how it happens, but
19593 the thumbness of undefined symbols can well be different at
19594 runtime, and writing '1' for them will be confusing for users
19595 and possibly for dynamic linker itself.
19596 */
19597 newsym.st_value |= 1;
19598 }
19599
19600 src = &newsym;
19601 }
19602 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19603 }
19604
19605 /* Add the PT_ARM_EXIDX program header. */
19606
19607 static bfd_boolean
19608 elf32_arm_modify_segment_map (bfd *abfd,
19609 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19610 {
19611 struct elf_segment_map *m;
19612 asection *sec;
19613
19614 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19615 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19616 {
19617 /* If there is already a PT_ARM_EXIDX header, then we do not
19618 want to add another one. This situation arises when running
19619 "strip"; the input binary already has the header. */
19620 m = elf_seg_map (abfd);
19621 while (m && m->p_type != PT_ARM_EXIDX)
19622 m = m->next;
19623 if (!m)
19624 {
19625 m = (struct elf_segment_map *)
19626 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19627 if (m == NULL)
19628 return FALSE;
19629 m->p_type = PT_ARM_EXIDX;
19630 m->count = 1;
19631 m->sections[0] = sec;
19632
19633 m->next = elf_seg_map (abfd);
19634 elf_seg_map (abfd) = m;
19635 }
19636 }
19637
19638 return TRUE;
19639 }
19640
19641 /* We may add a PT_ARM_EXIDX program header. */
19642
19643 static int
19644 elf32_arm_additional_program_headers (bfd *abfd,
19645 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19646 {
19647 asection *sec;
19648
19649 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19650 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19651 return 1;
19652 else
19653 return 0;
19654 }
19655
19656 /* Hook called by the linker routine which adds symbols from an object
19657 file. */
19658
19659 static bfd_boolean
19660 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19661 Elf_Internal_Sym *sym, const char **namep,
19662 flagword *flagsp, asection **secp, bfd_vma *valp)
19663 {
19664 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
19665 && (abfd->flags & DYNAMIC) == 0
19666 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
19667 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
19668
19669 if (elf32_arm_hash_table (info) == NULL)
19670 return FALSE;
19671
19672 if (elf32_arm_hash_table (info)->vxworks_p
19673 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19674 flagsp, secp, valp))
19675 return FALSE;
19676
19677 return TRUE;
19678 }
19679
19680 /* We use this to override swap_symbol_in and swap_symbol_out. */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,				/* Hash-table entry size.  */
  1,				/* Internal relocs per external reloc.  */
  32, 2,			/* Arch size, file-alignment log2.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* ARM-specific: decodes Thumb bit.  */
  elf32_arm_swap_symbol_out,	/* ARM-specific: re-encodes Thumb bit.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
19710
19711 static bfd_vma
19712 read_code32 (const bfd *abfd, const bfd_byte *addr)
19713 {
19714 /* V7 BE8 code is always little endian. */
19715 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19716 return bfd_getl32 (addr);
19717
19718 return bfd_get_32 (abfd, addr);
19719 }
19720
19721 static bfd_vma
19722 read_code16 (const bfd *abfd, const bfd_byte *addr)
19723 {
19724 /* V7 BE8 code is always little endian. */
19725 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19726 return bfd_getl16 (addr);
19727
19728 return bfd_get_16 (abfd, addr);
19729 }
19730
19731 /* Return size of plt0 entry starting at ADDR
19732 or (bfd_vma) -1 if size can not be determined. */
19733
19734 static bfd_vma
19735 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19736 {
19737 bfd_vma first_word;
19738 bfd_vma plt0_size;
19739
19740 first_word = read_code32 (abfd, addr);
19741
19742 if (first_word == elf32_arm_plt0_entry[0])
19743 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19744 else if (first_word == elf32_thumb2_plt0_entry[0])
19745 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19746 else
19747 /* We don't yet handle this PLT format. */
19748 return (bfd_vma) -1;
19749
19750 return plt0_size;
19751 }
19752
19753 /* Return size of plt entry starting at offset OFFSET
19754 of plt section located at address START
19755 or (bfd_vma) -1 if size can not be determined. */
19756
19757 static bfd_vma
19758 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
19759 {
19760 bfd_vma first_insn;
19761 bfd_vma plt_size = 0;
19762 const bfd_byte *addr = start + offset;
19763
19764 /* PLT entry size if fixed on Thumb-only platforms. */
19765 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
19766 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19767
19768 /* Respect Thumb stub if necessary. */
19769 if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
19770 {
19771 plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
19772 }
19773
19774 /* Strip immediate from first add. */
19775 first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
19776
19777 #ifdef FOUR_WORD_PLT
19778 if (first_insn == elf32_arm_plt_entry[0])
19779 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19780 #else
19781 if (first_insn == elf32_arm_plt_entry_long[0])
19782 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
19783 else if (first_insn == elf32_arm_plt_entry_short[0])
19784 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
19785 #endif
19786 else
19787 /* We don't yet handle this PLT format. */
19788 return (bfd_vma) -1;
19789
19790 return plt_size;
19791 }
19792
19793 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19794
static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  /* Build synthetic "<name>@plt" symbols, one per .rel.plt entry, so
     that disassembly can label PLT stubs.  Returns the number of
     symbols created (stored in *RET), 0 if none apply, or -1 on error.  */
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  /* Synthetic PLT symbols only make sense for linked objects.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* .rel.plt must reference the dynamic symbol table and really be a
     relocation section.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* One allocation holds the asymbol array followed by all the name
     strings; first compute the total size needed.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      /* Room for "+0x" plus up to 8 hex digits of addend.  */
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* NOTE(review): on this failure path *RET keeps the freshly
     allocated buffer while -1 is returned — presumably the caller is
     expected to release it; verify against callers.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Names are packed immediately after the symbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      /* Stop at the first PLT entry whose layout we cannot decode.  */
      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeros in the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
19909
19910 static bfd_boolean
19911 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
19912 {
19913 if (hdr->sh_flags & SHF_ARM_PURECODE)
19914 *flags |= SEC_ELF_PURECODE;
19915 return TRUE;
19916 }
19917
19918 static flagword
19919 elf32_arm_lookup_section_flags (char *flag_name)
19920 {
19921 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
19922 return SHF_ARM_PURECODE;
19923
19924 return SEC_NO_FLAGS;
19925 }
19926
19927 static unsigned int
19928 elf32_arm_count_additional_relocs (asection *sec)
19929 {
19930 struct _arm_elf_section_data *arm_data;
19931 arm_data = get_arm_elf_section_data (sec);
19932
19933 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
19934 }
19935
19936 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
19937 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
19938 FALSE otherwise. ISECTION is the best guess matching section from the
19939 input bfd IBFD, but it might be NULL. */
19940
static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I doubles as the "found text section" result: section index 0
	   is the reserved null section, so I == 0 means "not found".  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output section header that corresponds to the
	       input section's sh_link target.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    /* Scan backwards for an allocated executable section.  */
	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
20026
20027 /* Returns TRUE if NAME is an ARM mapping symbol.
20028 Traditionally the symbols $a, $d and $t have been used.
20029 The ARM ELF standard also defines $x (for A64 code). It also allows a
20030 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20031 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20032 not support them here. $t.x indicates the start of ThumbEE instructions. */
20033
20034 static bfd_boolean
20035 is_arm_mapping_symbol (const char * name)
20036 {
20037 return name != NULL /* Paranoia. */
20038 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20039 the mapping symbols could have acquired a prefix.
20040 We do not support this here, since such symbols no
20041 longer conform to the ARM ELF ABI. */
20042 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20043 && (name[2] == 0 || name[2] == '.');
20044 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20045 any characters that follow the period are legal characters for the body
20046 of a symbol's name. For now we just assume that this is the case. */
20047 }
20048
20049 /* Make sure that mapping symbols in object files are not removed via the
20050 "strip --strip-unneeded" tool. These symbols are needed in order to
20051 correctly generate interworking veneers, and for byte swapping code
20052 regions. Once an object file has been linked, it is safe to remove the
20053 symbols as they will no longer be needed. */
20054
20055 static void
20056 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20057 {
20058 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20059 && sym->section != bfd_abs_section_ptr
20060 && is_arm_mapping_symbol (sym->name))
20061 sym->flags |= BSF_KEEP;
20062 }
20063
20064 #undef elf_backend_copy_special_section_fields
20065 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20066
20067 #define ELF_ARCH bfd_arch_arm
20068 #define ELF_TARGET_ID ARM_ELF_DATA
20069 #define ELF_MACHINE_CODE EM_ARM
20070 #ifdef __QNXTARGET__
20071 #define ELF_MAXPAGESIZE 0x1000
20072 #else
20073 #define ELF_MAXPAGESIZE 0x10000
20074 #endif
20075 #define ELF_MINPAGESIZE 0x1000
20076 #define ELF_COMMONPAGESIZE 0x1000
20077
20078 #define bfd_elf32_mkobject elf32_arm_mkobject
20079
20080 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20081 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20082 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20083 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20084 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20085 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20086 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20087 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20088 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20089 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20090 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20091 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20092 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20093
20094 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20095 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
/* Hook implementations for the standard ARM ELF target; picked up by
   the elf32-target.h include below.  */
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_update_relocs		elf32_arm_update_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing

/* Capability flags: stock ARM uses REL (not RELA) dynamic
   relocations, supports section garbage collection and GOT/PLT
   reference counting, and keeps the PLT read-only.  */
#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    1
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_want_dynrelro      1
#define elf_backend_may_use_rel_p      1
#define elf_backend_may_use_rela_p     0
#define elf_backend_default_use_rela_p 0
#define elf_backend_dtrel_excludes_plt 1

/* The GOT header occupies three 4-byte words.  */
#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

/* ARM EABI build attributes live in ".ARM.attributes" under the
   "aeabi" vendor name.  */
#undef	elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef	elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef	elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

#undef	elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef	elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags

/* Linux core files use 16-bit uid/gid fields in prpsinfo on ARM.  */
#define elf_backend_linux_prpsinfo32_ugid16	TRUE

#include "elf32-target.h"
20158
/* Native Client targets.  */

/* Override only the target vector symbols and names for the NaCl
   flavour; the remaining backend hooks are redefined below.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
20169
20170 /* Like elf32_arm_link_hash_table_create -- but overrides
20171 appropriately for NaCl. */
20172
20173 static struct bfd_link_hash_table *
20174 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20175 {
20176 struct bfd_link_hash_table *ret;
20177
20178 ret = elf32_arm_link_hash_table_create (abfd);
20179 if (ret)
20180 {
20181 struct elf32_arm_link_hash_table *htab
20182 = (struct elf32_arm_link_hash_table *) ret;
20183
20184 htab->nacl_p = 1;
20185
20186 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20187 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20188 }
20189 return ret;
20190 }
20191
20192 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20193 really need to use elf32_arm_modify_segment_map. But we do it
20194 anyway just to reduce gratuitous differences with the stock ARM backend. */
20195
20196 static bfd_boolean
20197 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20198 {
20199 return (elf32_arm_modify_segment_map (abfd, info)
20200 && nacl_modify_segment_map (abfd, info));
20201 }
20202
/* Final-write hook for NaCl: apply the standard ARM processing first,
   then the NaCl-specific processing.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
20209
20210 static bfd_vma
20211 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20212 const arelent *rel ATTRIBUTE_UNUSED)
20213 {
20214 return plt->vma
20215 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20216 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20217 }
20218
/* NaCl-specific backend overrides, then instantiate the NaCl target
   vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_nacl_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment	4
#undef	elf_backend_modify_segment_map
#define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef	elf_backend_modify_program_headers
#define	elf_backend_modify_program_headers	nacl_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
#undef	bfd_elf32_get_synthetic_symtab
#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
#undef	elf_backend_copy_special_section_fields

#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE


#include "elf32-target.h"

/* Reset to defaults.  */
#undef	elf_backend_plt_alignment
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef	elf_backend_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef	ELF_MINPAGESIZE
#define ELF_MINPAGESIZE			0x1000
#undef	ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE		0x1000
20255
/* FDPIC Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
/* Lower the match priority so the generic ARM vectors win unless the
   object identifies itself as FDPIC via its OSABI byte.  */
#undef	elf_match_priority
#define elf_match_priority		128
#undef	ELF_OSABI
#define ELF_OSABI			ELFOSABI_ARM_FDPIC
20270
20271 /* Like elf32_arm_link_hash_table_create -- but overrides
20272 appropriately for FDPIC. */
20273
20274 static struct bfd_link_hash_table *
20275 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20276 {
20277 struct bfd_link_hash_table *ret;
20278
20279 ret = elf32_arm_link_hash_table_create (abfd);
20280 if (ret)
20281 {
20282 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20283
20284 htab->fdpic_p = 1;
20285 }
20286 return ret;
20287 }
20288
20289 /* We need dynamic symbols for every section, since segments can
20290 relocate independently. */
20291 static bfd_boolean
20292 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20293 struct bfd_link_info *info
20294 ATTRIBUTE_UNUSED,
20295 asection *p ATTRIBUTE_UNUSED)
20296 {
20297 switch (elf_section_data (p)->this_hdr.sh_type)
20298 {
20299 case SHT_PROGBITS:
20300 case SHT_NOBITS:
20301 /* If sh_type is yet undecided, assume it could be
20302 SHT_PROGBITS/SHT_NOBITS. */
20303 case SHT_NULL:
20304 return FALSE;
20305
20306 /* There shouldn't be section relative relocations
20307 against any other section. */
20308 default:
20309 return TRUE;
20310 }
20311 }
20312
#undef	elf32_bed
#define elf32_bed				elf32_arm_fdpic_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_fdpic_link_hash_table_create

#undef	elf_backend_omit_section_dynsym
#define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym

#include "elf32-target.h"

/* Restore the defaults overridden for FDPIC before the next target
   is defined.  */
#undef	elf_match_priority
#undef	ELF_OSABI
#undef	elf_backend_omit_section_dynsym
20327
/* VxWorks Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"

20339 /* Like elf32_arm_link_hash_table_create -- but overrides
20340 appropriately for VxWorks. */
20341
20342 static struct bfd_link_hash_table *
20343 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20344 {
20345 struct bfd_link_hash_table *ret;
20346
20347 ret = elf32_arm_link_hash_table_create (abfd);
20348 if (ret)
20349 {
20350 struct elf32_arm_link_hash_table *htab
20351 = (struct elf32_arm_link_hash_table *) ret;
20352 htab->use_rel = 0;
20353 htab->vxworks_p = 1;
20354 }
20355 return ret;
20356 }
20357
/* Final-write hook for VxWorks: apply the standard ARM processing
   first, then the VxWorks-specific processing.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
20364
#undef	elf32_bed
#define elf32_bed				elf32_arm_vxworks_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef	elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

/* VxWorks uses RELA-style relocations exclusively and wants PLT
   symbols in the output.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

#include "elf32-target.h"
20387
20388
/* Merge backend specific data from an object file to the output
   object file when linking.  Hard incompatibilities (endianness,
   attribute merge failure, pre-BE8 input, EABI version clash) return
   FALSE immediately; APCS/FP model mismatches are reported and
   accumulated into FLAGS_COMPATIBLE, which is the final result.
   Interworking mismatches only warn.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  /* Non-ARM inputs (e.g. linker scripts' fake BFDs) have nothing to
     merge.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      /* NOTE(review): the break below means only the first
		 non-glue section is examined, so ONLY_DATA_SECTIONS
		 reflects just that section — confirm this is the
		 intended behaviour.  */
	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
20611
20612
/* Symbian OS Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
20623
20624 /* Like elf32_arm_link_hash_table_create -- but overrides
20625 appropriately for Symbian OS. */
20626
20627 static struct bfd_link_hash_table *
20628 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20629 {
20630 struct bfd_link_hash_table *ret;
20631
20632 ret = elf32_arm_link_hash_table_create (abfd);
20633 if (ret)
20634 {
20635 struct elf32_arm_link_hash_table *htab
20636 = (struct elf32_arm_link_hash_table *)ret;
20637 /* There is no PLT header for Symbian OS. */
20638 htab->plt_header_size = 0;
20639 /* The PLT entries are each one instruction and one word. */
20640 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20641 htab->symbian_p = 1;
20642 /* Symbian uses armv5t or above, so use_blx is always true. */
20643 htab->use_blx = 1;
20644 htab->root.is_relocatable_executable = 1;
20645 }
20646 return ret;
20647 }
20648
/* Special-section table consulted when assigning section types and
   flags for Symbian OS (BPABI) objects.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),	 0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),	 0, SHT_STRTAB,	  0 },
  { STRING_COMMA_LEN (".dynsym"),	 0, SHT_DYNSYM,	  0 },
  { STRING_COMMA_LEN (".got"),		 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),		 0, SHT_HASH,	  0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),	 0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),	 0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel terminating the table.  */
  { NULL,			      0, 0, 0,		  0 }
};
20669
/* Begin-write hook for Symbian OS: strip D_PAGED before the generic
   ARM processing runs.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
20685
20686 static bfd_boolean
20687 elf32_arm_symbian_modify_segment_map (bfd *abfd,
20688 struct bfd_link_info *info)
20689 {
20690 struct elf_segment_map *m;
20691 asection *dynsec;
20692
20693 /* BPABI shared libraries and executables should have a PT_DYNAMIC
20694 segment. However, because the .dynamic section is not marked
20695 with SEC_LOAD, the generic ELF code will not create such a
20696 segment. */
20697 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
20698 if (dynsec)
20699 {
20700 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
20701 if (m->p_type == PT_DYNAMIC)
20702 break;
20703
20704 if (m == NULL)
20705 {
20706 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
20707 m->next = elf_seg_map (abfd);
20708 elf_seg_map (abfd) = m;
20709 }
20710 }
20711
20712 /* Also call the generic arm routine. */
20713 return elf32_arm_modify_segment_map (abfd, info);
20714 }
20715
20716 /* Return address for Ith PLT stub in section PLT, for relocation REL
20717 or (bfd_vma) -1 if it should not be included. */
20718
20719 static bfd_vma
20720 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
20721 const arelent *rel ATTRIBUTE_UNUSED)
20722 {
20723 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
20724 }
20725
#undef	elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef	ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

/* Drop the emit_relocs override left over from the VxWorks target.  */
#undef	elf_backend_emit_relocs

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef	elf_backend_special_sections
#define elf_backend_special_sections		elf32_arm_symbian_special_sections
#undef	elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef	elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef	elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val	elf32_arm_symbian_plt_sym_val

/* Symbian uses REL-style relocations and does not want PLT symbols.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	1
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	0
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	0
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef	elf_backend_dtrel_excludes_plt
#define elf_backend_dtrel_excludes_plt	0
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x8000

#include "elf32-target.h"
This page took 0.483459 seconds and 5 git commands to generate.